// Copyright 2019 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
// See the License for the specific language governing permissions and
// limitations under the License.
|
|
|
package datamanager
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2019-06-03 14:17:27 +00:00
|
|
|
"sort"
|
2019-04-26 14:00:03 +00:00
|
|
|
"strings"
|
|
|
|
|
2019-07-01 09:40:20 +00:00
|
|
|
ostypes "agola.io/agola/internal/objectstorage/types"
|
|
|
|
"agola.io/agola/internal/sequence"
|
2019-06-03 14:17:27 +00:00
|
|
|
|
|
|
|
uuid "github.com/satori/go.uuid"
|
2019-05-23 09:23:14 +00:00
|
|
|
errors "golang.org/x/xerrors"
|
2019-04-26 14:00:03 +00:00
|
|
|
)
|
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
const (
	// DefaultMaxDataFileSize is the default maximum size (in bytes) of a
	// single data file before a new split point is created.
	DefaultMaxDataFileSize = 10 * 1024 * 1024
)
|
|
|
|
|
2019-04-26 14:00:03 +00:00
|
|
|
// DataStatus describes a data snapshot: its sequence, the last wal sequence
// included in it and, for every data type, the ordered list of data files
// composing the snapshot.
type DataStatus struct {
	// DataSequence is the sequence of this snapshot.
	DataSequence string `json:"data_sequence,omitempty"`
	// WalSequence is the sequence of the last wal merged into this snapshot.
	WalSequence string `json:"wal_sequence,omitempty"`
	// an entry id ordered list of files for a specific data type (map key)
	Files map[string][]*DataStatusFile `json:"files,omitempty"`
}
|
|
|
|
|
|
|
|
// DataStatusFile describes a single data file belonging to a snapshot.
type DataStatusFile struct {
	// ID is the data file identifier (used to build its storage path).
	ID string `json:"id,omitempty"`
	// the last entry id in this file
	LastEntryID string `json:"last_entry_id,omitempty"`
}
|
|
|
|
|
|
|
|
// DataFileIndex maps every entry id contained in a data file to the byte
// offset used to seek to it when reading the entry back.
type DataFileIndex struct {
	Index map[string]int64 `json:"index,omitempty"`
}
|
|
|
|
|
|
|
|
// DataEntry is a single entry stored (json encoded) inside a data file.
type DataEntry struct {
	ID       string `json:"id,omitempty"`
	DataType string `json:"data_type,omitempty"`
	Data     []byte `json:"data,omitempty"`
}
|
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
// TODO(sgotti) this implementation could be heavily optimized to store less data in memory

// TODO(sgotti)
// split/merge data files at max N bytes (i.e 16MiB) so we'll rewrite only files
// with changed data

// walIndex maps a dataType to the list of wal actions to apply, keyed and
// ordered by entry id (built by DataManager.walIndex).
// TODO(sgotti) write this index to local disk (a temporary sqlite lite) instead of storing all in memory
type walIndex map[string]walActions
|
|
|
|
|
|
|
|
// walActions is a list of wal actions ordered by entry id; it implements
// sort.Interface so it can be sorted with sort.Sort.
type walActions []*Action
|
|
|
|
|
|
|
|
// Len implements sort.Interface.
func (w walActions) Len() int { return len(w) }

// Less implements sort.Interface, ordering actions by entry ID.
func (w walActions) Less(i, j int) bool { return w[i].ID < w[j].ID }

// Swap implements sort.Interface.
func (w walActions) Swap(i, j int) { w[i], w[j] = w[j], w[i] }
|
|
|
|
|
|
|
|
func (d *DataManager) walIndex(ctx context.Context, wals []*WalData) (walIndex, error) {
|
|
|
|
wimap := map[string]map[string]*Action{}
|
|
|
|
|
|
|
|
for _, walData := range wals {
|
|
|
|
walFilef, err := d.ReadWal(walData.WalSequence)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
dec := json.NewDecoder(walFilef)
|
|
|
|
var header *WalHeader
|
|
|
|
if err = dec.Decode(&header); err != nil && err != io.EOF {
|
|
|
|
walFilef.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
walFilef.Close()
|
|
|
|
|
|
|
|
walFile, err := d.ReadWalData(header.WalDataFileID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Errorf("cannot read wal data file %q: %w", header.WalDataFileID, err)
|
|
|
|
}
|
|
|
|
defer walFile.Close()
|
|
|
|
|
|
|
|
dec = json.NewDecoder(walFile)
|
|
|
|
for {
|
|
|
|
var action *Action
|
|
|
|
|
|
|
|
err := dec.Decode(&action)
|
|
|
|
if err == io.EOF {
|
|
|
|
// all done
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Errorf("failed to decode wal file: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := wimap[action.DataType]; !ok {
|
|
|
|
wimap[action.DataType] = map[string]*Action{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// only keep the last action for every entry id
|
|
|
|
wimap[action.DataType][action.ID] = action
|
|
|
|
}
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
wi := map[string]walActions{}
|
|
|
|
for dataType, dd := range wimap {
|
|
|
|
for _, de := range dd {
|
|
|
|
wi[dataType] = append(wi[dataType], de)
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
2019-06-03 14:17:27 +00:00
|
|
|
sort.Sort(wi[dataType])
|
|
|
|
}
|
|
|
|
|
|
|
|
return wi, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeDataSnapshot will create a new data snapshot merging the uncheckpointed
|
|
|
|
// wals. It will split data files at maxDataFileSize bytes so we'll rewrite only
|
|
|
|
// files with changed data.
|
|
|
|
// Only new files will be created, previous snapshot data files won't be touched
|
|
|
|
//
|
|
|
|
// TODO(sgotti) add a function to merge small data files (i.e after deletions) to avoid fragmentation
|
|
|
|
// TODO(sgotti) add a function to delete old data files keeping only N snapshots
|
|
|
|
func (d *DataManager) writeDataSnapshot(ctx context.Context, wals []*WalData) error {
|
|
|
|
dataSequence, err := sequence.IncSequence(ctx, d.e, etcdCheckpointSeqKey)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var lastWalSequence string
|
|
|
|
for _, walData := range wals {
|
|
|
|
lastWalSequence = walData.WalSequence
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatus := &DataStatus{
|
|
|
|
DataSequence: dataSequence.String(),
|
|
|
|
WalSequence: lastWalSequence,
|
2019-06-03 14:17:27 +00:00
|
|
|
Files: make(map[string][]*DataStatusFile),
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
2019-06-03 14:17:27 +00:00
|
|
|
|
|
|
|
wi, err := d.walIndex(ctx, wals)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
curDataStatus, err := d.GetLastDataStatus()
|
|
|
|
if err != nil && err != ostypes.ErrNotExist {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-04-26 14:00:03 +00:00
|
|
|
for _, dataType := range d.dataTypes {
|
2019-06-03 14:17:27 +00:00
|
|
|
var curDataStatusFiles []*DataStatusFile
|
|
|
|
if curDataStatus != nil {
|
|
|
|
curDataStatusFiles = curDataStatus.Files[dataType]
|
|
|
|
}
|
|
|
|
dataStatusFiles, err := d.writeDataType(ctx, wi, dataType, curDataStatusFiles)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
dataStatus.Files[dataType] = dataStatusFiles
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusj, err := json.Marshal(dataStatus)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-07-03 15:03:37 +00:00
|
|
|
if err := d.ost.WriteObject(d.dataStatusPath(dataSequence.String()), bytes.NewReader(dataStatusj), int64(len(dataStatusj)), true); err != nil {
|
2019-04-26 14:00:03 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
func (d *DataManager) writeDataFile(ctx context.Context, buf *bytes.Buffer, size int64, dataFileIndex *DataFileIndex, dataFileID, dataType string) error {
|
|
|
|
if buf.Len() == 0 {
|
|
|
|
return fmt.Errorf("empty data entries")
|
|
|
|
}
|
|
|
|
|
2019-07-03 15:03:37 +00:00
|
|
|
if err := d.ost.WriteObject(d.DataFilePath(dataType, dataFileID), buf, size, true); err != nil {
|
2019-06-03 14:17:27 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataFileIndexj, err := json.Marshal(dataFileIndex)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-07-03 15:03:37 +00:00
|
|
|
if err := d.ost.WriteObject(d.DataFileIndexPath(dataType, dataFileID), bytes.NewReader(dataFileIndexj), int64(len(dataFileIndexj)), true); err != nil {
|
2019-04-26 14:00:03 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
return nil
|
|
|
|
}
|
2019-04-26 14:00:03 +00:00
|
|
|
|
2019-06-03 14:17:27 +00:00
|
|
|
// ActionGroup associates a contiguous run of wal actions with the current
// snapshot data file they apply to.
type ActionGroup struct {
	// DataStatusFile is the existing data file these actions fall into (nil
	// when there is no current data file for them).
	DataStatusFile *DataStatusFile
	// StartActionIndex and ActionsSize delimit the run of actions (indexes
	// into the walIndex action list for the data type).
	StartActionIndex int
	ActionsSize      int
	// PreviousDataStatusFiles are the unmodified data files preceding
	// DataStatusFile that must be kept as-is in the new snapshot.
	PreviousDataStatusFiles []*DataStatusFile
}
|
|
|
|
|
|
|
|
// actionGroups partitions the (id ordered) actions for dataType into groups,
// each group targeting the current data status file whose id range covers the
// group's actions. It walks the action list and the current data files with
// two cursors in parallel. It returns the groups plus the trailing data files
// untouched by any action.
func (d *DataManager) actionGroups(ctx context.Context, wi walIndex, dataType string, curDataStatusFiles []*DataStatusFile) ([]*ActionGroup, []*DataStatusFile) {
	// data files skipped since the last emitted group (kept unmodified)
	dataStatusFiles := []*DataStatusFile{}
	// data files after the last group's file (kept unmodified)
	remainingDataStatusFiles := []*DataStatusFile{}

	actionGroups := []*ActionGroup{}

	// bounds of the group currently being accumulated
	var startActionIndex int
	var actionsSize int

	// parallel cursors over the actions and the current data files
	var actionIndex int
	var curDataStatusFileIndex int
	for {
		var action *Action
		if actionIndex <= len(wi[dataType])-1 {
			action = wi[dataType][actionIndex]
		}

		var curDataStatusFile *DataStatusFile
		if curDataStatusFileIndex <= len(curDataStatusFiles)-1 {
			curDataStatusFile = curDataStatusFiles[curDataStatusFileIndex]
		}

		// no more actions: flush the pending group (if any) and record the
		// untouched trailing data files
		if action == nil {
			if actionsSize > 0 {
				actionGroup := &ActionGroup{
					DataStatusFile:          curDataStatusFile,
					StartActionIndex:        startActionIndex,
					ActionsSize:             actionsSize,
					PreviousDataStatusFiles: dataStatusFiles,
				}
				actionGroups = append(actionGroups, actionGroup)
				curDataStatusFileIndex++
				if curDataStatusFileIndex <= len(curDataStatusFiles)-1 {
					remainingDataStatusFiles = curDataStatusFiles[curDataStatusFileIndex:]
				}
			}
			break
		}

		if curDataStatusFile != nil {
			// the action belongs to this file when its id is within the
			// file's range; the last file also absorbs all remaining actions
			if curDataStatusFile.LastEntryID >= action.ID || curDataStatusFileIndex == len(curDataStatusFiles)-1 {
				// continue using this status file
				actionIndex++
				actionsSize++
			} else {
				// find new status file
				if actionsSize > 0 {
					// close the current group before advancing to the next file
					actionGroup := &ActionGroup{
						DataStatusFile:          curDataStatusFile,
						StartActionIndex:        startActionIndex,
						ActionsSize:             actionsSize,
						PreviousDataStatusFiles: dataStatusFiles,
					}
					actionGroups = append(actionGroups, actionGroup)

					startActionIndex = actionIndex
					actionsSize = 0
					dataStatusFiles = []*DataStatusFile{}
				} else {
					// no actions fell into this file: keep it unmodified
					dataStatusFiles = append(dataStatusFiles, curDataStatusFile)
				}
				curDataStatusFileIndex++
			}
		} else {
			// no current data files at all: every action goes into one group
			// with a nil DataStatusFile
			actionIndex++
			actionsSize++
		}
	}

	return actionGroups, remainingDataStatusFiles
}
|
|
|
|
|
|
|
|
// writeDataType merges the wal actions for dataType into the current snapshot
// data files, producing (and writing) new data files where data changed and
// reusing the untouched ones. Returns the new ordered list of data status
// files for the data type.
func (d *DataManager) writeDataType(ctx context.Context, wi walIndex, dataType string, curDataStatusFiles []*DataStatusFile) ([]*DataStatusFile, error) {
	// SplitPoint marks where the accumulated output buffer should be cut into
	// a separate data file.
	type SplitPoint struct {
		pos         int64  // end offset (exclusive) of the file in the buffer
		lastEntryID string // last entry id written before this cut
	}

	if len(wi[dataType]) == 0 {
		// no actions
		return curDataStatusFiles, nil
	}
	actionGroups, remainingDataStatusFiles := d.actionGroups(ctx, wi, dataType, curDataStatusFiles)

	dataStatusFiles := []*DataStatusFile{}

	for _, actionGroup := range actionGroups {
		// files before this group's target file are reused unchanged
		dataStatusFiles = append(dataStatusFiles, actionGroup.PreviousDataStatusFiles...)

		splitPoints := []SplitPoint{}
		dataFileIndexes := []*DataFileIndex{}
		dataFileIndex := &DataFileIndex{
			Index: make(map[string]int64),
		}
		dataEntries := []*DataEntry{}
		var buf bytes.Buffer
		var pos int64
		var lastEntryID string

		// load the existing entries of the target data file (if any)
		if actionGroup.DataStatusFile != nil {
			// TODO(sgotti) instead of reading all entries in memory decode it's contents one by one when needed
			oldDataf, err := d.ost.ReadObject(d.DataFilePath(dataType, actionGroup.DataStatusFile.ID))
			if err != nil && err != ostypes.ErrNotExist {
				return nil, err
			}
			if err != ostypes.ErrNotExist {
				dec := json.NewDecoder(oldDataf)
				for {
					var de *DataEntry

					err := dec.Decode(&de)
					if err == io.EOF {
						// all done
						break
					}
					if err != nil {
						oldDataf.Close()
						return nil, err
					}

					dataEntries = append(dataEntries, de)
				}
				oldDataf.Close()
			}
		}

		dataEntryIndex := 0
		actionIndex := actionGroup.StartActionIndex

		// iterate over data entries and action in order
		for {
			exists := false    // the action's entry id already exists in the file
			useAction := false // the action (not the existing entry) decides the output

			var action *Action
			if actionIndex < actionGroup.StartActionIndex+actionGroup.ActionsSize {
				action = wi[dataType][actionIndex]
			}

			var de *DataEntry
			if dataEntryIndex <= len(dataEntries)-1 {
				de = dataEntries[dataEntryIndex]
			}

			// both cursors exhausted
			if de == nil && action == nil {
				break
			}

			if action != nil {
				// merge step: pick the smaller id; on equal ids the action wins
				if de != nil {
					if de.ID == action.ID {
						exists = true
						useAction = true
					}
					if de.ID > action.ID {
						useAction = true
					}
				} else {
					useAction = true
				}

				if useAction {
					de = nil
					switch action.ActionType {
					case ActionTypePut:
						de = &DataEntry{
							ID:       action.ID,
							DataType: action.DataType,
							Data:     action.Data,
						}
						if exists {
							// replace current data entry with the action data
							dataEntryIndex++
						}
					case ActionTypeDelete:
						if exists {
							// skip current data entry
							dataEntryIndex++
						}
					}
					actionIndex++
				} else {
					dataEntryIndex++
				}
			} else {
				dataEntryIndex++
			}

			// de is now the entry to emit (nil for deletes / skipped entries)
			if de != nil {
				lastEntryID = de.ID
				dataEntryj, err := json.Marshal(de)
				if err != nil {
					return nil, err
				}
				if _, err := buf.Write(dataEntryj); err != nil {
					return nil, err
				}
				// NOTE(review): pos is an absolute offset in the concatenated
				// buffer; for entries written after a split point this looks
				// like it should be relative to the split (pos - lastSplitPos)
				// to match the per-file seek done in Read — verify.
				dataFileIndex.Index[de.ID] = pos
				prevPos := pos
				pos += int64(len(dataEntryj))
				var lastSplitPos int64
				if len(splitPoints) > 0 {
					lastSplitPos = splitPoints[len(splitPoints)-1].pos
				}
				// cut a new data file once the current one exceeds the max size
				if pos-lastSplitPos > d.maxDataFileSize {
					// add split point only if it's different (less) than the previous one
					if lastSplitPos < prevPos {
						splitPoints = append(splitPoints, SplitPoint{pos: int64(buf.Len()), lastEntryID: lastEntryID})
						dataFileIndexes = append(dataFileIndexes, dataFileIndex)
						dataFileIndex = &DataFileIndex{
							Index: make(map[string]int64),
						}
					}
				}
			}
		}

		// save remaining data
		if buf.Len() != 0 {
			var curPos int64
			var lastSplitPos int64
			if len(splitPoints) > 0 {
				lastSplitPos = splitPoints[len(splitPoints)-1].pos
			}
			// add final split point if there's something left in the buffer
			if lastSplitPos != int64(buf.Len()) {
				splitPoints = append(splitPoints, SplitPoint{pos: int64(buf.Len()), lastEntryID: lastEntryID})
			}
			dataFileIndexes = append(dataFileIndexes, dataFileIndex)
			// write one data file (plus its index) per split point
			for i, sp := range splitPoints {
				curDataFileID := uuid.NewV4().String()
				if err := d.writeDataFile(ctx, &buf, sp.pos-curPos, dataFileIndexes[i], curDataFileID, dataType); err != nil {
					return nil, err
				}
				// insert new dataStatusFile
				dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
					ID:          curDataFileID,
					LastEntryID: sp.lastEntryID,
				})

				curPos = sp.pos
			}
		}

	}

	// files after the last touched one are reused unchanged
	dataStatusFiles = append(dataStatusFiles, remainingDataStatusFiles...)

	return dataStatusFiles, nil
}
|
|
|
|
|
|
|
|
func (d *DataManager) Read(dataType, id string) (io.Reader, error) {
|
|
|
|
curDataStatus, err := d.GetLastDataStatus()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-06-03 14:17:27 +00:00
|
|
|
curFiles := curDataStatus.Files
|
|
|
|
|
|
|
|
var matchingDataFileID string
|
|
|
|
// get the matching data file for the action entry ID
|
|
|
|
if len(curFiles[dataType]) == 0 {
|
|
|
|
return nil, ostypes.ErrNotExist
|
|
|
|
}
|
|
|
|
|
|
|
|
matchingDataFileID = curFiles[dataType][0].ID
|
|
|
|
for _, dataStatusFile := range curFiles[dataType] {
|
|
|
|
if dataStatusFile.LastEntryID > id {
|
|
|
|
matchingDataFileID = dataStatusFile.ID
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2019-04-26 14:00:03 +00:00
|
|
|
|
2019-07-03 15:03:37 +00:00
|
|
|
dataFileIndexf, err := d.ost.ReadObject(d.DataFileIndexPath(dataType, matchingDataFileID))
|
2019-04-26 14:00:03 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var dataFileIndex *DataFileIndex
|
|
|
|
dec := json.NewDecoder(dataFileIndexf)
|
|
|
|
err = dec.Decode(&dataFileIndex)
|
|
|
|
if err != nil {
|
|
|
|
dataFileIndexf.Close()
|
2019-05-23 09:23:14 +00:00
|
|
|
return nil, err
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
dataFileIndexf.Close()
|
|
|
|
|
|
|
|
pos, ok := dataFileIndex.Index[id]
|
|
|
|
if !ok {
|
2019-05-21 13:17:53 +00:00
|
|
|
return nil, ostypes.ErrNotExist
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
|
2019-07-03 15:03:37 +00:00
|
|
|
dataf, err := d.ost.ReadObject(d.DataFilePath(dataType, matchingDataFileID))
|
2019-04-26 14:00:03 +00:00
|
|
|
if err != nil {
|
2019-05-23 09:23:14 +00:00
|
|
|
return nil, err
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
if _, err := dataf.Seek(int64(pos), io.SeekStart); err != nil {
|
|
|
|
dataf.Close()
|
2019-05-23 09:23:14 +00:00
|
|
|
return nil, err
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
var de *DataEntry
|
|
|
|
dec = json.NewDecoder(dataf)
|
|
|
|
if err := dec.Decode(&de); err != nil {
|
|
|
|
dataf.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
dataf.Close()
|
|
|
|
|
|
|
|
return bytes.NewReader(de.Data), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *DataManager) GetLastDataStatusPath() (string, error) {
|
|
|
|
doneCh := make(chan struct{})
|
|
|
|
defer close(doneCh)
|
|
|
|
|
|
|
|
var dataStatusPath string
|
2019-07-03 15:03:37 +00:00
|
|
|
for object := range d.ost.List(d.storageDataDir()+"/", "", false, doneCh) {
|
2019-04-26 14:00:03 +00:00
|
|
|
if object.Err != nil {
|
|
|
|
return "", object.Err
|
|
|
|
}
|
|
|
|
if strings.HasSuffix(object.Path, ".status") {
|
|
|
|
dataStatusPath = object.Path
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if dataStatusPath == "" {
|
2019-05-21 13:17:53 +00:00
|
|
|
return "", ostypes.ErrNotExist
|
2019-04-26 14:00:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return dataStatusPath, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *DataManager) GetLastDataStatus() (*DataStatus, error) {
|
|
|
|
dataStatusPath, err := d.GetLastDataStatusPath()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusf, err := d.ost.ReadObject(dataStatusPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer dataStatusf.Close()
|
|
|
|
var dataStatus *DataStatus
|
|
|
|
dec := json.NewDecoder(dataStatusf)
|
|
|
|
|
|
|
|
return dataStatus, dec.Decode(&dataStatus)
|
|
|
|
}
|
2019-07-17 15:16:35 +00:00
|
|
|
|
|
|
|
func (d *DataManager) Export(ctx context.Context, w io.Writer) error {
|
|
|
|
if err := d.checkpoint(ctx, true); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
curDataStatus, err := d.GetLastDataStatus()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, dataType := range d.dataTypes {
|
|
|
|
var curDataStatusFiles []*DataStatusFile
|
|
|
|
if curDataStatus != nil {
|
|
|
|
curDataStatusFiles = curDataStatus.Files[dataType]
|
|
|
|
}
|
|
|
|
for _, dsf := range curDataStatusFiles {
|
|
|
|
dataf, err := d.ost.ReadObject(d.DataFilePath(dataType, dsf.ID))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := io.Copy(w, dataf); err != nil {
|
|
|
|
dataf.Close()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataf.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *DataManager) Import(ctx context.Context, r io.Reader) error {
|
|
|
|
// delete contents in etcd
|
|
|
|
if err := d.deleteEtcd(ctx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// we require all entries of the same datatypes grouped together
|
|
|
|
seenDataTypes := map[string]struct{}{}
|
|
|
|
|
|
|
|
// create a new sequence, we assume that it'll be greater than previous data sequences
|
|
|
|
dataSequence, err := sequence.IncSequence(ctx, d.e, etcdCheckpointSeqKey)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatus := &DataStatus{
|
|
|
|
DataSequence: dataSequence.String(),
|
|
|
|
// no last wal sequence on import
|
|
|
|
WalSequence: "",
|
|
|
|
Files: make(map[string][]*DataStatusFile),
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusFiles := []*DataStatusFile{}
|
|
|
|
|
|
|
|
var lastEntryID string
|
|
|
|
var curDataType string
|
|
|
|
var buf bytes.Buffer
|
|
|
|
var pos int64
|
|
|
|
dataFileIndex := &DataFileIndex{
|
|
|
|
Index: make(map[string]int64),
|
|
|
|
}
|
|
|
|
dec := json.NewDecoder(r)
|
|
|
|
|
|
|
|
for {
|
|
|
|
var de *DataEntry
|
|
|
|
|
|
|
|
err := dec.Decode(&de)
|
|
|
|
if err == io.EOF {
|
|
|
|
dataFileID := uuid.NewV4().String()
|
|
|
|
if err := d.writeDataFile(ctx, &buf, int64(buf.Len()), dataFileIndex, dataFileID, curDataType); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
|
|
|
|
ID: dataFileID,
|
|
|
|
LastEntryID: lastEntryID,
|
|
|
|
})
|
|
|
|
dataStatus.Files[curDataType] = dataStatusFiles
|
|
|
|
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if curDataType == "" {
|
|
|
|
curDataType = de.DataType
|
|
|
|
seenDataTypes[de.DataType] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
mustWrite := false
|
|
|
|
mustReset := false
|
|
|
|
if pos > d.maxDataFileSize {
|
|
|
|
mustWrite = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if curDataType != de.DataType {
|
|
|
|
if _, ok := seenDataTypes[de.DataType]; ok {
|
|
|
|
return errors.Errorf("dataType %q already imported", de.DataType)
|
|
|
|
}
|
|
|
|
mustWrite = true
|
|
|
|
mustReset = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if mustWrite {
|
|
|
|
dataFileID := uuid.NewV4().String()
|
|
|
|
if err := d.writeDataFile(ctx, &buf, int64(buf.Len()), dataFileIndex, dataFileID, curDataType); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
|
|
|
|
ID: dataFileID,
|
|
|
|
LastEntryID: lastEntryID,
|
|
|
|
})
|
|
|
|
|
|
|
|
if mustReset {
|
|
|
|
dataStatus.Files[curDataType] = dataStatusFiles
|
|
|
|
|
|
|
|
dataStatusFiles = []*DataStatusFile{}
|
|
|
|
curDataType = de.DataType
|
|
|
|
lastEntryID = ""
|
|
|
|
}
|
|
|
|
|
|
|
|
dataFileIndex = &DataFileIndex{
|
|
|
|
Index: make(map[string]int64),
|
|
|
|
}
|
|
|
|
buf = bytes.Buffer{}
|
|
|
|
pos = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
if de.ID <= lastEntryID {
|
|
|
|
// entries for the same datatype must be unique and ordered
|
|
|
|
return errors.Errorf("entry id %q is less or equal than previous entry id %q", de.ID, lastEntryID)
|
|
|
|
}
|
|
|
|
lastEntryID = de.ID
|
|
|
|
|
|
|
|
dataEntryj, err := json.Marshal(de)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := buf.Write(dataEntryj); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
dataFileIndex.Index[de.ID] = pos
|
|
|
|
pos += int64(len(dataEntryj))
|
|
|
|
}
|
|
|
|
|
|
|
|
dataStatusj, err := json.Marshal(dataStatus)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := d.ost.WriteObject(d.dataStatusPath(dataSequence.String()), bytes.NewReader(dataStatusj), int64(len(dataStatusj)), true); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// initialize etcd providing the specific datastatus
|
|
|
|
if err := d.InitEtcd(ctx, dataStatus); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|