// Copyright 2019 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datamanager

import (
	"bytes"
	"container/ring"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path"
	"regexp"
	"sort"
	"strings"

	"agola.io/agola/internal/objectstorage"
	"agola.io/agola/internal/sequence"
	"agola.io/agola/internal/util"

	uuid "github.com/satori/go.uuid"
	errors "golang.org/x/xerrors"
)

// ErrNoDataStatus is returned when there are no data status files in the
// object storage.
var ErrNoDataStatus = errors.New("no data status files")

const (
	DefaultMaxDataFileSize = 10 * 1024 * 1024
	dataStatusToKeep       = 3
)

var (
	DataFileRegexp       = regexp.MustCompile(`^([a-zA-Z0-9]+-[a-zA-Z0-9]+)-([a-zA-Z0-9-]+)\.(data|index)$`)
	DataStatusFileRegexp = regexp.MustCompile(`^([a-zA-Z0-9]+-[a-zA-Z0-9]+)\.status$`)
)
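
// As matched by the regexps above, data files are named
// "<dataSequence>-<fileID>.data" (with a companion ".index" file holding the
// DataFileIndex) and data status files are named "<dataSequence>.status",
// where <dataSequence> itself is two dash separated alphanumeric parts (see
// the sequence package for the exact format).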

type DataStatus struct {
	DataSequence string `json:"data_sequence,omitempty"`
	WalSequence  string `json:"wal_sequence,omitempty"`
	// Files maps a data type (the map key) to its list of data files ordered
	// by entry id.
	Files map[string][]*DataStatusFile `json:"files,omitempty"`
}

type DataStatusFile struct {
	ID string `json:"id,omitempty"`
	// the last entry id in this file
	LastEntryID string `json:"last_entry_id,omitempty"`
}

type DataFileIndex struct {
	Index map[string]int64 `json:"index,omitempty"`
}

type DataEntry struct {
	ID       string `json:"id,omitempty"`
	DataType string `json:"data_type,omitempty"`
	Data     []byte `json:"data,omitempty"`
}
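
// Illustrative sketch (not part of the original code): a marshaled DataStatus
// could look like
//
//	{
//	  "data_sequence": "<seq>",
//	  "wal_sequence": "<walseq>",
//	  "files": {
//	    "<datatype>": [
//	      {"id": "<seq>-<uuid>", "last_entry_id": "<entryid>"}
//	    ]
//	  }
//	}
//
// Each data file referenced by "id" holds a stream of json encoded DataEntry
// values, and its companion DataFileIndex maps every entry id to the byte
// offset of that entry inside the file.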

// TODO(sgotti) this implementation could be heavily optimized to store less data in memory

// TODO(sgotti)
// split/merge data files at max N bytes (i.e. 16MiB) so we'll rewrite only files
// with changed data

// walIndex maps a dataType to the list of wal actions on its entries.
// TODO(sgotti) write this index to local disk (a temporary sqlite db) instead of storing all in memory
type walIndex map[string]walActions

// walActions is a list of actions ordered by entry id.
type walActions []*Action

func (w walActions) Len() int           { return len(w) }
func (w walActions) Less(i, j int) bool { return w[i].ID < w[j].ID }
func (w walActions) Swap(i, j int)      { w[i], w[j] = w[j], w[i] }

func (d *DataManager) dataFileID(dataSequence *sequence.Sequence, next string) string {
	return fmt.Sprintf("%s-%s", dataSequence.String(), next)
}

func (d *DataManager) walIndex(ctx context.Context, wals []*WalData) (walIndex, error) {
	wimap := map[string]map[string]*Action{}

	for _, walData := range wals {
		header, err := d.ReadWal(walData.WalSequence)
		if err != nil {
			return nil, err
		}

		walFile, err := d.ReadWalData(header.WalDataFileID)
		if err != nil {
			return nil, errors.Errorf("cannot read wal data file %q: %w", header.WalDataFileID, err)
		}
		defer walFile.Close()

		dec := json.NewDecoder(walFile)
		for {
			var action *Action

			err := dec.Decode(&action)
			if err == io.EOF {
				// all done
				break
			}
			if err != nil {
				return nil, errors.Errorf("failed to decode wal file: %w", err)
			}

			if _, ok := wimap[action.DataType]; !ok {
				wimap[action.DataType] = map[string]*Action{}
			}

			// only keep the last action for every entry id
			wimap[action.DataType][action.ID] = action
		}
	}

	wi := map[string]walActions{}
	for dataType, dd := range wimap {
		for _, de := range dd {
			wi[dataType] = append(wi[dataType], de)
		}
		sort.Sort(wi[dataType])
	}

	return wi, nil
}
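
// Illustrative example of the index built above (not part of the original
// code): if the uncheckpointed wals contain, for data type "t", a put of "a",
// a delete of "a" and then a put of "b", the returned walIndex is
// {"t": [delete("a"), put("b")]}: only the last action per entry id is kept
// and the actions are sorted by entry id.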

// writeDataSnapshot will create a new data snapshot merging the uncheckpointed
// wals. It will split data files at maxDataFileSize bytes so we'll rewrite only
// files with changed data.
// Only new files will be created, previous snapshot data files won't be touched.
//
// TODO(sgotti) add a function to merge small data files (i.e. after deletions) to avoid fragmentation
// TODO(sgotti) add a function to delete old data files keeping only N snapshots
func (d *DataManager) writeDataSnapshot(ctx context.Context, wals []*WalData) error {
	dataSequence, err := sequence.IncSequence(ctx, d.e, etcdCheckpointSeqKey)
	if err != nil {
		return err
	}

	var lastWalSequence string
	for _, walData := range wals {
		lastWalSequence = walData.WalSequence
	}

	dataStatus := &DataStatus{
		DataSequence: dataSequence.String(),
		WalSequence:  lastWalSequence,
		Files:        make(map[string][]*DataStatusFile),
	}

	curDataStatus, err := d.GetLastDataStatus()
	if err != nil && !errors.Is(err, ErrNoDataStatus) {
		return err
	}

	startWalIndex := 0
	if curDataStatus != nil {
		// skip wals already checkpointed in this data status
		for i, wal := range wals {
			if wal.WalSequence <= curDataStatus.WalSequence {
				continue
			}

			startWalIndex = i
			break
		}
	}

	wals = wals[startWalIndex:]

	wi, err := d.walIndex(ctx, wals)
	if err != nil {
		return err
	}

	for _, dataType := range d.dataTypes {
		var curDataStatusFiles []*DataStatusFile
		if curDataStatus != nil {
			curDataStatusFiles = curDataStatus.Files[dataType]
		}
		dataStatusFiles, err := d.writeDataType(ctx, wi, dataType, dataSequence, curDataStatusFiles)
		if err != nil {
			return err
		}
		dataStatus.Files[dataType] = dataStatusFiles
	}

	dataStatusj, err := json.Marshal(dataStatus)
	if err != nil {
		return err
	}
	if err := d.ost.WriteObject(d.dataStatusPath(dataSequence), bytes.NewReader(dataStatusj), int64(len(dataStatusj)), true); err != nil {
		return err
	}

	return nil
}
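
// Note on the overall checkpoint flow (summary of the code above, not in the
// original source): writeDataSnapshot first increments the checkpoint sequence
// in etcd, then writes the new per data type files via writeDataType, and only
// writes the "<dataSequence>.status" object last, so readers going through
// GetLastDataStatus never see a status file referencing data files that are
// not yet written.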

func (d *DataManager) writeDataFile(ctx context.Context, buf *bytes.Buffer, size int64, dataFileIndex *DataFileIndex, dataFileID, dataType string) error {
	if buf.Len() == 0 {
		return fmt.Errorf("empty data entries")
	}

	if err := d.ost.WriteObject(d.DataFilePath(dataType, dataFileID), buf, size, true); err != nil {
		return err
	}

	dataFileIndexj, err := json.Marshal(dataFileIndex)
	if err != nil {
		return err
	}
	if err := d.ost.WriteObject(d.DataFileIndexPath(dataType, dataFileID), bytes.NewReader(dataFileIndexj), int64(len(dataFileIndexj)), true); err != nil {
		return err
	}

	return nil
}

type ActionGroup struct {
	DataStatusFile          *DataStatusFile
	StartActionIndex        int
	ActionsSize             int
	PreviousDataStatusFiles []*DataStatusFile
}
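
// How the grouping works (explanatory summary, not in the original source):
// actionGroups walks the ordered actions for a data type together with the
// existing data status files (both ordered by entry id). Actions that fall
// inside the entry id range of an existing file form one ActionGroup anchored
// to that file (DataStatusFile); files with no overlapping actions are carried
// over untouched, either via PreviousDataStatusFiles or via the returned
// remaining files.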

func (d *DataManager) actionGroups(ctx context.Context, wi walIndex, dataType string, curDataStatusFiles []*DataStatusFile) ([]*ActionGroup, []*DataStatusFile) {
	dataStatusFiles := []*DataStatusFile{}
	remainingDataStatusFiles := []*DataStatusFile{}

	actionGroups := []*ActionGroup{}

	var startActionIndex int
	var actionsSize int

	var actionIndex int
	var curDataStatusFileIndex int
	for {
		var action *Action
		if actionIndex <= len(wi[dataType])-1 {
			action = wi[dataType][actionIndex]
		}

		var curDataStatusFile *DataStatusFile
		if curDataStatusFileIndex <= len(curDataStatusFiles)-1 {
			curDataStatusFile = curDataStatusFiles[curDataStatusFileIndex]
		}

		if action == nil {
			if actionsSize > 0 {
				actionGroup := &ActionGroup{
					DataStatusFile:          curDataStatusFile,
					StartActionIndex:        startActionIndex,
					ActionsSize:             actionsSize,
					PreviousDataStatusFiles: dataStatusFiles,
				}
				actionGroups = append(actionGroups, actionGroup)
				curDataStatusFileIndex++
				if curDataStatusFileIndex <= len(curDataStatusFiles)-1 {
					remainingDataStatusFiles = curDataStatusFiles[curDataStatusFileIndex:]
				}
			}
			break
		}

		if curDataStatusFile != nil {
			if curDataStatusFile.LastEntryID >= action.ID || curDataStatusFileIndex == len(curDataStatusFiles)-1 {
				// continue using this status file
				actionIndex++
				actionsSize++
			} else {
				// find new status file
				if actionsSize > 0 {
					actionGroup := &ActionGroup{
						DataStatusFile:          curDataStatusFile,
						StartActionIndex:        startActionIndex,
						ActionsSize:             actionsSize,
						PreviousDataStatusFiles: dataStatusFiles,
					}
					actionGroups = append(actionGroups, actionGroup)

					startActionIndex = actionIndex
					actionsSize = 0
					dataStatusFiles = []*DataStatusFile{}
				} else {
					dataStatusFiles = append(dataStatusFiles, curDataStatusFile)
				}
				curDataStatusFileIndex++
			}
		} else {
			actionIndex++
			actionsSize++
		}
	}

	return actionGroups, remainingDataStatusFiles
}
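
// writeDataType (below) rewrites, for a single data type, only the data files
// touched by new actions: it merges each ActionGroup's old entries with its
// actions and accumulates the result in a buffer, recording a split point
// whenever the data written since the previous split exceeds maxDataFileSize;
// every split point then becomes a separate data file (with its own index) in
// the new data status, while untouched files are referenced as-is.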

func (d *DataManager) writeDataType(ctx context.Context, wi walIndex, dataType string, dataSequence *sequence.Sequence, curDataStatusFiles []*DataStatusFile) ([]*DataStatusFile, error) {
	type SplitPoint struct {
		pos         int64
		lastEntryID string
	}

	if len(wi[dataType]) == 0 {
		// no actions
		return curDataStatusFiles, nil
	}
	actionGroups, remainingDataStatusFiles := d.actionGroups(ctx, wi, dataType, curDataStatusFiles)

	dataStatusFiles := []*DataStatusFile{}

	for _, actionGroup := range actionGroups {
		dataStatusFiles = append(dataStatusFiles, actionGroup.PreviousDataStatusFiles...)

		splitPoints := []SplitPoint{}
		dataFileIndexes := []*DataFileIndex{}
		dataFileIndex := &DataFileIndex{
			Index: make(map[string]int64),
		}
		dataEntries := []*DataEntry{}
		var buf bytes.Buffer
		var pos int64
		var lastEntryID string

		if actionGroup.DataStatusFile != nil {
			// TODO(sgotti) instead of reading all entries in memory decode its contents one by one when needed
			oldDataf, err := d.ost.ReadObject(d.DataFilePath(dataType, actionGroup.DataStatusFile.ID))
			if err != nil && !objectstorage.IsNotExist(err) {
				return nil, err
			}
			if !objectstorage.IsNotExist(err) {
				dec := json.NewDecoder(oldDataf)
				for {
					var de *DataEntry

					err := dec.Decode(&de)
					if err == io.EOF {
						// all done
						break
					}
					if err != nil {
						oldDataf.Close()
						return nil, err
					}

					dataEntries = append(dataEntries, de)
				}
				oldDataf.Close()
			}
		}

		dataEntryIndex := 0
		actionIndex := actionGroup.StartActionIndex

		// iterate over data entries and actions in order
		for {
			exists := false
			useAction := false

			var action *Action
			if actionIndex < actionGroup.StartActionIndex+actionGroup.ActionsSize {
				action = wi[dataType][actionIndex]
			}

			var de *DataEntry
			if dataEntryIndex <= len(dataEntries)-1 {
				de = dataEntries[dataEntryIndex]
			}

			if de == nil && action == nil {
				break
			}

			if action != nil {
				if de != nil {
					if de.ID == action.ID {
						exists = true
						useAction = true
					}
					if de.ID > action.ID {
						useAction = true
					}
				} else {
					useAction = true
				}

				if useAction {
					de = nil
					switch action.ActionType {
					case ActionTypePut:
						de = &DataEntry{
							ID:       action.ID,
							DataType: action.DataType,
							Data:     action.Data,
						}
						if exists {
							// replace current data entry with the action data
							dataEntryIndex++
						}
					case ActionTypeDelete:
						if exists {
							// skip current data entry
							dataEntryIndex++
						}
					}
					actionIndex++
				} else {
					dataEntryIndex++
				}
			} else {
				dataEntryIndex++
			}

			if de != nil {
				var lastSplitPos int64
				if len(splitPoints) > 0 {
					lastSplitPos = splitPoints[len(splitPoints)-1].pos
				}

				lastEntryID = de.ID
				dataEntryj, err := json.Marshal(de)
				if err != nil {
					return nil, err
				}
				if _, err := buf.Write(dataEntryj); err != nil {
					return nil, err
				}
				dataFileIndex.Index[de.ID] = pos - lastSplitPos
				prevPos := pos
				pos += int64(len(dataEntryj))
				if pos-lastSplitPos > d.maxDataFileSize {
					// add split point only if it's different (less) than the previous one
					if lastSplitPos < prevPos {
						splitPoints = append(splitPoints, SplitPoint{pos: int64(buf.Len()), lastEntryID: lastEntryID})
						dataFileIndexes = append(dataFileIndexes, dataFileIndex)
						dataFileIndex = &DataFileIndex{
							Index: make(map[string]int64),
						}
					}
				}
			}
		}

		// save data
		if buf.Len() != 0 {
			var curPos int64
			var lastSplitPos int64
			if len(splitPoints) > 0 {
				lastSplitPos = splitPoints[len(splitPoints)-1].pos
			}
			// add final split point if there's something left in the buffer
			if lastSplitPos != int64(buf.Len()) {
				splitPoints = append(splitPoints, SplitPoint{pos: int64(buf.Len()), lastEntryID: lastEntryID})
			}
			dataFileIndexes = append(dataFileIndexes, dataFileIndex)
			for i, sp := range splitPoints {
				curDataFileID := d.dataFileID(dataSequence, uuid.NewV4().String())
				if err := d.writeDataFile(ctx, &buf, sp.pos-curPos, dataFileIndexes[i], curDataFileID, dataType); err != nil {
					return nil, err
				}
				// insert new dataStatusFile
				dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
					ID:          curDataFileID,
					LastEntryID: sp.lastEntryID,
				})

				curPos = sp.pos
			}
		}
	}

	dataStatusFiles = append(dataStatusFiles, remainingDataStatusFiles...)

	return dataStatusFiles, nil
}

func (d *DataManager) Read(dataType, id string) (io.Reader, error) {
	curDataStatus, err := d.GetLastDataStatus()
	if err != nil {
		return nil, err
	}
	curFiles := curDataStatus.Files

	var matchingDataFileID string
	// get the matching data file for the action entry ID
	if len(curFiles[dataType]) == 0 {
		return nil, util.NewErrNotExist(errors.Errorf("datatype %q doesn't exist", dataType))
	}

	matchingDataFileID = curFiles[dataType][0].ID
	for _, dataStatusFile := range curFiles[dataType] {
		if dataStatusFile.LastEntryID >= id {
			matchingDataFileID = dataStatusFile.ID
			break
		}
	}

	dataFileIndexf, err := d.ost.ReadObject(d.DataFileIndexPath(dataType, matchingDataFileID))
	if err != nil {
		return nil, err
	}
	var dataFileIndex *DataFileIndex
	dec := json.NewDecoder(dataFileIndexf)
	err = dec.Decode(&dataFileIndex)
	if err != nil {
		dataFileIndexf.Close()
		return nil, err
	}
	dataFileIndexf.Close()

	pos, ok := dataFileIndex.Index[id]
	if !ok {
		return nil, util.NewErrNotExist(errors.Errorf("datatype %q, id %q doesn't exist", dataType, id))
	}

	dataf, err := d.ost.ReadObject(d.DataFilePath(dataType, matchingDataFileID))
	if err != nil {
		return nil, err
	}
	if _, err := dataf.Seek(int64(pos), io.SeekStart); err != nil {
		dataf.Close()
		return nil, err
	}
	var de *DataEntry
	dec = json.NewDecoder(dataf)
	if err := dec.Decode(&de); err != nil {
		dataf.Close()
		return nil, err
	}
	dataf.Close()

	return bytes.NewReader(de.Data), nil
}
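
// Example usage sketch (illustrative only; the "run" data type, runID and the
// target struct are hypothetical, dm is a *DataManager):
//
//	r, err := dm.Read("run", runID)
//	if err != nil {
//		return err
//	}
//	var run SomeRunType
//	if err := json.NewDecoder(r).Decode(&run); err != nil {
//		return err
//	}
//
// Read locates the data file whose LastEntryID covers the requested id, looks
// the id up in that file's index and decodes the single DataEntry found at the
// returned offset.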

func (d *DataManager) GetFirstDataStatusSequences(n int) ([]*sequence.Sequence, error) {
	if n < 1 {
		return nil, errors.Errorf("n must be greater than 0")
	}

	dataStatusSequences := []*sequence.Sequence{}
	c := 0

	doneCh := make(chan struct{})
	defer close(doneCh)
	for object := range d.ost.List(d.storageDataDir()+"/", "", false, doneCh) {
		if object.Err != nil {
			return nil, object.Err
		}
		if m := DataStatusFileRegexp.FindStringSubmatch(path.Base(object.Path)); m != nil {
			seq, err := sequence.Parse(m[1])
			if err != nil {
				d.log.Warnf("cannot parse sequence for data status file %q", object.Path)
				continue
			}
			dataStatusSequences = append(dataStatusSequences, seq)
			c++
		} else {
			d.log.Warnf("bad file %q found in storage data dir", object.Path)
		}
		if c >= n {
			break
		}
	}

	if len(dataStatusSequences) == 0 {
		return nil, ErrNoDataStatus
	}

	return dataStatusSequences, nil
}
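
// GetLastDataStatusSequences (below) relies on the object storage listing
// status files in ascending order: every parsed sequence is pushed into a
// container/ring of size n so that, once the listing ends, the ring holds the
// n most recent sequences, which are then unrolled newest first.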

func (d *DataManager) GetLastDataStatusSequences(n int) ([]*sequence.Sequence, error) {
	if n < 1 {
		return nil, errors.Errorf("n must be greater than 0")
	}
	r := ring.New(n)
	re := r

	doneCh := make(chan struct{})
	defer close(doneCh)

	for object := range d.ost.List(d.storageDataDir()+"/", "", false, doneCh) {
		if object.Err != nil {
			return nil, object.Err
		}
		if m := DataStatusFileRegexp.FindStringSubmatch(path.Base(object.Path)); m != nil {
			seq, err := sequence.Parse(m[1])
			if err != nil {
				d.log.Warnf("cannot parse sequence for data status file %q", object.Path)
				continue
			}
			re.Value = seq
			re = re.Next()
		} else {
			d.log.Warnf("bad file %q found in storage data dir", object.Path)
		}
	}

	dataStatusSequences := []*sequence.Sequence{}
	re.Do(func(x interface{}) {
		if x != nil {
			dataStatusSequences = append([]*sequence.Sequence{x.(*sequence.Sequence)}, dataStatusSequences...)
		}
	})

	if len(dataStatusSequences) == 0 {
		return nil, ErrNoDataStatus
	}

	return dataStatusSequences, nil
}

func (d *DataManager) GetDataStatus(dataSequence *sequence.Sequence) (*DataStatus, error) {
	dataStatusf, err := d.ost.ReadObject(d.dataStatusPath(dataSequence))
	if err != nil {
		return nil, err
	}
	defer dataStatusf.Close()
	var dataStatus *DataStatus
	dec := json.NewDecoder(dataStatusf)

	return dataStatus, dec.Decode(&dataStatus)
}

func (d *DataManager) GetFirstDataStatusSequence() (*sequence.Sequence, error) {
	dataStatusSequences, err := d.GetFirstDataStatusSequences(1)
	if err != nil {
		return nil, err
	}

	return dataStatusSequences[0], nil
}

func (d *DataManager) GetLastDataStatusSequence() (*sequence.Sequence, error) {
	dataStatusSequences, err := d.GetLastDataStatusSequences(1)
	if err != nil {
		return nil, err
	}

	return dataStatusSequences[0], nil
}

func (d *DataManager) GetFirstDataStatus() (*DataStatus, error) {
	dataStatusSequence, err := d.GetFirstDataStatusSequence()
	if err != nil {
		return nil, err
	}

	return d.GetDataStatus(dataStatusSequence)
}

func (d *DataManager) GetLastDataStatus() (*DataStatus, error) {
	dataStatusSequence, err := d.GetLastDataStatusSequence()
	if err != nil {
		return nil, err
	}

	return d.GetDataStatus(dataStatusSequence)
}

func (d *DataManager) Export(ctx context.Context, w io.Writer) error {
	if err := d.checkpoint(ctx, true); err != nil {
		return err
	}

	curDataStatus, err := d.GetLastDataStatus()
	if err != nil {
		return err
	}

	for _, dataType := range d.dataTypes {
		var curDataStatusFiles []*DataStatusFile
		if curDataStatus != nil {
			curDataStatusFiles = curDataStatus.Files[dataType]
		}
		for _, dsf := range curDataStatusFiles {
			dataf, err := d.ost.ReadObject(d.DataFilePath(dataType, dsf.ID))
			if err != nil {
				return err
			}
			if _, err := io.Copy(w, dataf); err != nil {
				dataf.Close()
				return err
			}

			dataf.Close()
		}
	}

	return nil
}
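
// The export stream produced above is the concatenation of every data file,
// i.e. a stream of json encoded DataEntry values grouped by data type and
// ordered by entry id within each type; this is exactly the format that
// Import (below) expects.
//
// Round trip sketch (illustrative; src and dst are hypothetical *DataManager
// values):
//
//	var buf bytes.Buffer
//	if err := src.Export(ctx, &buf); err != nil {
//		return err
//	}
//	if err := dst.Import(ctx, &buf); err != nil {
//		return err
//	}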

func (d *DataManager) Import(ctx context.Context, r io.Reader) error {
	// delete contents in etcd
	if err := d.deleteEtcd(ctx); err != nil {
		return err
	}

	// we require all entries of the same datatypes grouped together
	seenDataTypes := map[string]struct{}{}

	// create a new sequence, we assume that it'll be greater than previous data sequences
	dataSequence, err := sequence.IncSequence(ctx, d.e, etcdCheckpointSeqKey)
	if err != nil {
		return err
	}

	dataStatus := &DataStatus{
		DataSequence: dataSequence.String(),
		// no last wal sequence on import
		WalSequence: "",
		Files:       make(map[string][]*DataStatusFile),
	}

	dataStatusFiles := []*DataStatusFile{}

	var lastEntryID string
	var curDataType string
	var buf bytes.Buffer
	var pos int64
	dataFileIndex := &DataFileIndex{
		Index: make(map[string]int64),
	}
	dec := json.NewDecoder(r)

	for {
		var de *DataEntry

		err := dec.Decode(&de)
		if err == io.EOF {
			dataFileID := d.dataFileID(dataSequence, uuid.NewV4().String())
			if err := d.writeDataFile(ctx, &buf, int64(buf.Len()), dataFileIndex, dataFileID, curDataType); err != nil {
				return err
			}

			dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
				ID:          dataFileID,
				LastEntryID: lastEntryID,
			})
			dataStatus.Files[curDataType] = dataStatusFiles

			break
		}

		if curDataType == "" {
			curDataType = de.DataType
			seenDataTypes[de.DataType] = struct{}{}
		}

		mustWrite := false
		mustReset := false
		if pos > d.maxDataFileSize {
			mustWrite = true
		}

		if curDataType != de.DataType {
			if _, ok := seenDataTypes[de.DataType]; ok {
				return errors.Errorf("dataType %q already imported", de.DataType)
			}
			mustWrite = true
			mustReset = true
		}

		if mustWrite {
			dataFileID := d.dataFileID(dataSequence, uuid.NewV4().String())
			if err := d.writeDataFile(ctx, &buf, int64(buf.Len()), dataFileIndex, dataFileID, curDataType); err != nil {
				return err
			}

			dataStatusFiles = append(dataStatusFiles, &DataStatusFile{
				ID:          dataFileID,
				LastEntryID: lastEntryID,
			})

			if mustReset {
				dataStatus.Files[curDataType] = dataStatusFiles

				dataStatusFiles = []*DataStatusFile{}
				curDataType = de.DataType
				lastEntryID = ""
			}

			dataFileIndex = &DataFileIndex{
				Index: make(map[string]int64),
			}
			buf = bytes.Buffer{}
			pos = 0
		}

		if de.ID <= lastEntryID {
			// entries for the same datatype must be unique and ordered
			return errors.Errorf("entry id %q is less than or equal to previous entry id %q", de.ID, lastEntryID)
		}
		lastEntryID = de.ID

		dataEntryj, err := json.Marshal(de)
		if err != nil {
			return err
		}
		if _, err := buf.Write(dataEntryj); err != nil {
			return err
		}
		dataFileIndex.Index[de.ID] = pos
		pos += int64(len(dataEntryj))
	}

	dataStatusj, err := json.Marshal(dataStatus)
	if err != nil {
		return err
	}
	if err := d.ost.WriteObject(d.dataStatusPath(dataSequence), bytes.NewReader(dataStatusj), int64(len(dataStatusj)), true); err != nil {
		return err
	}

	// initialize etcd providing the specific datastatus
	if err := d.InitEtcd(ctx, dataStatus); err != nil {
		return err
	}

	return nil
}
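
// CleanOldCheckpoints (below) keeps the last dataStatusToKeep snapshots: it
// removes older data status objects and every data/index file that is not
// referenced by one of the kept snapshots, while skipping objects whose
// sequence is newer than the last known status to avoid racing with an
// in-flight checkpoint.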

func (d *DataManager) CleanOldCheckpoints(ctx context.Context) error {
	dataStatusSequences, err := d.GetLastDataStatusSequences(dataStatusToKeep)
	if err != nil {
		return err
	}

	return d.cleanOldCheckpoints(ctx, dataStatusSequences)
}

func (d *DataManager) cleanOldCheckpoints(ctx context.Context, dataStatusSequences []*sequence.Sequence) error {
	if len(dataStatusSequences) == 0 {
		return nil
	}

	lastDataStatusSequence := dataStatusSequences[0]

	// Remove old data status paths
	if len(dataStatusSequences) >= dataStatusToKeep {
		dataStatusPathsMap := map[string]struct{}{}
		for _, seq := range dataStatusSequences {
			dataStatusPathsMap[d.dataStatusPath(seq)] = struct{}{}
		}

		doneCh := make(chan struct{})
		defer close(doneCh)
		for object := range d.ost.List(d.storageDataDir()+"/", "", false, doneCh) {
			if object.Err != nil {
				return object.Err
			}

			skip := false
			if m := DataStatusFileRegexp.FindStringSubmatch(path.Base(object.Path)); m != nil {
				seq, err := sequence.Parse(m[1])
				if err == nil && seq.String() > lastDataStatusSequence.String() {
					d.log.Infof("skipping file %q since its sequence is greater than %q", object.Path, lastDataStatusSequence)
					skip = true
				}
			}
			if skip {
				continue
			}

			if _, ok := dataStatusPathsMap[object.Path]; !ok {
				d.log.Infof("removing %q", object.Path)
				if err := d.ost.DeleteObject(object.Path); err != nil {
					if !objectstorage.IsNotExist(err) {
						return err
					}
				}
			}
		}
	}

	// A list of files to keep
	files := map[string]struct{}{}

	for _, dataStatusSequence := range dataStatusSequences {
		dataStatus, err := d.GetDataStatus(dataStatusSequence)
		if err != nil {
			return err
		}

		for dataType := range dataStatus.Files {
			for _, file := range dataStatus.Files[dataType] {
				files[d.DataFileBasePath(dataType, file.ID)] = struct{}{}
			}
		}
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	for object := range d.ost.List(d.storageDataDir()+"/", "", true, doneCh) {
		if object.Err != nil {
			return object.Err
		}

		p := object.Path
		// object file relative to the storageDataDir
		pr := strings.TrimPrefix(p, d.storageDataDir()+"/")
		// object file full path without final extension
		pne := strings.TrimSuffix(p, path.Ext(p))
		// object file base name
		pb := path.Base(p)

		// skip status files
		if !strings.Contains(pr, "/") && strings.HasSuffix(pr, ".status") {
			continue
		}

		// skip data files with a sequence greater than the last known sequence.
		// This avoids the case where a Clean runs concurrently with a Checkpoint
		// (even if protected by etcd locks, those locks cannot guard operations
		// on resources external to etcd during network errors) and would remove
		// the objects created by that checkpoint because its data status file
		// doesn't exist yet.
		skip := false
		// extract the data sequence from the object name
		if m := DataFileRegexp.FindStringSubmatch(pb); m != nil {
			seq, err := sequence.Parse(m[1])
			if err == nil && seq.String() > lastDataStatusSequence.String() {
				d.log.Infof("skipping file %q since its sequence is greater than %q", p, lastDataStatusSequence)
				skip = true
			}
		}
		if skip {
			continue
		}

		if _, ok := files[pne]; !ok {
			d.log.Infof("removing %q", object.Path)
			if err := d.ost.DeleteObject(object.Path); err != nil {
				if !objectstorage.IsNotExist(err) {
					return err
				}
			}
		}
	}

	return nil
}