From 60aaf19d156edc31d7a9a054e8d05cc14227b643 Mon Sep 17 00:00:00 2001 From: Tai Groot Date: Tue, 1 Feb 2022 19:06:30 -0800 Subject: [PATCH] Add all files again with v2 postfix to URL --- v2/bitcask.go | 1060 +++++++++++ v2/bitcask_test.go | 2281 ++++++++++++++++++++++++ v2/doc.go | 3 + v2/doc_test.go | 13 + v2/errors.go | 77 + v2/go.mod | 26 + v2/go.sum | 668 +++++++ v2/internal/config/config.go | 51 + v2/internal/data/codec/decoder.go | 110 ++ v2/internal/data/codec/decoder_test.go | 130 ++ v2/internal/data/codec/encoder.go | 69 + v2/internal/data/codec/encoder_test.go | 32 + v2/internal/data/datafile.go | 200 +++ v2/internal/data/recover.go | 95 + v2/internal/entry.go | 27 + v2/internal/index/codec_index.go | 134 ++ v2/internal/index/codec_index_test.go | 126 ++ v2/internal/index/index.go | 59 + v2/internal/index/ttl_index.go | 71 + v2/internal/index/ttl_index_test.go | 54 + v2/internal/item.go | 10 + v2/internal/metadata/metadata.go | 22 + v2/internal/mocks/datafile.go | 158 ++ v2/internal/mocks/indexer.go | 56 + v2/internal/utils.go | 112 ++ v2/internal/utils_test.go | 108 ++ v2/internal/version.go | 18 + v2/internal/version_test.go | 15 + v2/options.go | 118 ++ v2/scripts/migrations/v0_to_v1.go | 159 ++ v2/scripts/migrations/v0_to_v1_test.go | 58 + 31 files changed, 6120 insertions(+) create mode 100644 v2/bitcask.go create mode 100644 v2/bitcask_test.go create mode 100644 v2/doc.go create mode 100644 v2/doc_test.go create mode 100644 v2/errors.go create mode 100644 v2/go.mod create mode 100644 v2/go.sum create mode 100644 v2/internal/config/config.go create mode 100644 v2/internal/data/codec/decoder.go create mode 100644 v2/internal/data/codec/decoder_test.go create mode 100644 v2/internal/data/codec/encoder.go create mode 100644 v2/internal/data/codec/encoder_test.go create mode 100644 v2/internal/data/datafile.go create mode 100644 v2/internal/data/recover.go create mode 100644 v2/internal/entry.go create mode 100644 v2/internal/index/codec_index.go create mode 100644 v2/internal/index/codec_index_test.go create mode 100644 v2/internal/index/index.go create mode 100644 v2/internal/index/ttl_index.go create mode 100644 v2/internal/index/ttl_index_test.go create mode 100644 v2/internal/item.go create mode 100644 v2/internal/metadata/metadata.go create mode 100644 v2/internal/mocks/datafile.go create mode 100644 v2/internal/mocks/indexer.go create mode 100644 v2/internal/utils.go create mode 100644 v2/internal/utils_test.go create mode 100644 v2/internal/version.go create mode 100644 v2/internal/version_test.go create mode 100644 v2/options.go create mode 100644 v2/scripts/migrations/v0_to_v1.go create mode 100644 v2/scripts/migrations/v0_to_v1_test.go diff --git a/v2/bitcask.go b/v2/bitcask.go new file mode 100644 index 0000000..481f789 --- /dev/null +++ b/v2/bitcask.go @@ -0,0 +1,1060 @@ +package bitcask + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/abcum/lcp" + "github.com/gofrs/flock" + art "github.com/plar/go-adaptive-radix-tree" + log "github.com/sirupsen/logrus" + + "git.mills.io/prologic/bitcask/v2/internal" + "git.mills.io/prologic/bitcask/v2/internal/config" + "git.mills.io/prologic/bitcask/v2/internal/data" + "git.mills.io/prologic/bitcask/v2/internal/data/codec" + "git.mills.io/prologic/bitcask/v2/internal/index" + "git.mills.io/prologic/bitcask/v2/internal/metadata" + "git.mills.io/prologic/bitcask/v2/scripts/migrations" +) + +const ( + lockfile = "lock" + ttlIndexFile = 
"ttl_index" +) + +// Bitcask is a struct that represents a on-disk LSM and WAL data structure +// and in-memory hash of key/value pairs as per the Bitcask paper and seen +// in the Riak database. +type Bitcask struct { + mu sync.RWMutex + flock *flock.Flock + config *config.Config + options []Option + path string + curr data.Datafile + datafiles map[int]data.Datafile + trie art.Tree + indexer index.Indexer + ttlIndexer index.Indexer + ttlIndex art.Tree + metadata *metadata.MetaData + isMerging bool +} + +// Stats is a struct returned by Stats() on an open Bitcask instance +type Stats struct { + Datafiles int + Keys int + Size int64 +} + +// Stats returns statistics about the database including the number of +// data files, keys and overall size on disk of the data +func (b *Bitcask) Stats() (stats Stats, err error) { + if stats.Size, err = internal.DirSize(b.path); err != nil { + return + } + + b.mu.RLock() + stats.Datafiles = len(b.datafiles) + stats.Keys = b.trie.Size() + b.mu.RUnlock() + + return +} + +// Close closes the database and removes the lock. It is important to call +// Close() as this is the only way to cleanup the lock held by the open +// database. +func (b *Bitcask) Close() error { + b.mu.RLock() + defer func() { + b.mu.RUnlock() + b.flock.Unlock() + }() + + return b.close() +} + +func (b *Bitcask) close() error { + if err := b.saveIndexes(); err != nil { + return err + } + + b.metadata.IndexUpToDate = true + if err := b.saveMetadata(); err != nil { + return err + } + + for _, df := range b.datafiles { + if err := df.Close(); err != nil { + return err + } + } + + return b.curr.Close() +} + +// Sync flushes all buffers to disk ensuring all data is written +func (b *Bitcask) Sync() error { + b.mu.RLock() + defer b.mu.RUnlock() + + if err := b.saveMetadata(); err != nil { + return err + } + + return b.curr.Sync() +} + +// Get fetches value for a key +func (b *Bitcask) Get(key []byte) ([]byte, error) { + b.mu.RLock() + defer b.mu.RUnlock() + e, err := b.get(key) + if err != nil { + return nil, err + } + return e.Value, nil +} + +// Has returns true if the key exists in the database, false otherwise. +func (b *Bitcask) Has(key []byte) bool { + b.mu.RLock() + defer b.mu.RUnlock() + _, found := b.trie.Search(key) + if found { + return !b.isExpired(key) + } + return found +} + +// Put stores the key and value in the database. 
+func (b *Bitcask) Put(key, value []byte) error { + if len(key) == 0 { + return ErrEmptyKey + } + if b.config.MaxKeySize > 0 && uint32(len(key)) > b.config.MaxKeySize { + return ErrKeyTooLarge + } + if b.config.MaxValueSize > 0 && uint64(len(value)) > b.config.MaxValueSize { + return ErrValueTooLarge + } + + b.mu.Lock() + defer b.mu.Unlock() + offset, n, err := b.put(key, value) + if err != nil { + return err + } + + if b.config.Sync { + if err := b.curr.Sync(); err != nil { + return err + } + } + + // in case of a successful `put`, IndexUpToDate will always be false + b.metadata.IndexUpToDate = false + + if oldItem, found := b.trie.Search(key); found { + b.metadata.ReclaimableSpace += oldItem.(internal.Item).Size + } + + item := internal.Item{FileID: b.curr.FileID(), Offset: offset, Size: n} + b.trie.Insert(key, item) + + return nil +} + +// PutWithTTL stores the key and value in the database with the given TTL +func (b *Bitcask) PutWithTTL(key, value []byte, ttl time.Duration) error { + if len(key) == 0 { + return ErrEmptyKey + } + if b.config.MaxKeySize > 0 && uint32(len(key)) > b.config.MaxKeySize { + return ErrKeyTooLarge + } + if b.config.MaxValueSize > 0 && uint64(len(value)) > b.config.MaxValueSize { + return ErrValueTooLarge + } + + expiry := time.Now().Add(ttl) + + b.mu.Lock() + defer b.mu.Unlock() + offset, n, err := b.putWithExpiry(key, value, expiry) + if err != nil { + return err + } + + if b.config.Sync { + if err := b.curr.Sync(); err != nil { + return err + } + } + + // in case of a successful `put`, IndexUpToDate will always be false + b.metadata.IndexUpToDate = false + + if oldItem, found := b.trie.Search(key); found { + b.metadata.ReclaimableSpace += oldItem.(internal.Item).Size + } + + item := internal.Item{FileID: b.curr.FileID(), Offset: offset, Size: n} + b.trie.Insert(key, item) + b.ttlIndex.Insert(key, expiry) + + return nil +} + +// Delete deletes the named key. +func (b *Bitcask) Delete(key []byte) error { + b.mu.Lock() + defer b.mu.Unlock() + return b.delete(key) +} + +// delete deletes the named key. If the key doesn't exist or an I/O error +// occurs the error is returned. +func (b *Bitcask) delete(key []byte) error { + _, _, err := b.put(key, []byte{}) + if err != nil { + return err + } + if item, found := b.trie.Search(key); found { + b.metadata.ReclaimableSpace += item.(internal.Item).Size + codec.MetaInfoSize + int64(len(key)) + } + b.trie.Delete(key) + b.ttlIndex.Delete(key) + + return nil +} + +// Sift iterates over all keys in the database calling the function `f` for +// each key. If the KV pair is expired or the function returns true, that key is +// deleted from the database. +// If the function returns an error on any key, no further keys are processed, no +// keys are deleted, and the first error is returned. +func (b *Bitcask) Sift(f func(key []byte) (bool, error)) (err error) { + keysToDelete := art.New() + + b.mu.RLock() + b.trie.ForEach(func(node art.Node) bool { + if b.isExpired(node.Key()) { + keysToDelete.Insert(node.Key(), true) + return true + } + var shouldDelete bool + if shouldDelete, err = f(node.Key()); err != nil { + return false + } else if shouldDelete { + keysToDelete.Insert(node.Key(), true) + } + return true + }) + b.mu.RUnlock() + + b.mu.Lock() + defer b.mu.Unlock() + keysToDelete.ForEach(func(node art.Node) (cont bool) { + b.delete(node.Key()) + return true + }) + return +} + +// DeleteAll deletes all the keys. If an I/O error occurs the error is returned.
+func (b *Bitcask) DeleteAll() (err error) { + b.mu.RLock() + defer b.mu.RUnlock() + + b.trie.ForEach(func(node art.Node) bool { + _, _, err = b.put(node.Key(), []byte{}) + if err != nil { + return false + } + item, _ := b.trie.Search(node.Key()) + b.metadata.ReclaimableSpace += item.(internal.Item).Size + codec.MetaInfoSize + int64(len(node.Key())) + return true + }) + b.trie = art.New() + b.ttlIndex = art.New() + + return +} + +// Scan performs a prefix scan of keys matching the given prefix and calling +// the function `f` with the keys found. If the function returns an error +// no further keys are processed and the first error is returned. +func (b *Bitcask) Scan(prefix []byte, f func(key []byte) error) (err error) { + b.mu.RLock() + defer b.mu.RUnlock() + + b.trie.ForEachPrefix(prefix, func(node art.Node) bool { + // Skip the root node + if len(node.Key()) == 0 { + return true + } + + if err = f(node.Key()); err != nil { + return false + } + return true + }) + return +} + +// SiftScan iterates over all keys in the database beginning with the given +// prefix, calling the function `f` for each key. If the KV pair is expired or +// the function returns true, that key is deleted from the database. +// If the function returns an error on any key, no further keys are processed, +// no keys are deleted, and the first error is returned. +func (b *Bitcask) SiftScan(prefix []byte, f func(key []byte) (bool, error)) (err error) { + keysToDelete := art.New() + + b.mu.RLock() + b.trie.ForEachPrefix(prefix, func(node art.Node) bool { + // Skip the root node + if len(node.Key()) == 0 { + return true + } + if b.isExpired(node.Key()) { + keysToDelete.Insert(node.Key(), true) + return true + } + var shouldDelete bool + if shouldDelete, err = f(node.Key()); err != nil { + return false + } else if shouldDelete { + keysToDelete.Insert(node.Key(), true) + } + return true + }) + b.mu.RUnlock() + + b.mu.Lock() + defer b.mu.Unlock() + keysToDelete.ForEach(func(node art.Node) (cont bool) { + b.delete(node.Key()) + return true + }) + return +} + +// Range performs a range scan of keys matching a range of keys between the +// start key and end key and calling the function `f` with the keys found. +// If the function returns an error no further keys are processed and the +// first error returned. +func (b *Bitcask) Range(start, end []byte, f func(key []byte) error) (err error) { + if bytes.Compare(start, end) == 1 { + return ErrInvalidRange + } + + commonPrefix := lcp.LCP(start, end) + if commonPrefix == nil { + return ErrInvalidRange + } + + b.mu.RLock() + defer b.mu.RUnlock() + + b.trie.ForEachPrefix(commonPrefix, func(node art.Node) bool { + if bytes.Compare(node.Key(), start) >= 0 && bytes.Compare(node.Key(), end) <= 0 { + if err = f(node.Key()); err != nil { + return false + } + return true + } else if bytes.Compare(node.Key(), start) >= 0 && bytes.Compare(node.Key(), end) > 0 { + return false + } + return true + }) + return +} + +// SiftRange performs a range scan of keys matching a range of keys between the +// start key and end key and calling the function `f` with the keys found. +// If the KV pair is expired or the function returns true, that key is deleted +// from the database. +// If the function returns an error on any key, no further keys are processed, no +// keys are deleted, and the first error is returned. 
+func (b *Bitcask) SiftRange(start, end []byte, f func(key []byte) (bool, error)) (err error) { + if bytes.Compare(start, end) == 1 { + return ErrInvalidRange + } + + commonPrefix := lcp.LCP(start, end) + if commonPrefix == nil { + return ErrInvalidRange + } + + keysToDelete := art.New() + + b.mu.RLock() + b.trie.ForEachPrefix(commonPrefix, func(node art.Node) bool { + if bytes.Compare(node.Key(), start) >= 0 && bytes.Compare(node.Key(), end) <= 0 { + if b.isExpired(node.Key()) { + keysToDelete.Insert(node.Key(), true) + return true + } + var shouldDelete bool + if shouldDelete, err = f(node.Key()); err != nil { + return false + } else if shouldDelete { + keysToDelete.Insert(node.Key(), true) + } + return true + } else if bytes.Compare(node.Key(), start) >= 0 && bytes.Compare(node.Key(), end) > 0 { + return false + } + return true + }) + b.mu.RUnlock() + + b.mu.Lock() + defer b.mu.Unlock() + + keysToDelete.ForEach(func(node art.Node) (cont bool) { + b.delete(node.Key()) + return true + }) + + return +} + +// Len returns the total number of keys in the database +func (b *Bitcask) Len() int { + b.mu.RLock() + defer b.mu.RUnlock() + return b.trie.Size() +} + +// Keys returns all keys in the database as a channel of keys +func (b *Bitcask) Keys() chan []byte { + ch := make(chan []byte) + go func() { + b.mu.RLock() + defer b.mu.RUnlock() + + for it := b.trie.Iterator(); it.HasNext(); { + node, _ := it.Next() + if b.isExpired(node.Key()) { + continue + } + ch <- node.Key() + } + close(ch) + }() + + return ch +} + +// RunGC deletes all expired keys +func (b *Bitcask) RunGC() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.runGC() +} + +// runGC deletes all keys that are expired +// caller function should take care of the locking when calling this method +func (b *Bitcask) runGC() (err error) { + keysToDelete := art.New() + + b.ttlIndex.ForEach(func(node art.Node) (cont bool) { + if !b.isExpired(node.Key()) { + // later, return false here when the ttlIndex is sorted + return true + } + keysToDelete.Insert(node.Key(), true) + //keysToDelete = append(keysToDelete, node.Key()) + return true + }) + + keysToDelete.ForEach(func(node art.Node) (cont bool) { + b.delete(node.Key()) + return true + }) + + return nil +} + +// Fold iterates over all keys in the database calling the function `f` for +// each key. If the function returns an error, no further keys are processed +// and the error is returned. 
+func (b *Bitcask) Fold(f func(key []byte) error) (err error) { + b.mu.RLock() + defer b.mu.RUnlock() + + b.trie.ForEach(func(node art.Node) bool { + if err = f(node.Key()); err != nil { + return false + } + return true + }) + + return +} + +// get retrieves the value of the given key +func (b *Bitcask) get(key []byte) (internal.Entry, error) { + var df data.Datafile + + value, found := b.trie.Search(key) + if !found { + return internal.Entry{}, ErrKeyNotFound + } + if b.isExpired(key) { + return internal.Entry{}, ErrKeyExpired + } + + item := value.(internal.Item) + + if item.FileID == b.curr.FileID() { + df = b.curr + } else { + df = b.datafiles[item.FileID] + } + + e, err := df.ReadAt(item.Offset, item.Size) + if err != nil { + return internal.Entry{}, err + } + + checksum := crc32.ChecksumIEEE(e.Value) + if checksum != e.Checksum { + return internal.Entry{}, ErrChecksumFailed + } + + return e, nil +} + +func (b *Bitcask) maybeRotate() error { + size := b.curr.Size() + if size < int64(b.config.MaxDatafileSize) { + return nil + } + + err := b.curr.Close() + if err != nil { + return err + } + + id := b.curr.FileID() + + df, err := data.NewDatafile( + b.path, id, true, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + + b.datafiles[id] = df + + id = b.curr.FileID() + 1 + curr, err := data.NewDatafile( + b.path, id, false, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + b.curr = curr + err = b.saveIndexes() + if err != nil { + return err + } + + return nil +} + +// put inserts a new (key, value). Both key and value are valid inputs. +func (b *Bitcask) put(key, value []byte) (int64, int64, error) { + if err := b.maybeRotate(); err != nil { + return -1, 0, fmt.Errorf("error rotating active datafile: %w", err) + } + + return b.curr.Write(internal.NewEntry(key, value, nil)) +} + +// putWithExpiry inserts a new (key, value, expiry). +// Both key and value are valid inputs. +func (b *Bitcask) putWithExpiry(key, value []byte, expiry time.Time) (int64, int64, error) { + if err := b.maybeRotate(); err != nil { + return -1, 0, fmt.Errorf("error rotating active datafile: %w", err) + } + + return b.curr.Write(internal.NewEntry(key, value, &expiry)) +} + +// closeCurrentFile closes current datafile and makes it read only. 
+func (b *Bitcask) closeCurrentFile() error { + if err := b.curr.Close(); err != nil { + return err + } + + id := b.curr.FileID() + df, err := data.NewDatafile( + b.path, id, true, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + + b.datafiles[id] = df + return nil +} + +// openNewWritableFile opens a new datafile for writing data +func (b *Bitcask) openNewWritableFile() error { + id := b.curr.FileID() + 1 + curr, err := data.NewDatafile( + b.path, id, false, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + b.curr = curr + return nil +} + +// Reopen closes and reopens the database +func (b *Bitcask) Reopen() error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.reopen() +} + +// reopen reloads a bitcask object with index and datafiles +// caller of this method should take care of locking +func (b *Bitcask) reopen() error { + datafiles, lastID, err := loadDatafiles( + b.path, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + t, ttlIndex, err := loadIndexes(b, datafiles, lastID) + if err != nil { + return err + } + + curr, err := data.NewDatafile( + b.path, lastID, false, + b.config.MaxKeySize, + b.config.MaxValueSize, + b.config.FileFileModeBeforeUmask, + ) + if err != nil { + return err + } + + b.trie = t + b.curr = curr + b.ttlIndex = ttlIndex + b.datafiles = datafiles + + return nil +} + +// Merge merges all datafiles in the database. Old keys are squashed +// and deleted keys removed. Duplicate key/value pairs are also removed. +// Call this function periodically to reclaim disk space. +func (b *Bitcask) Merge() error { + b.mu.Lock() + if b.isMerging { + b.mu.Unlock() + return ErrMergeInProgress + } + b.isMerging = true + b.mu.Unlock() + defer func() { + b.isMerging = false + }() + b.mu.RLock() + err := b.closeCurrentFile() + if err != nil { + b.mu.RUnlock() + return err + } + filesToMerge := make([]int, 0, len(b.datafiles)) + for k := range b.datafiles { + filesToMerge = append(filesToMerge, k) + } + err = b.openNewWritableFile() + if err != nil { + b.mu.RUnlock() + return err + } + b.mu.RUnlock() + sort.Ints(filesToMerge) + + // Temporary merged database path + temp, err := ioutil.TempDir(b.path, "merge") + if err != nil { + return err + } + defer os.RemoveAll(temp) + + // Create a merged database + mdb, err := Open(temp, withConfig(b.config)) + if err != nil { + return err + } + + // Rewrite all key/value pairs into merged database + // Doing this automatically strips deleted keys and + // old key/value pairs + err = b.Fold(func(key []byte) error { + item, _ := b.trie.Search(key) + // if key was updated after start of merge operation, nothing to do + if item.(internal.Item).FileID > filesToMerge[len(filesToMerge)-1] { + return nil + } + e, err := b.get(key) + if err != nil { + return err + } + + if e.Expiry != nil { + if err := mdb.PutWithTTL(key, e.Value, time.Until(*e.Expiry)); err != nil { + return err + } + } else { + if err := mdb.Put(key, e.Value); err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + if err = mdb.Close(); err != nil { + return err + } + // no reads and writes till we reopen + b.mu.Lock() + defer b.mu.Unlock() + if err = b.close(); err != nil { + return err + } + + // Remove data files + files, err := ioutil.ReadDir(b.path) + if err != nil { + return err + } + for _, file := range files { + if
file.IsDir() || file.Name() == lockfile { + continue + } + ids, err := internal.ParseIds([]string{file.Name()}) + if err != nil { + return err + } + // if datafile was created after start of merge, skip + if len(ids) > 0 && ids[0] > filesToMerge[len(filesToMerge)-1] { + continue + } + err = os.RemoveAll(path.Join(b.path, file.Name())) + if err != nil { + return err + } + } + + // Rename all merged data files + files, err = ioutil.ReadDir(mdb.path) + if err != nil { + return err + } + for _, file := range files { + // see #225 + if file.Name() == lockfile { + continue + } + err := os.Rename( + path.Join([]string{mdb.path, file.Name()}...), + path.Join([]string{b.path, file.Name()}...), + ) + if err != nil { + return err + } + } + b.metadata.ReclaimableSpace = 0 + + // And finally reopen the database + return b.reopen() +} + +// Open opens the database at the given path with optional options. +// Options can be provided with the `WithXXX` functions that provide +// configuration options as functions. +func Open(path string, options ...Option) (*Bitcask, error) { + var ( + cfg *config.Config + err error + meta *metadata.MetaData + ) + + configPath := filepath.Join(path, "config.json") + if internal.Exists(configPath) { + cfg, err = config.Load(configPath) + if err != nil { + return nil, &ErrBadConfig{err} + } + } else { + cfg = newDefaultConfig() + } + + if err := checkAndUpgrade(cfg, configPath); err != nil { + return nil, err + } + + for _, opt := range options { + if err := opt(cfg); err != nil { + return nil, err + } + } + + if err := os.MkdirAll(path, cfg.DirFileModeBeforeUmask); err != nil { + return nil, err + } + + meta, err = loadMetadata(path) + if err != nil { + return nil, &ErrBadMetadata{err} + } + + bitcask := &Bitcask{ + flock: flock.New(filepath.Join(path, lockfile)), + config: cfg, + options: options, + path: path, + indexer: index.NewIndexer(), + ttlIndexer: index.NewTTLIndexer(), + metadata: meta, + } + + ok, err := bitcask.flock.TryLock() + if err != nil { + return nil, err + } + + if !ok { + return nil, ErrDatabaseLocked + } + + if err := cfg.Save(configPath); err != nil { + return nil, err + } + + if cfg.AutoRecovery { + if err := data.CheckAndRecover(path, cfg); err != nil { + return nil, fmt.Errorf("recovering database: %s", err) + } + } + if err := bitcask.Reopen(); err != nil { + return nil, err + } + + return bitcask, nil +} + +// checkAndUpgrade checks if DB upgrade is required +// if yes, then applies version upgrade and saves updated config +func checkAndUpgrade(cfg *config.Config, configPath string) error { + if cfg.DBVersion == CurrentDBVersion { + return nil + } + if cfg.DBVersion > CurrentDBVersion { + return ErrInvalidVersion + } + // for v0 to v1 upgrade, we need to append 8 null bytes after each encoded entry in datafiles + if cfg.DBVersion == uint32(0) && CurrentDBVersion == uint32(1) { + log.Warn("upgrading db version, might take some time....") + cfg.DBVersion = CurrentDBVersion + return migrations.ApplyV0ToV1(filepath.Dir(configPath), cfg.MaxDatafileSize) + } + return nil +} + +// Backup copies db directory to given path +// it creates path if it does not exist +func (b *Bitcask) Backup(path string) error { + if !internal.Exists(path) { + if err := os.MkdirAll(path, b.config.DirFileModeBeforeUmask); err != nil { + return err + } + } + return internal.Copy(b.path, path, []string{lockfile}) +} + +// saveIndex saves index and ttl_index currently in RAM to disk +func (b *Bitcask) saveIndexes() error { + tempIdx := "temp_index" + if err := 
b.indexer.Save(b.trie, filepath.Join(b.path, tempIdx)); err != nil { + return err + } + if err := os.Rename(filepath.Join(b.path, tempIdx), filepath.Join(b.path, "index")); err != nil { + return err + } + if err := b.ttlIndexer.Save(b.ttlIndex, filepath.Join(b.path, tempIdx)); err != nil { + return err + } + return os.Rename(filepath.Join(b.path, tempIdx), filepath.Join(b.path, ttlIndexFile)) +} + +// saveMetadata saves metadata into disk +func (b *Bitcask) saveMetadata() error { + return b.metadata.Save(filepath.Join(b.path, "meta.json"), b.config.DirFileModeBeforeUmask) +} + +// Reclaimable returns space that can be reclaimed +func (b *Bitcask) Reclaimable() int64 { + return b.metadata.ReclaimableSpace +} + +// isExpired returns true if a key has expired +// it returns false if key does not exist in ttl index +func (b *Bitcask) isExpired(key []byte) bool { + expiry, found := b.ttlIndex.Search(key) + if !found { + return false + } + return expiry.(time.Time).Before(time.Now().UTC()) +} + +func loadDatafiles(path string, maxKeySize uint32, maxValueSize uint64, fileModeBeforeUmask os.FileMode) (datafiles map[int]data.Datafile, lastID int, err error) { + fns, err := internal.GetDatafiles(path) + if err != nil { + return nil, 0, err + } + + ids, err := internal.ParseIds(fns) + if err != nil { + return nil, 0, err + } + + datafiles = make(map[int]data.Datafile, len(ids)) + for _, id := range ids { + datafiles[id], err = data.NewDatafile( + path, id, true, + maxKeySize, + maxValueSize, + fileModeBeforeUmask, + ) + if err != nil { + return + } + + } + if len(ids) > 0 { + lastID = ids[len(ids)-1] + } + return +} + +func getSortedDatafiles(datafiles map[int]data.Datafile) []data.Datafile { + out := make([]data.Datafile, len(datafiles)) + idx := 0 + for _, df := range datafiles { + out[idx] = df + idx++ + } + sort.Slice(out, func(i, j int) bool { + return out[i].FileID() < out[j].FileID() + }) + return out +} + +// loadIndexes loads index from disk to memory. 
If index is not available or partially available (last bitcask process crashed) +// then it iterates over last datafile and construct index +// we construct ttl_index here also along with normal index +func loadIndexes(b *Bitcask, datafiles map[int]data.Datafile, lastID int) (art.Tree, art.Tree, error) { + t, found, err := b.indexer.Load(filepath.Join(b.path, "index"), b.config.MaxKeySize) + if err != nil { + return nil, nil, err + } + ttlIndex, _, err := b.ttlIndexer.Load(filepath.Join(b.path, ttlIndexFile), b.config.MaxKeySize) + if err != nil { + return nil, nil, err + } + if found && b.metadata.IndexUpToDate { + return t, ttlIndex, nil + } + if found { + if err := loadIndexFromDatafile(t, ttlIndex, datafiles[lastID]); err != nil { + return nil, ttlIndex, err + } + return t, ttlIndex, nil + } + sortedDatafiles := getSortedDatafiles(datafiles) + for _, df := range sortedDatafiles { + if err := loadIndexFromDatafile(t, ttlIndex, df); err != nil { + return nil, ttlIndex, err + } + } + return t, ttlIndex, nil +} + +func loadIndexFromDatafile(t art.Tree, ttlIndex art.Tree, df data.Datafile) error { + var offset int64 + for { + e, n, err := df.Read() + if err != nil { + if err == io.EOF { + break + } + return err + } + // Tombstone value (deleted key) + if len(e.Value) == 0 { + t.Delete(e.Key) + offset += n + continue + } + item := internal.Item{FileID: df.FileID(), Offset: offset, Size: n} + t.Insert(e.Key, item) + if e.Expiry != nil { + ttlIndex.Insert(e.Key, *e.Expiry) + } + offset += n + } + return nil +} + +func loadMetadata(path string) (*metadata.MetaData, error) { + if !internal.Exists(filepath.Join(path, "meta.json")) { + meta := new(metadata.MetaData) + return meta, nil + } + return metadata.Load(filepath.Join(path, "meta.json")) +} diff --git a/v2/bitcask_test.go b/v2/bitcask_test.go new file mode 100644 index 0000000..0743016 --- /dev/null +++ b/v2/bitcask_test.go @@ -0,0 +1,2281 @@ +package bitcask + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "git.mills.io/prologic/bitcask/v2/internal" + "git.mills.io/prologic/bitcask/v2/internal/config" + "git.mills.io/prologic/bitcask/v2/internal/mocks" +) + +var ( + ErrMockError = errors.New("error: mock error") +) + +type sortByteArrays [][]byte + +func (b sortByteArrays) Len() int { + return len(b) +} + +func (b sortByteArrays) Less(i, j int) bool { + switch bytes.Compare(b[i], b[j]) { + case -1: + return true + case 0, 1: + return false + } + return false +} + +func (b sortByteArrays) Swap(i, j int) { + b[j], b[i] = b[i], b[j] +} + +func SortByteArrays(src [][]byte) [][]byte { + sorted := sortByteArrays(src) + sort.Sort(sorted) + return sorted +} + +func TestAll(t *testing.T) { + var ( + db *Bitcask + testdir string + err error + ) + + assert := assert.New(t) + + testdir, err = ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Len", func(t *testing.T) { + assert.Equal(1, db.Len()) + }) + + t.Run("PutWithTTL", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar"), []byte("baz"), 0) + assert.NoError(err) + 
}) + + t.Run("GetExpiredKey", func(t *testing.T) { + time.Sleep(time.Millisecond) + _, err := db.Get([]byte("bar")) + assert.Error(err) + assert.Equal(ErrKeyExpired, err) + }) + + t.Run("Has", func(t *testing.T) { + assert.True(db.Has([]byte("foo"))) + }) + + t.Run("HasWithExpired", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar"), []byte("baz"), 0) + assert.NoError(err) + time.Sleep(time.Millisecond) + assert.False(db.Has([]byte("bar"))) + }) + + t.Run("RunGC", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar"), []byte("baz"), 0) + assert.NoError(err) + time.Sleep(time.Millisecond) + err = db.RunGC() + assert.NoError(err) + _, err := db.Get([]byte("bar")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Keys", func(t *testing.T) { + keys := make([][]byte, 0) + for key := range db.Keys() { + keys = append(keys, key) + } + assert.Equal([][]byte{[]byte("foo")}, keys) + }) + + t.Run("Fold", func(t *testing.T) { + var ( + keys [][]byte + values [][]byte + ) + + err := db.Fold(func(key []byte) error { + value, err := db.Get(key) + if err != nil { + return err + } + keys = append(keys, key) + values = append(values, value) + return nil + }) + assert.NoError(err) + assert.Equal([][]byte{[]byte("foo")}, keys) + assert.Equal([][]byte{[]byte("bar")}, values) + }) + + t.Run("Delete", func(t *testing.T) { + err := db.Delete([]byte("foo")) + assert.NoError(err) + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Backup", func(t *testing.T) { + path, err := ioutil.TempDir("", "backup") + defer os.RemoveAll(path) + assert.NoError(err) + err = db.Backup(filepath.Join(path, "db-backup")) + assert.NoError(err) + }) + + t.Run("Sift", func(t *testing.T) { + err = db.Put([]byte("toBeSifted"), []byte("siftMe")) + assert.NoError(err) + err = db.Put([]byte("notToBeSifted"), []byte("dontSiftMe")) + assert.NoError(err) + err := db.Sift(func(key []byte) (bool, error) { + value, err := db.Get(key) + if err != nil { + return false, err + } + if string(value) == "siftMe" { + return true, nil + } + return false, nil + }) + assert.NoError(err) + _, err = db.Get([]byte("toBeSifted")) + assert.Equal(ErrKeyNotFound, err) + _, err = db.Get([]byte("notToBeSifted")) + assert.NoError(err) + }) + + t.Run("SiftScan", func(t *testing.T) { + err := db.DeleteAll() + assert.NoError(err) + err = db.Put([]byte("toBeSifted"), []byte("siftMe")) + assert.NoError(err) + err = db.Put([]byte("toBeSkipped"), []byte("siftMe")) + assert.NoError(err) + err = db.Put([]byte("toBeSiftedAsWell"), []byte("siftMe")) + assert.NoError(err) + err = db.Put([]byte("toBeSiftedButNotReally"), []byte("dontSiftMe")) + assert.NoError(err) + err = db.SiftScan([]byte("toBeSifted"), func(key []byte) (bool, error) { + value, err := db.Get(key) + if err != nil { + return false, err + } + if string(value) == "siftMe" { + return true, nil + } + return false, nil + }) + assert.NoError(err) + _, err = db.Get([]byte("toBeSifted")) + assert.Equal(ErrKeyNotFound, err) + _, err = db.Get([]byte("toBeSiftedAsWell")) + assert.Equal(ErrKeyNotFound, err) + _, err = db.Get([]byte("toBeSkipped")) + assert.NoError(err) + _, err = db.Get([]byte("toBeSiftedButNotReally")) + assert.NoError(err) + }) + + t.Run("DeleteAll", func(t *testing.T) { + err = db.DeleteAll() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) +} + +func TestDeleteAll(t *testing.T) 
{ + assert := assert.New(t) + testdir, _ := ioutil.TempDir("", "bitcask") + db, _ := Open(testdir) + _ = db.Put([]byte("foo"), []byte("foo")) + _ = db.Put([]byte("bar"), []byte("bar")) + _ = db.Put([]byte("baz"), []byte("baz")) + assert.Equal(3, db.Len()) + err := db.DeleteAll() + assert.NoError(err) + assert.Equal(0, db.Len()) + _, err = db.Get([]byte("foo")) + assert.Equal(ErrKeyNotFound, err) + _, err = db.Get([]byte("bar")) + assert.Equal(ErrKeyNotFound, err) + _, err = db.Get([]byte("baz")) + assert.Equal(ErrKeyNotFound, err) +} + +func TestReopen1(t *testing.T) { + assert := assert.New(t) + for i := 0; i < 10; i++ { + testdir, _ := ioutil.TempDir("", "bitcask") + db, _ := Open(testdir, WithMaxDatafileSize(1)) + _ = db.Put([]byte("foo"), []byte("bar")) + _ = db.Put([]byte("foo"), []byte("bar1")) + _ = db.Put([]byte("foo"), []byte("bar2")) + _ = db.Put([]byte("foo"), []byte("bar3")) + _ = db.Put([]byte("foo"), []byte("bar4")) + _ = db.Put([]byte("foo"), []byte("bar5")) + _ = db.Reopen() + val, _ := db.Get([]byte("foo")) + assert.Equal("bar5", string(val)) + } +} + +func TestReopen(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Reopen", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("PutWithTTL", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar"), []byte("baz"), 0) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Reopen", func(t *testing.T) { + err = db.Reopen() + assert.NoError(err) + }) + + t.Run("GetAfterReopen", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("PutAfterReopen", func(t *testing.T) { + err = db.Put([]byte("zzz"), []byte("foo")) + assert.NoError(err) + }) + + t.Run("GetAfterReopenAndPut", func(t *testing.T) { + val, err := db.Get([]byte("zzz")) + assert.NoError(err) + assert.Equal([]byte("foo"), val) + }) + + t.Run("GetExpiredKeyAfterReopen", func(t *testing.T) { + val, err := db.Get([]byte("bar")) + assert.Error(err) + assert.Equal(ErrKeyExpired, err) + assert.Nil(val) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestDeletedKeys(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Delete", func(t *testing.T) { + err := db.Delete([]byte("foo")) + assert.NoError(err) + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) + + t.Run("Reopen", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", 
func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestMetadata(t *testing.T) { + assert := assert.New(t) + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir) + assert.NoError(err) + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + err = db.Close() + assert.NoError(err) + db, err = Open(testdir) + assert.NoError(err) + + t.Run("IndexUptoDateAfterCloseAndOpen", func(t *testing.T) { + assert.Equal(true, db.metadata.IndexUpToDate) + }) + t.Run("IndexUptoDateAfterPut", func(t *testing.T) { + assert.NoError(db.Put([]byte("foo1"), []byte("bar1"))) + assert.Equal(false, db.metadata.IndexUpToDate) + }) + t.Run("Reclaimable", func(t *testing.T) { + assert.Equal(int64(0), db.Reclaimable()) + }) + t.Run("ReclaimableAfterNewPut", func(t *testing.T) { + assert.NoError(db.Put([]byte("hello"), []byte("world"))) + assert.Equal(int64(0), db.Reclaimable()) + }) + t.Run("ReclaimableAfterRepeatedPut", func(t *testing.T) { + assert.NoError(db.Put([]byte("hello"), []byte("world"))) + assert.Equal(int64(34), db.Reclaimable()) + }) + t.Run("ReclaimableAfterDelete", func(t *testing.T) { + assert.NoError(db.Delete([]byte("hello"))) + assert.Equal(int64(97), db.Reclaimable()) + }) + t.Run("ReclaimableAfterNonExistingDelete", func(t *testing.T) { + assert.NoError(db.Delete([]byte("hello1"))) + assert.Equal(int64(97), db.Reclaimable()) + }) + t.Run("ReclaimableAfterDeleteAll", func(t *testing.T) { + assert.NoError(db.DeleteAll()) + assert.Equal(int64(214), db.Reclaimable()) + }) + t.Run("ReclaimableAfterMerge", func(t *testing.T) { + assert.NoError(db.Merge()) + assert.Equal(int64(0), db.Reclaimable()) + }) + t.Run("IndexUptoDateAfterMerge", func(t *testing.T) { + assert.Equal(true, db.metadata.IndexUpToDate) + }) + t.Run("ReclaimableAfterMergeAndDeleteAll", func(t *testing.T) { + assert.NoError(db.DeleteAll()) + assert.Equal(int64(0), db.Reclaimable()) + }) +} + +func TestConfigErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("CorruptConfig", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir) + assert.NoError(err) + assert.NoError(db.Close()) + + assert.NoError(ioutil.WriteFile(filepath.Join(testdir, "config.json"), []byte("foo bar baz"), 0600)) + + _, err = Open(testdir) + assert.Error(err) + }) + + t.Run("BadConfigPath", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + assert.NoError(os.Mkdir(filepath.Join(testdir, "config.json"), 0700)) + + _, err = Open(testdir) + assert.Error(err) + }) +} + +func TestAutoRecovery(t *testing.T) { + withAutoRecovery := []bool{false, true} + + for _, autoRecovery := range withAutoRecovery { + t.Run(fmt.Sprintf("%v", autoRecovery), func(t *testing.T) { + require := require.New(t) + testdir, err := ioutil.TempDir("", "bitcask") + require.NoError(err) + db, err := Open(testdir) + require.NoError(err) + + // Insert 10 key-value pairs and verify all is ok. 
+ makeKeyVal := func(i int) ([]byte, []byte) { + return []byte(fmt.Sprintf("foo%d", i)), []byte(fmt.Sprintf("bar%d", i)) + } + n := 10 + for i := 0; i < n; i++ { + key, val := makeKeyVal(i) + err = db.Put(key, val) + require.NoError(err) + } + for i := 0; i < n; i++ { + key, val := makeKeyVal(i) + rval, err := db.Get(key) + require.NoError(err) + require.Equal(val, rval) + } + err = db.Close() + require.NoError(err) + + // Corrupt the last inserted key + f, err := os.OpenFile(path.Join(testdir, "000000000.data"), os.O_RDWR, 0755) + require.NoError(err) + fi, err := f.Stat() + require.NoError(err) + err = f.Truncate(fi.Size() - 1) + require.NoError(err) + err = f.Close() + require.NoError(err) + + db, err = Open(testdir, WithAutoRecovery(autoRecovery)) + t.Logf("err: %s", err) + require.NoError(err) + defer db.Close() + // Check that all values but the last are still intact. + for i := 0; i < 9; i++ { + key, val := makeKeyVal(i) + rval, err := db.Get(key) + require.NoError(err) + require.Equal(val, rval) + } + // Check the index has no more keys than non-corrupted ones. + // i.e: all but the last one. + numKeys := 0 + for range db.Keys() { + numKeys++ + } + if !autoRecovery { + // We are opening without autorepair, and thus are + // in a corrupted state. The index isn't coherent with + // the datafile. + require.Equal(n, numKeys) + return + } + + require.Equal(n-1, numKeys, "The index should have n-1 keys") + + // Double-check explicitly the corrupted one isn't here. + // This check is redundant considering the last two checks, + // but doesn't hurt. + corrKey, _ := makeKeyVal(9) + _, err = db.Get(corrKey) + require.Equal(ErrKeyNotFound, err) + }) + } +} + +func TestLoadIndexes(t *testing.T) { + assert := assert.New(t) + testdir, err1 := ioutil.TempDir("", "bitcask") + assert.NoError(err1) + defer os.RemoveAll(testdir) + + var db *Bitcask + var err error + + t.Run("Setup", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + for i := 0; i < 5; i++ { + key := fmt.Sprintf("key%d", i) + val := fmt.Sprintf("val%d", i) + err := db.Put([]byte(key), []byte(val)) + assert.NoError(err) + } + for i := 0; i < 5; i++ { + key := fmt.Sprintf("foo%d", i) + val := fmt.Sprintf("bar%d", i) + err := db.PutWithTTL([]byte(key), []byte(val), time.Duration(i)*time.Second) + assert.NoError(err) + } + err = db.Close() + assert.NoError(err) + }) + + t.Run("OpenAgain", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + assert.Equal(10, db.trie.Size()) + assert.Equal(5, db.ttlIndex.Size()) + }) +} + +func TestReIndex(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("PutWithExpiry", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar"), []byte("baz"), 0) + assert.NoError(err) + }) + + t.Run("PutWithLargeExpiry", func(t *testing.T) { + err = db.PutWithTTL([]byte("bar1"), []byte("baz1"), time.Hour) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + + 
t.Run("DeleteIndex", func(t *testing.T) { + err := os.Remove(filepath.Join(testdir, "index")) + assert.NoError(err) + err = os.Remove(filepath.Join(testdir, ttlIndexFile)) + assert.NoError(err) + }) + }) + + t.Run("Reopen", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("GetKeyWithExpiry", func(t *testing.T) { + val, err := db.Get([]byte("bar")) + assert.Error(err) + assert.Equal(ErrKeyExpired, err) + assert.Nil(val) + val, err = db.Get([]byte("bar1")) + assert.NoError(err) + assert.Equal([]byte("baz1"), val) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestReIndexDeletedKeys(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Delete", func(t *testing.T) { + err := db.Delete([]byte("foo")) + assert.NoError(err) + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + + t.Run("DeleteIndex", func(t *testing.T) { + err := os.Remove(filepath.Join(testdir, "index")) + assert.NoError(err) + }) + }) + + t.Run("Reopen", func(t *testing.T) { + var ( + db *Bitcask + err error + ) + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + _, err := db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrKeyNotFound, err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestSync(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir, WithSync(true)) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + key := []byte(strings.Repeat(" ", 17)) + value := []byte("foobar") + err = db.Put(key, value) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("hello"), []byte("world")) + assert.NoError(err) + }) +} + +func TestMaxKeySize(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir, WithMaxKeySize(16)) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + key := []byte(strings.Repeat(" ", 17)) + value := []byte("foobar") + err = db.Put(key, value) + assert.Error(err) + assert.Equal(ErrKeyTooLarge, err) + }) +} + +func TestMaxValueSize(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir, WithMaxValueSize(16)) + 
assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + key := []byte("foo") + value := []byte(strings.Repeat(" ", 17)) + err = db.Put(key, value) + assert.Error(err) + assert.Equal(ErrValueTooLarge, err) + }) +} + +func TestStats(t *testing.T) { + var ( + db *Bitcask + err error + ) + + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err := db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Stats", func(t *testing.T) { + stats, err := db.Stats() + assert.NoError(err) + assert.Equal(stats.Datafiles, 0) + assert.Equal(stats.Keys, 1) + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestStatsError(t *testing.T) { + var ( + db *Bitcask + err error + ) + + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err := db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + }) + + t.Run("Stats", func(t *testing.T) { + stats, err := db.Stats() + assert.NoError(err) + assert.Equal(stats.Datafiles, 0) + assert.Equal(stats.Keys, 1) + }) + }) + + t.Run("Test", func(t *testing.T) { + t.Run("FabricatedDestruction", func(t *testing.T) { + // This would never happen in reality :D + // Or would it? :) + err = os.RemoveAll(testdir) + assert.NoError(err) + }) + + t.Run("Stats", func(t *testing.T) { + _, err := db.Stats() + assert.Error(err) + }) + }) +} + +func TestDirFileModeBeforeUmask(t *testing.T) { + assert := assert.New(t) + + t.Run("Setup", func(t *testing.T) { + t.Run("Default DirFileModeBeforeUmask is 0700", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + embeddedDir := filepath.Join(testdir, "cache") + assert.NoError(err) + defer os.RemoveAll(testdir) + + defaultTestMode := os.FileMode(0700) + + db, err := Open(embeddedDir) + assert.NoError(err) + defer db.Close() + err = filepath.Walk(testdir, func(path string, info os.FileInfo, err error) error { + // skip the root directory + if path == testdir { + return nil + } + if info.IsDir() { + // perms for directory on disk are filtered through defaultTestMode, AND umask of user running test. + // this means the mkdir calls can only FURTHER restrict permissions, not grant more (preventing escalatation). + // to make this test OS agnostic, we'll skip using golang.org/x/sys/unix, inferring umask via XOR and AND NOT. 
+ + // create anotherDir with allPerms - to infer umask + anotherDir := filepath.Join(testdir, "temp") + err := os.Mkdir(anotherDir, os.ModePerm) + assert.NoError(err) + defer os.RemoveAll(anotherDir) + + anotherStat, err := os.Stat(anotherDir) + assert.NoError(err) + + // infer umask from anotherDir + umask := os.ModePerm ^ (anotherStat.Mode() & os.ModePerm) + + assert.Equal(info.Mode()&os.ModePerm, defaultTestMode&^umask) + } + return nil + }) + assert.NoError(err) + }) + + t.Run("Dir FileModeBeforeUmask is set via options for all subdirectories", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + embeddedDir := filepath.Join(testdir, "cache") + assert.NoError(err) + defer os.RemoveAll(testdir) + + testMode := os.FileMode(0713) + + db, err := Open(embeddedDir, WithDirFileModeBeforeUmask(testMode)) + assert.NoError(err) + defer db.Close() + err = filepath.Walk(testdir, func(path string, info os.FileInfo, err error) error { + // skip the root directory + if path == testdir { + return nil + } + if info.IsDir() { + // create anotherDir with allPerms - to infer umask + anotherDir := filepath.Join(testdir, "temp") + err := os.Mkdir(anotherDir, os.ModePerm) + assert.NoError(err) + defer os.RemoveAll(anotherDir) + + anotherStat, _ := os.Stat(anotherDir) + + // infer umask from anotherDir + umask := os.ModePerm ^ (anotherStat.Mode() & os.ModePerm) + + assert.Equal(info.Mode()&os.ModePerm, testMode&^umask) + } + return nil + }) + assert.NoError(err) + }) + + }) +} + +func TestFileFileModeBeforeUmask(t *testing.T) { + assert := assert.New(t) + + t.Run("Setup", func(t *testing.T) { + t.Run("Default File FileModeBeforeUmask is 0600", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + defaultTestMode := os.FileMode(0600) + + db, err := Open(testdir) + assert.NoError(err) + defer db.Close() + err = filepath.Walk(testdir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + // the lock file is set within Flock, so ignore it + if filepath.Base(path) == "lock" { + return nil + } + // create aFile with allPerms - to infer umask + aFilePath := filepath.Join(testdir, "temp") + _, err := os.OpenFile(aFilePath, os.O_CREATE, os.ModePerm) + assert.NoError(err) + defer os.RemoveAll(aFilePath) + + fileStat, _ := os.Stat(aFilePath) + + // infer umask from anotherDir + umask := os.ModePerm ^ (fileStat.Mode() & os.ModePerm) + + assert.Equal(info.Mode()&os.ModePerm, defaultTestMode&^umask) + } + return nil + }) + assert.NoError(err) + }) + + t.Run("File FileModeBeforeUmask is set via options for all files", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + testMode := os.FileMode(0673) + + db, err := Open(testdir, WithFileFileModeBeforeUmask(testMode)) + assert.NoError(err) + defer db.Close() + err = filepath.Walk(testdir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + // the lock file is set within Flock, so ignore it + if filepath.Base(path) == "lock" { + return nil + } + // create aFile with allPerms - to infer umask + aFilePath := filepath.Join(testdir, "temp") + _, err := os.OpenFile(aFilePath, os.O_CREATE, os.ModePerm) + assert.NoError(err) + defer os.RemoveAll(aFilePath) + + fileStat, _ := os.Stat(aFilePath) + + // infer umask from anotherDir + umask := os.ModePerm ^ (fileStat.Mode() & os.ModePerm) + + assert.Equal(info.Mode()&os.ModePerm, testMode&^umask) + } + return nil + }) + 
assert.NoError(err) + }) + }) +} + +func TestMaxDatafileSize(t *testing.T) { + var ( + db *Bitcask + err error + ) + + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err := db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + }) + + t.Run("Put", func(t *testing.T) { + for i := 0; i < 10; i++ { + err := db.Put([]byte(fmt.Sprintf("key_%d", i)), []byte("bar")) + assert.NoError(err) + } + }) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Get", func(t *testing.T) { + val, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + + for i := 0; i < 10; i++ { + val, err = db.Get([]byte(fmt.Sprintf("key_%d", i))) + assert.NoError(err) + assert.Equal([]byte("bar"), val) + } + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) +} + +func TestMerge(t *testing.T) { + var ( + db *Bitcask + err error + ) + + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err := db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + + s1, err := db.Stats() + assert.NoError(err) + assert.Equal(0, s1.Datafiles) + assert.Equal(1, s1.Keys) + + t.Run("Put", func(t *testing.T) { + for i := 0; i < 10; i++ { + err := db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + } + }) + + s2, err := db.Stats() + assert.NoError(err) + assert.Equal(5, s2.Datafiles) + assert.Equal(1, s2.Keys) + assert.True(s2.Size > s1.Size) + + t.Run("Merge", func(t *testing.T) { + err := db.Merge() + assert.NoError(err) + }) + + s3, err := db.Stats() + assert.NoError(err) + assert.Equal(2, s3.Datafiles) + assert.Equal(1, s3.Keys) + assert.True(s3.Size > s1.Size) + assert.True(s3.Size < s2.Size) + + t.Run("Sync", func(t *testing.T) { + err = db.Sync() + assert.NoError(err) + }) + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestGetErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("ReadError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("FileID").Return(0) + mockDatafile.On("ReadAt", int64(0), int64(30)).Return( + internal.Entry{}, + ErrMockError, + ) + db.curr = mockDatafile + + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("ChecksumError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir, WithMaxDatafileSize(40)) + assert.NoError(err) + + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("FileID").Return(0) + mockDatafile.On("ReadAt", int64(0), int64(30)).Return( + internal.Entry{ + Checksum: 0x0, + Key: []byte("foo"), + Offset: 0, + Value: 
[]byte("bar"), + }, + nil, + ) + db.curr = mockDatafile + + _, err = db.Get([]byte("foo")) + assert.Error(err) + assert.Equal(ErrChecksumFailed, err) + }) + +} + +func TestPutBorderCases(t *testing.T) { + assert := assert.New(t) + + t.Run("EmptyValue", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + + err = db.Put([]byte("alice"), nil) + assert.NoError(err) + z, err := db.Get([]byte("alice")) + assert.NoError(err) + assert.Empty(z) + }) +} + +func TestPutErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("WriteError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Size").Return(int64(0)) + mockDatafile.On( + "Write", + internal.Entry{ + Checksum: 0x76ff8caa, + Key: []byte("foo"), + Offset: 0, + Value: []byte("bar"), + }, + ).Return(int64(0), int64(0), ErrMockError) + db.curr = mockDatafile + + err = db.Put([]byte("foo"), []byte("bar")) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("SyncError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + db, err := Open(testdir, WithSync(true)) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Size").Return(int64(0)) + mockDatafile.On( + "Write", + internal.Entry{ + Checksum: 0x78240498, + Key: []byte("bar"), + Offset: 0, + Value: []byte("baz"), + }, + ).Return(int64(0), int64(0), nil) + mockDatafile.On("Sync").Return(ErrMockError) + db.curr = mockDatafile + + err = db.Put([]byte("bar"), []byte("baz")) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("EmptyKey", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + err = db.Put(nil, []byte("hello")) + assert.Equal(ErrEmptyKey, err) + + }) + +} + +func TestOpenErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("BadPath", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + assert.NoError(ioutil.WriteFile(filepath.Join(testdir, "foo"), []byte("foo"), 0600)) + + _, err = Open(filepath.Join(testdir, "foo", "tmp.db")) + assert.Error(err) + }) + + t.Run("BadOption", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + withBogusOption := func() Option { + return func(cfg *config.Config) error { + return errors.New("mocked error") + } + } + + _, err = Open(testdir, withBogusOption()) + assert.Error(err) + }) + + t.Run("LoadDatafilesError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir) + assert.NoError(err) + + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + + err = db.Close() + assert.NoError(err) + + // Simulate some horrible that happened to the datafiles! 
+ err = os.Rename(filepath.Join(testdir, "000000000.data"), filepath.Join(testdir, "000000000xxx.data")) + assert.NoError(err) + + _, err = Open(testdir) + assert.Error(err) + assert.Equal("strconv.ParseInt: parsing \"000000000xxx\": invalid syntax", err.Error()) + }) +} + +func TestCloseErrors(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + t.Run("CloseIndexError", func(t *testing.T) { + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + mockIndexer := new(mocks.Indexer) + mockIndexer.On("Save", db.trie, filepath.Join(db.path, "temp_index")).Return(ErrMockError) + db.indexer = mockIndexer + + err = db.Close() + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("CloseDatafilesError", func(t *testing.T) { + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Close").Return(ErrMockError) + db.datafiles[0] = mockDatafile + + err = db.Close() + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("CloseActiveDatafileError", func(t *testing.T) { + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Close").Return(ErrMockError) + db.curr = mockDatafile + + err = db.Close() + assert.Error(err) + assert.Equal(ErrMockError, err) + }) +} + +func TestDeleteErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("WriteError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Size").Return(int64(0)) + mockDatafile.On( + "Write", + internal.Entry{ + Checksum: 0x0, + Key: []byte("foo"), + Offset: 0, + Value: []byte{}, + }, + ).Return(int64(0), int64(0), ErrMockError) + db.curr = mockDatafile + + err = db.Delete([]byte("foo")) + assert.Error(err) + }) +} + +func TestMergeErrors(t *testing.T) { + assert := assert.New(t) + + t.Run("RemoveDatabaseDirectory", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir, WithMaxDatafileSize(32)) + assert.NoError(err) + + assert.NoError(os.RemoveAll(testdir)) + + err = db.Merge() + assert.Error(err) + }) + + t.Run("EmptyCloseError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir) + assert.NoError(err) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Close").Return(ErrMockError) + db.curr = mockDatafile + + err = db.Merge() + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("ReadError", func(t *testing.T) { + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + + db, err := Open(testdir, WithMaxDatafileSize(22)) + assert.NoError(err) + + assert.NoError(db.Put([]byte("foo"), []byte("bar"))) + assert.NoError(db.Put([]byte("bar"), []byte("baz"))) + + mockDatafile := new(mocks.Datafile) + mockDatafile.On("Close").Return(nil) + mockDatafile.On("ReadAt", int64(0), int64(30)).Return( + internal.Entry{}, + ErrMockError, + ) + db.datafiles[0] = mockDatafile + + err = db.Merge() + assert.Error(err) + assert.Equal(ErrMockError, err) 
+ }) + +} + +func TestConcurrent(t *testing.T) { + var ( + db *Bitcask + err error + ) + + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + err = db.Put([]byte("foo"), []byte("bar")) + assert.NoError(err) + }) + }) + + t.Run("Concurrent", func(t *testing.T) { + t.Run("Put", func(t *testing.T) { + f := func(wg *sync.WaitGroup, x int) { + defer func() { + wg.Done() + }() + for i := 0; i <= 100; i++ { + if i%x == 0 { + key := []byte(fmt.Sprintf("k%d", i)) + value := []byte(fmt.Sprintf("v%d", i)) + err := db.Put(key, value) + assert.NoError(err) + } + } + } + + wg := &sync.WaitGroup{} + wg.Add(3) + + go f(wg, 2) + go f(wg, 3) + go f(wg, 5) + + wg.Wait() + }) + + t.Run("Get", func(t *testing.T) { + f := func(wg *sync.WaitGroup, N int) { + defer func() { + wg.Done() + }() + for i := 0; i <= N; i++ { + value, err := db.Get([]byte("foo")) + assert.NoError(err) + assert.Equal([]byte("bar"), value) + } + } + + wg := &sync.WaitGroup{} + wg.Add(3) + go f(wg, 100) + go f(wg, 100) + go f(wg, 100) + + wg.Wait() + }) + + // Test concurrent Put() with concurrent Scan() + t.Run("PutScan", func(t *testing.T) { + doPut := func(wg *sync.WaitGroup, x int) { + defer func() { + wg.Done() + }() + for i := 0; i <= 100; i++ { + if i%x == 0 { + key := []byte(fmt.Sprintf("k%d", i)) + value := []byte(fmt.Sprintf("v%d", i)) + err := db.Put(key, value) + assert.NoError(err) + } + } + } + + doScan := func(wg *sync.WaitGroup, x int) { + defer func() { + wg.Done() + }() + for i := 0; i <= 100; i++ { + if i%x == 0 { + err := db.Scan([]byte("k"), func(key []byte) error { + return nil + }) + assert.NoError(err) + } + } + } + + wg := &sync.WaitGroup{} + wg.Add(6) + + go doPut(wg, 2) + go doPut(wg, 3) + go doPut(wg, 5) + go doScan(wg, 1) + go doScan(wg, 2) + go doScan(wg, 4) + + wg.Wait() + }) + + // XXX: This has data races + /* Test concurrent Scan() with concurrent Merge() + t.Run("ScanMerge", func(t *testing.T) { + doScan := func(wg *sync.WaitGroup, x int) { + defer func() { + wg.Done() + }() + for i := 0; i <= 100; i++ { + if i%x == 0 { + err := db.Scan([]byte("k"), func(key []byte) error { + return nil + }) + assert.NoError(err) + } + } + } + + doMerge := func(wg *sync.WaitGroup, x int) { + defer func() { + wg.Done() + }() + for i := 0; i <= 100; i++ { + if i%x == 0 { + err := db.Merge() + assert.NoError(err) + } + } + } + + wg := &sync.WaitGroup{} + wg.Add(6) + + go doScan(wg, 2) + go doScan(wg, 3) + go doScan(wg, 5) + go doMerge(wg, 1) + go doMerge(wg, 2) + go doMerge(wg, 4) + + wg.Wait() + }) + */ + + t.Run("Close", func(t *testing.T) { + err = db.Close() + assert.NoError(err) + }) + }) +} + +func TestSift(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + var items = map[string][]byte{ + "1": []byte("1"), + "2": []byte("2"), + "3": []byte("3"), + "food": []byte("pizza"), + "foo": []byte([]byte("foo")), + "fooz": []byte("fooz ball"), + "hello": []byte("world"), + } + for k, v := range items { + err = db.Put([]byte(k), v) + assert.NoError(err) + } + }) + }) + + t.Run("SiftErrors", func(t *testing.T) { + err = db.Sift(func(key []byte) (bool, error) { + return 
false, ErrMockError + }) + assert.Equal(ErrMockError, err) + + err = db.SiftScan([]byte("fo"), func(key []byte) (bool, error) { + return true, ErrMockError + }) + assert.Equal(ErrMockError, err) + }) +} +func TestSiftScan(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + var items = map[string][]byte{ + "1": []byte("1"), + "2": []byte("2"), + "3": []byte("3"), + "food": []byte("pizza"), + "foo": []byte([]byte("foo")), + "fooz": []byte("fooz ball"), + "hello": []byte("world"), + } + for k, v := range items { + err = db.Put([]byte(k), v) + assert.NoError(err) + } + }) + }) + + t.Run("SiftScanErrors", func(t *testing.T) { + err = db.SiftScan([]byte("fo"), func(key []byte) (bool, error) { + return false, ErrMockError + }) + assert.Equal(ErrMockError, err) + + err = db.SiftScan([]byte("fo"), func(key []byte) (bool, error) { + return true, ErrMockError + }) + assert.Equal(ErrMockError, err) + }) +} + +func TestScan(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + var items = map[string][]byte{ + "1": []byte("1"), + "2": []byte("2"), + "3": []byte("3"), + "food": []byte("pizza"), + "foo": []byte([]byte("foo")), + "fooz": []byte("fooz ball"), + "hello": []byte("world"), + } + for k, v := range items { + err = db.Put([]byte(k), v) + assert.NoError(err) + } + }) + }) + + t.Run("Scan", func(t *testing.T) { + var ( + vals [][]byte + expected = [][]byte{ + []byte("foo"), + []byte("fooz ball"), + []byte("pizza"), + } + ) + + err = db.Scan([]byte("fo"), func(key []byte) error { + val, err := db.Get(key) + assert.NoError(err) + vals = append(vals, val) + return nil + }) + vals = SortByteArrays(vals) + assert.Equal(expected, vals) + }) + + t.Run("ScanErrors", func(t *testing.T) { + err = db.Scan([]byte("fo"), func(key []byte) error { + return ErrMockError + }) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) +} +func TestSiftRange(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + for i := 1; i < 10; i++ { + key := []byte(fmt.Sprintf("foo_%d", i)) + val := []byte(fmt.Sprintf("%d", i)) + err = db.Put(key, val) + assert.NoError(err) + } + }) + }) + + t.Run("SiftRange", func(t *testing.T) { + var ( + vals [][]byte + expected = [][]byte{ + []byte("1"), + []byte("2"), + []byte("4"), + []byte("5"), + []byte("6"), + []byte("7"), + []byte("8"), + []byte("9"), + } + ) + + err = db.SiftRange([]byte("foo_3"), []byte("foo_7"), func(key []byte) (bool, error) { + val, err := db.Get(key) + assert.NoError(err) + if string(val) == "3" { + return true, nil + } + return false, nil + }) + err = db.Fold(func(key []byte) error { + val, err := db.Get(key) + assert.NoError(err) + vals = append(vals, val) + + return nil + }) + + _, err = db.Get([]byte("foo_3")) + assert.Equal(ErrKeyNotFound, err) + vals = SortByteArrays(vals) + assert.Equal(expected, vals) + }) + + 
t.Run("SiftRangeErrors", func(t *testing.T) { + err = db.SiftRange([]byte("foo_3"), []byte("foo_7"), func(key []byte) (bool, error) { + return true, ErrMockError + }) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("InvalidRange", func(t *testing.T) { + err = db.SiftRange([]byte("foo_3"), []byte("foo_1"), func(key []byte) (bool, error) { + return false, nil + }) + assert.Error(err) + assert.Equal(ErrInvalidRange, err) + }) +} + +func TestRange(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + var db *Bitcask + + t.Run("Setup", func(t *testing.T) { + t.Run("Open", func(t *testing.T) { + db, err = Open(testdir) + assert.NoError(err) + }) + + t.Run("Put", func(t *testing.T) { + for i := 1; i < 10; i++ { + key := []byte(fmt.Sprintf("foo_%d", i)) + val := []byte(fmt.Sprintf("%d", i)) + err = db.Put(key, val) + assert.NoError(err) + } + }) + }) + + t.Run("Range", func(t *testing.T) { + var ( + vals [][]byte + expected = [][]byte{ + []byte("3"), + []byte("4"), + []byte("5"), + []byte("6"), + []byte("7"), + } + ) + + err = db.Range([]byte("foo_3"), []byte("foo_7"), func(key []byte) error { + val, err := db.Get(key) + assert.NoError(err) + vals = append(vals, val) + return nil + }) + vals = SortByteArrays(vals) + assert.Equal(expected, vals) + }) + + t.Run("RangeErrors", func(t *testing.T) { + err = db.Range([]byte("foo_3"), []byte("foo_7"), func(key []byte) error { + return ErrMockError + }) + assert.Error(err) + assert.Equal(ErrMockError, err) + }) + + t.Run("InvalidRange", func(t *testing.T) { + err = db.Range([]byte("foo_3"), []byte("foo_1"), func(key []byte) error { + return nil + }) + assert.Error(err) + assert.Equal(ErrInvalidRange, err) + }) +} + +func TestLocking(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + defer db.Close() + + _, err = Open(testdir) + assert.Error(err) +} + +func TestLockingAfterMerge(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + defer db.Close() + + _, err = Open(testdir) + assert.Error(err) + + err = db.Merge() + assert.NoError(err) + + // This should still error. 
+ _, err = Open(testdir) + assert.Error(err) +} + +func TestGetExpiredInsideFold(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + defer db.Close() + // Add a node to the tree that won't expire + db.Put([]byte("static"), []byte("static")) + // Add a node that expires almost immediately to the tree + db.PutWithTTL([]byte("shortLived"), []byte("shortLived"), 1*time.Millisecond) + db.Put([]byte("skipped"), []byte("skipped")) + db.Put([]byte("static2"), []byte("static2")) + time.Sleep(2 * time.Millisecond) + var arr []string + _ = db.Fold(func(key []byte) error { + val, err := db.Get(key) + switch string(key) { + case "skipped": + fallthrough + case "static2": + fallthrough + case "static": + assert.NoError(err) + assert.Equal(string(val), string(key)) + case "shortLived": + assert.Error(err) + } + arr = append(arr, string(val)) + return nil + }) + assert.Contains(arr, "skipped") +} + +func TestRunGCDeletesAllExpired(t *testing.T) { + assert := assert.New(t) + + testdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + + db, err := Open(testdir) + assert.NoError(err) + defer db.Close() + + // Add a node to the tree that won't expire + db.Put([]byte("static"), []byte("static")) + + // Add a node that expires almost immediately to the tree + db.PutWithTTL([]byte("shortLived"), []byte("shortLived"), 0) + db.PutWithTTL([]byte("longLived"), []byte("longLived"), time.Hour) + db.PutWithTTL([]byte("longLived2"), []byte("longLived2"), time.Hour) + db.PutWithTTL([]byte("shortLived2"), []byte("shortLived2"), 0) + db.PutWithTTL([]byte("shortLived3"), []byte("shortLived3"), 0) + db.Put([]byte("static2"), []byte("static2")) + + // Sleep a bit and run the Garbage Collector + time.Sleep(3 * time.Millisecond) + db.RunGC() + + _ = db.Fold(func(key []byte) error { + _, err := db.Get(key) + assert.NoError(err) + return nil + }) +} + +type benchmarkTestCase struct { + name string + size int +} + +func BenchmarkGet(b *testing.B) { + currentDir, err := os.Getwd() + if err != nil { + b.Fatal(err) + } + + testdir, err := ioutil.TempDir(currentDir, "bitcask_bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(testdir) + + tests := []benchmarkTestCase{ + {"128B", 128}, + {"256B", 256}, + {"512B", 512}, + {"1K", 1024}, + {"2K", 2048}, + {"4K", 4096}, + {"8K", 8192}, + {"16K", 16384}, + {"32K", 32768}, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + b.SetBytes(int64(tt.size)) + + key := []byte("foo") + value := []byte(strings.Repeat(" ", tt.size)) + + options := []Option{ + WithMaxKeySize(uint32(len(key))), + WithMaxValueSize(uint64(tt.size)), + } + db, err := Open(testdir, options...) 
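+			// Each size class opens (and later closes) its own store in the shared
+			// temp dir; the MaxKeySize/MaxValueSize options above simply mirror the
+			// sizes of the key and value being benchmarked.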
+ if err != nil { + b.Fatal(err) + } + + err = db.Put(key, value) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + val, err := db.Get(key) + if err != nil { + b.Fatal(err) + } + if !bytes.Equal(val, value) { + b.Errorf("unexpected value") + } + } + b.StopTimer() + db.Close() + }) + } +} + +func BenchmarkPut(b *testing.B) { + currentDir, err := os.Getwd() + if err != nil { + b.Fatal(err) + } + + tests := []benchmarkTestCase{ + {"128B", 128}, + {"256B", 256}, + {"1K", 1024}, + {"2K", 2048}, + {"4K", 4096}, + {"8K", 8192}, + {"16K", 16384}, + {"32K", 32768}, + } + + variants := map[string][]Option{ + "NoSync": { + WithSync(false), + }, + "Sync": { + WithSync(true), + }, + } + + for name, options := range variants { + testdir, err := ioutil.TempDir(currentDir, "bitcask_bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(testdir) + + db, err := Open(testdir, options...) + if err != nil { + b.Fatal(err) + } + defer db.Close() + + for _, tt := range tests { + b.Run(tt.name+name, func(b *testing.B) { + b.SetBytes(int64(tt.size)) + + key := []byte("foo") + value := []byte(strings.Repeat(" ", tt.size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := db.Put(key, value) + if err != nil { + b.Fatal(err) + } + } + }) + } + } +} + +func BenchmarkScan(b *testing.B) { + currentDir, err := os.Getwd() + if err != nil { + b.Fatal(err) + } + + testdir, err := ioutil.TempDir(currentDir, "bitcask_bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(testdir) + + db, err := Open(testdir) + if err != nil { + b.Fatal(err) + } + defer db.Close() + + var items = map[string][]byte{ + "1": []byte("1"), + "2": []byte("2"), + "3": []byte("3"), + "food": []byte("pizza"), + "foo": []byte([]byte("foo")), + "fooz": []byte("fooz ball"), + "hello": []byte("world"), + } + for k, v := range items { + err := db.Put([]byte(k), v) + if err != nil { + b.Fatal(err) + } + } + + var expected = [][]byte{[]byte("foo"), []byte("food"), []byte("fooz")} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var keys [][]byte + err = db.Scan([]byte("fo"), func(key []byte) error { + keys = append(keys, key) + return nil + }) + if err != nil { + b.Fatal(err) + } + keys = SortByteArrays(keys) + if !reflect.DeepEqual(expected, keys) { + b.Fatal(fmt.Errorf("expected keys=#%v got=%#v", expected, keys)) + } + } +} diff --git a/v2/doc.go b/v2/doc.go new file mode 100644 index 0000000..09f36a4 --- /dev/null +++ b/v2/doc.go @@ -0,0 +1,3 @@ +// Package bitcask implements a high-performance key-value store based on a +// WAL and LSM. +package bitcask diff --git a/v2/doc_test.go b/v2/doc_test.go new file mode 100644 index 0000000..8cc50e5 --- /dev/null +++ b/v2/doc_test.go @@ -0,0 +1,13 @@ +package bitcask + +func Example() { + _, _ = Open("path/to/db") +} + +func Example_withOptions() { + opts := []Option{ + WithMaxKeySize(1024), + WithMaxValueSize(4096), + } + _, _ = Open("path/to/db", opts...) +} diff --git a/v2/errors.go b/v2/errors.go new file mode 100644 index 0000000..e465ebc --- /dev/null +++ b/v2/errors.go @@ -0,0 +1,77 @@ +package bitcask + +import ( + "errors" + "fmt" +) + +var ( + // ErrKeyNotFound is the error returned when a key is not found + ErrKeyNotFound = errors.New("error: key not found") + + // ErrKeyTooLarge is the error returned for a key that exceeds the + // maximum allowed key size (configured with WithMaxKeySize). 
+ ErrKeyTooLarge = errors.New("error: key too large") + + // ErrKeyExpired is the error returned when a key is queried which has + // already expired (due to ttl) + ErrKeyExpired = errors.New("error: key expired") + + // ErrEmptyKey is the error returned for a value with an empty key. + ErrEmptyKey = errors.New("error: empty key") + + // ErrValueTooLarge is the error returned for a value that exceeds the + // maximum allowed value size (configured with WithMaxValueSize). + ErrValueTooLarge = errors.New("error: value too large") + + // ErrChecksumFailed is the error returned if a key/value retrieved does + // not match its CRC checksum + ErrChecksumFailed = errors.New("error: checksum failed") + + // ErrDatabaseLocked is the error returned if the database is locked + // (typically opened by another process) + ErrDatabaseLocked = errors.New("error: database locked") + + // ErrInvalidRange is the error returned when the range scan is invalid + ErrInvalidRange = errors.New("error: invalid range") + + // ErrInvalidVersion is the error returned when the database version is invalid + ErrInvalidVersion = errors.New("error: invalid db version") + + // ErrMergeInProgress is the error returned if merge is called when already a merge + // is in progress + ErrMergeInProgress = errors.New("error: merge already in progress") +) + +// ErrBadConfig is the error returned on failure to load the database config +type ErrBadConfig struct { + Err error +} + +func (e *ErrBadConfig) Is(target error) bool { + if _, ok := target.(*ErrBadConfig); ok { + return true + } + return errors.Is(e.Err, target) +} +func (e *ErrBadConfig) Unwrap() error { return e.Err } +func (e *ErrBadConfig) Error() string { + return fmt.Sprintf("error reading config.json: %s", e.Err) +} + +// ErrBadMetadata is the error returned on failure to load the database metadata +type ErrBadMetadata struct { + Err error +} + +func (e *ErrBadMetadata) Is(target error) bool { + if _, ok := target.(*ErrBadMetadata); ok { + return true + } + return errors.Is(e.Err, target) +} + +func (e *ErrBadMetadata) Unwrap() error { return e.Err } +func (e *ErrBadMetadata) Error() string { + return fmt.Sprintf("error reading meta.json: %s", e.Err) +} diff --git a/v2/go.mod b/v2/go.mod new file mode 100644 index 0000000..cf9e850 --- /dev/null +++ b/v2/go.mod @@ -0,0 +1,26 @@ +module git.mills.io/prologic/bitcask/v2 + +go 1.17 + +require ( + git.mills.io/prologic/bitcask v1.0.2 + github.com/abcum/lcp v0.0.0-20201209214815-7a3f3840be81 + github.com/gofrs/flock v0.8.0 + github.com/pkg/errors v0.9.1 + github.com/plar/go-adaptive-radix-tree v1.0.4 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/cobra v0.0.7 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.8.1 + github.com/stretchr/testify v1.7.0 + github.com/tidwall/redcon v1.4.1 + golang.org/x/exp v0.0.0-20200228211341-fcea875c7e85 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.2.0 // indirect + golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 0000000..29867c9 --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,668 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +git.mills.io/prologic/bitcask v1.0.2 h1:Iy9x3mVVd1fB+SWY0LTmsSDPGbzMrd7zCZPKbsb/tDA= +git.mills.io/prologic/bitcask v1.0.2/go.mod h1:ppXpR3haeYrijyJDleAkSGH3p90w6sIHxEA/7UHMxH4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/abcum/lcp v0.0.0-20201209214815-7a3f3840be81 h1:uHogIJ9bXH75ZYrXnVShHIyywFiUZ7OOabwd9Sfd8rw= +github.com/abcum/lcp v0.0.0-20201209214815-7a3f3840be81/go.mod h1:6ZvnjTZX1LNo1oLpfaJK8h+MXqHxcBFBIwkgsv+xlv0= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock 
v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox 
v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/plar/go-adaptive-radix-tree v1.0.4 h1:Ucd8R6RH2E7RW8ZtDKrsWyOD3paG2qqJO0I20WQ8oWQ= +github.com/plar/go-adaptive-radix-tree v1.0.4/go.mod h1:Ot8d28EII3i7Lv4PSvBlF8ejiD/CtRYDuPsySJbSaK8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/btree v0.4.2/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= 
+github.com/tidwall/redcon v1.4.1/go.mod h1:XwNPFbJ4ShWNNSA2Jazhbdje6jegTCcwFR6mfaADvHA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200228211341-fcea875c7e85 h1:jqhIzSw5SQNkbu5hOGpgMHhkfXxrbsLJdkIRcX19gCY= +golang.org/x/exp v0.0.0-20200228211341-fcea875c7e85/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/v2/internal/config/config.go b/v2/internal/config/config.go new file mode 100644 index 0000000..8f5dd30 --- /dev/null +++ b/v2/internal/config/config.go @@ 
-0,0 +1,51 @@ +package config + +import ( + "encoding/json" + "io/ioutil" + "os" +) + +// Config contains the bitcask configuration parameters +type Config struct { + MaxDatafileSize int `json:"max_datafile_size"` + MaxKeySize uint32 `json:"max_key_size"` + MaxValueSize uint64 `json:"max_value_size"` + Sync bool `json:"sync"` + AutoRecovery bool `json:"autorecovery"` + DBVersion uint32 `json:"db_version"` + DirFileModeBeforeUmask os.FileMode + FileFileModeBeforeUmask os.FileMode +} + +// Load loads a configuration from the given path +func Load(path string) (*Config, error) { + var cfg Config + + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &cfg); err != nil { + return nil, err + } + + return &cfg, nil +} + +// Save saves the configuration to the provided path +func (c *Config) Save(path string) error { + + data, err := json.Marshal(c) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, data, c.FileFileModeBeforeUmask) + if err != nil { + return err + } + + return nil +} diff --git a/v2/internal/data/codec/decoder.go b/v2/internal/data/codec/decoder.go new file mode 100644 index 0000000..b8086b0 --- /dev/null +++ b/v2/internal/data/codec/decoder.go @@ -0,0 +1,110 @@ +package codec + +import ( + "encoding/binary" + "io" + "time" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/pkg/errors" +) + +var ( + errInvalidKeyOrValueSize = errors.New("key/value size is invalid") + errCantDecodeOnNilEntry = errors.New("can't decode on nil entry") + errTruncatedData = errors.New("data is truncated") +) + +// NewDecoder creates a streaming Entry decoder. +func NewDecoder(r io.Reader, maxKeySize uint32, maxValueSize uint64) *Decoder { + return &Decoder{ + r: r, + maxKeySize: maxKeySize, + maxValueSize: maxValueSize, + } +} + +// Decoder wraps an underlying io.Reader and allows you to stream +// Entry decodings on it. 
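A minimal usage sketch of the streaming codec added here (not part of the diff itself): it round-trips a couple of entries through the Encoder/Decoder pair, assuming only the import paths and signatures introduced by this patch. The keys, values, and the 64/1024 size limits are arbitrary illustration values, not library defaults.

package main

import (
    "bytes"
    "fmt"
    "io"
    "log"

    "git.mills.io/prologic/bitcask/v2/internal"
    "git.mills.io/prologic/bitcask/v2/internal/data/codec"
)

func main() {
    var buf bytes.Buffer

    // Encode two entries back to back into a single buffer.
    enc := codec.NewEncoder(&buf)
    for _, kv := range [][2]string{{"hello", "world"}, {"foo", "bar"}} {
        e := internal.NewEntry([]byte(kv[0]), []byte(kv[1]), nil) // nil expiry = no TTL
        if _, err := enc.Encode(e); err != nil {
            log.Fatal(err)
        }
    }

    // Stream the entries back out until the reader is exhausted.
    dec := codec.NewDecoder(&buf, 64, 1024) // maxKeySize=64, maxValueSize=1024 (arbitrary)
    for {
        var e internal.Entry
        if _, err := dec.Decode(&e); err != nil {
            if err == io.EOF {
                break
            }
            log.Fatal(err)
        }
        fmt.Printf("%s => %s\n", e.Key, e.Value)
    }
}

The same loop shape is what the datafile reader below relies on: Decode returns io.EOF once the underlying stream is drained, so callers can iterate until that sentinel.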
+type Decoder struct { + r io.Reader + maxKeySize uint32 + maxValueSize uint64 +} + +// Decode decodes the next Entry from the current stream +func (d *Decoder) Decode(v *internal.Entry) (int64, error) { + if v == nil { + return 0, errCantDecodeOnNilEntry + } + + prefixBuf := make([]byte, keySize+valueSize) + + _, err := io.ReadFull(d.r, prefixBuf) + if err != nil { + return 0, err + } + + actualKeySize, actualValueSize, err := getKeyValueSizes(prefixBuf, d.maxKeySize, d.maxValueSize) + if err != nil { + return 0, err + } + + buf := make([]byte, uint64(actualKeySize)+actualValueSize+checksumSize+ttlSize) + if _, err = io.ReadFull(d.r, buf); err != nil { + return 0, errTruncatedData + } + + decodeWithoutPrefix(buf, actualKeySize, v) + return int64(keySize + valueSize + uint64(actualKeySize) + actualValueSize + checksumSize + ttlSize), nil +} + +// DecodeEntry decodes a serialized entry +func DecodeEntry(b []byte, e *internal.Entry, maxKeySize uint32, maxValueSize uint64) error { + valueOffset, _, err := getKeyValueSizes(b, maxKeySize, maxValueSize) + if err != nil { + return errors.Wrap(err, "key/value sizes are invalid") + } + + decodeWithoutPrefix(b[keySize+valueSize:], valueOffset, e) + + return nil +} + +func getKeyValueSizes(buf []byte, maxKeySize uint32, maxValueSize uint64) (uint32, uint64, error) { + actualKeySize := binary.BigEndian.Uint32(buf[:keySize]) + actualValueSize := binary.BigEndian.Uint64(buf[keySize:]) + + if (maxKeySize > 0 && actualKeySize > maxKeySize) || (maxValueSize > 0 && actualValueSize > maxValueSize) || actualKeySize == 0 { + + return 0, 0, errInvalidKeyOrValueSize + } + + return actualKeySize, actualValueSize, nil +} + +func decodeWithoutPrefix(buf []byte, valueOffset uint32, v *internal.Entry) { + v.Key = buf[:valueOffset] + v.Value = buf[valueOffset : len(buf)-checksumSize-ttlSize] + v.Checksum = binary.BigEndian.Uint32(buf[len(buf)-checksumSize-ttlSize : len(buf)-ttlSize]) + v.Expiry = getKeyExpiry(buf) +} + +func getKeyExpiry(buf []byte) *time.Time { + expiry := binary.BigEndian.Uint64(buf[len(buf)-ttlSize:]) + if expiry == uint64(0) { + return nil + } + t := time.Unix(int64(expiry), 0).UTC() + return &t +} + +// IsCorruptedData indicates if the error correspondes to possible data corruption +func IsCorruptedData(err error) bool { + switch err { + case errCantDecodeOnNilEntry, errInvalidKeyOrValueSize, errTruncatedData: + return true + default: + return false + } +} diff --git a/v2/internal/data/codec/decoder_test.go b/v2/internal/data/codec/decoder_test.go new file mode 100644 index 0000000..86d7d08 --- /dev/null +++ b/v2/internal/data/codec/decoder_test.go @@ -0,0 +1,130 @@ +package codec + +import ( + "bytes" + "encoding/binary" + "io" + "testing" + "time" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/stretchr/testify/assert" +) + +func TestDecodeOnNilEntry(t *testing.T) { + t.Parallel() + assert := assert.New(t) + decoder := NewDecoder(&bytes.Buffer{}, 1, 1) + + _, err := decoder.Decode(nil) + if assert.Error(err) { + assert.Equal(errCantDecodeOnNilEntry, err) + } +} + +func TestShortPrefix(t *testing.T) { + t.Parallel() + assert := assert.New(t) + maxKeySize, maxValueSize := uint32(10), uint64(20) + prefix := make([]byte, keySize+valueSize) + binary.BigEndian.PutUint32(prefix, 1) + binary.BigEndian.PutUint64(prefix[keySize:], 1) + + truncBytesCount := 2 + buf := bytes.NewBuffer(prefix[:keySize+valueSize-truncBytesCount]) + decoder := NewDecoder(buf, maxKeySize, maxValueSize) + _, err := decoder.Decode(&internal.Entry{}) + if 
assert.Error(err) { + assert.Equal(io.ErrUnexpectedEOF, err) + } +} + +func TestInvalidValueKeySizes(t *testing.T) { + assert := assert.New(t) + maxKeySize, maxValueSize := uint32(10), uint64(20) + + tests := []struct { + keySize uint32 + valueSize uint64 + name string + }{ + {keySize: 0, valueSize: 5, name: "zero key size"}, //zero value size is correct for tombstones + {keySize: 11, valueSize: 5, name: "key size overflow"}, + {keySize: 5, valueSize: 21, name: "value size overflow"}, + {keySize: 11, valueSize: 21, name: "key and value size overflow"}, + } + + for i := range tests { + i := i + t.Run(tests[i].name, func(t *testing.T) { + t.Parallel() + prefix := make([]byte, keySize+valueSize) + binary.BigEndian.PutUint32(prefix, tests[i].keySize) + binary.BigEndian.PutUint64(prefix[keySize:], tests[i].valueSize) + + buf := bytes.NewBuffer(prefix) + decoder := NewDecoder(buf, maxKeySize, maxValueSize) + _, err := decoder.Decode(&internal.Entry{}) + if assert.Error(err) { + assert.Equal(errInvalidKeyOrValueSize, err) + } + }) + } +} + +func TestTruncatedData(t *testing.T) { + assert := assert.New(t) + maxKeySize, maxValueSize := uint32(10), uint64(20) + + key := []byte("foo") + value := []byte("bar") + data := make([]byte, keySize+valueSize+len(key)+len(value)+checksumSize) + + binary.BigEndian.PutUint32(data, uint32(len(key))) + binary.BigEndian.PutUint64(data[keySize:], uint64(len(value))) + copy(data[keySize+valueSize:], key) + copy(data[keySize+valueSize+len(key):], value) + copy(data[keySize+valueSize+len(key)+len(value):], bytes.Repeat([]byte("0"), checksumSize)) + + tests := []struct { + data []byte + name string + }{ + {data: data[:keySize+valueSize+len(key)-1], name: "truncated key"}, + {data: data[:keySize+valueSize+len(key)+len(value)-1], name: "truncated value"}, + {data: data[:keySize+valueSize+len(key)+len(value)+checksumSize-1], name: "truncated checksum"}, + } + + for i := range tests { + i := i + t.Run(tests[i].name, func(t *testing.T) { + t.Parallel() + buf := bytes.NewBuffer(tests[i].data) + decoder := NewDecoder(buf, maxKeySize, maxValueSize) + _, err := decoder.Decode(&internal.Entry{}) + if assert.Error(err) { + assert.Equal(errTruncatedData, err) + } + }) + } +} + +func TestDecodeWithoutPrefix(t *testing.T) { + assert := assert.New(t) + e := internal.Entry{} + buf := []byte{0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 7, 109, 121, 107, 101, 121, 109, 121, 118, 97, 108, 117, 101, 0, 6, 81, 189, 0, 0, 0, 0, 95, 117, 28, 0} + valueOffset := uint32(5) + mockTime := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC) + expectedEntry := internal.Entry{ + Key: []byte("mykey"), + Value: []byte("myvalue"), + Checksum: 414141, + Expiry: &mockTime, + } + decodeWithoutPrefix(buf[keySize+valueSize:], valueOffset, &e) + assert.Equal(expectedEntry.Key, e.Key) + assert.Equal(expectedEntry.Value, e.Value) + assert.Equal(expectedEntry.Checksum, e.Checksum) + assert.Equal(expectedEntry.Offset, e.Offset) + assert.Equal(*expectedEntry.Expiry, *e.Expiry) +} diff --git a/v2/internal/data/codec/encoder.go b/v2/internal/data/codec/encoder.go new file mode 100644 index 0000000..4ef9362 --- /dev/null +++ b/v2/internal/data/codec/encoder.go @@ -0,0 +1,69 @@ +package codec + +import ( + "bufio" + "encoding/binary" + "io" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/pkg/errors" +) + +const ( + keySize = 4 + valueSize = 8 + checksumSize = 4 + ttlSize = 8 + MetaInfoSize = keySize + valueSize + checksumSize + ttlSize +) + +// NewEncoder creates a streaming Entry encoder. 
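To make the frame layout behind these constants concrete, here is a small sketch (not part of the diff) that checks the size arithmetic: every encoded entry is MetaInfoSize (4-byte key length + 8-byte value length + 4-byte CRC-32 + 8-byte expiry = 24 bytes) of fixed metadata plus the raw key and value bytes. The sample key/value are arbitrary.

package main

import (
    "bytes"
    "fmt"
    "log"

    "git.mills.io/prologic/bitcask/v2/internal"
    "git.mills.io/prologic/bitcask/v2/internal/data/codec"
)

func main() {
    key, value := []byte("mykey"), []byte("myvalue")

    var buf bytes.Buffer
    n, err := codec.NewEncoder(&buf).Encode(internal.NewEntry(key, value, nil))
    if err != nil {
        log.Fatal(err)
    }

    // Frame: [4B key len][8B value len][key][value][4B CRC-32][8B expiry, 0 = none]
    want := int64(codec.MetaInfoSize + len(key) + len(value))
    fmt.Println(n == want, buf.Len() == int(want)) // true true
}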
+func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: bufio.NewWriter(w)} +} + +// Encoder wraps an underlying io.Writer and allows you to stream +// Entry encodings on it. +type Encoder struct { + w *bufio.Writer +} + +// Encode takes any Entry and streams it to the underlying writer. +// Messages are framed with a key-length and value-length prefix. +func (e *Encoder) Encode(msg internal.Entry) (int64, error) { + var bufKeyValue = make([]byte, keySize+valueSize) + binary.BigEndian.PutUint32(bufKeyValue[:keySize], uint32(len(msg.Key))) + binary.BigEndian.PutUint64(bufKeyValue[keySize:keySize+valueSize], uint64(len(msg.Value))) + if _, err := e.w.Write(bufKeyValue); err != nil { + return 0, errors.Wrap(err, "failed writing key & value length prefix") + } + + if _, err := e.w.Write(msg.Key); err != nil { + return 0, errors.Wrap(err, "failed writing key data") + } + if _, err := e.w.Write(msg.Value); err != nil { + return 0, errors.Wrap(err, "failed writing value data") + } + + bufChecksumSize := bufKeyValue[:checksumSize] + binary.BigEndian.PutUint32(bufChecksumSize, msg.Checksum) + if _, err := e.w.Write(bufChecksumSize); err != nil { + return 0, errors.Wrap(err, "failed writing checksum data") + } + + bufTTL := bufKeyValue[:ttlSize] + if msg.Expiry == nil { + binary.BigEndian.PutUint64(bufTTL, uint64(0)) + } else { + binary.BigEndian.PutUint64(bufTTL, uint64(msg.Expiry.Unix())) + } + if _, err := e.w.Write(bufTTL); err != nil { + return 0, errors.Wrap(err, "failed writing ttl data") + } + + if err := e.w.Flush(); err != nil { + return 0, errors.Wrap(err, "failed flushing data") + } + + return int64(keySize + valueSize + len(msg.Key) + len(msg.Value) + checksumSize + ttlSize), nil +} diff --git a/v2/internal/data/codec/encoder_test.go b/v2/internal/data/codec/encoder_test.go new file mode 100644 index 0000000..6dad38f --- /dev/null +++ b/v2/internal/data/codec/encoder_test.go @@ -0,0 +1,32 @@ +package codec + +import ( + "bytes" + "encoding/hex" + "testing" + "time" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/stretchr/testify/assert" +) + +func TestEncode(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + var buf bytes.Buffer + mockTime := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC) + encoder := NewEncoder(&buf) + _, err := encoder.Encode(internal.Entry{ + Key: []byte("mykey"), + Value: []byte("myvalue"), + Checksum: 414141, + Offset: 424242, + Expiry: &mockTime, + }) + + expectedHex := "0000000500000000000000076d796b65796d7976616c7565000651bd000000005f751c00" + if assert.NoError(err) { + assert.Equal(expectedHex, hex.EncodeToString(buf.Bytes())) + } +} diff --git a/v2/internal/data/datafile.go b/v2/internal/data/datafile.go new file mode 100644 index 0000000..07e258f --- /dev/null +++ b/v2/internal/data/datafile.go @@ -0,0 +1,200 @@ +package data + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "git.mills.io/prologic/bitcask/v2/internal" + "git.mills.io/prologic/bitcask/v2/internal/data/codec" + "github.com/pkg/errors" + "golang.org/x/exp/mmap" +) + +const ( + defaultDatafileFilename = "%09d.data" +) + +var ( + errReadonly = errors.New("error: read only datafile") + errReadError = errors.New("error: read error") +) + +// Datafile is an interface that represents a readable and writeable datafile +type Datafile interface { + FileID() int + Name() string + Close() error + Sync() error + Size() int64 + Read() (internal.Entry, int64, error) + ReadAt(index, size int64) (internal.Entry, error) + Write(internal.Entry) (int64, int64, error) +} + +type 
datafile struct { + sync.RWMutex + + id int + r *os.File + ra *mmap.ReaderAt + w *os.File + offset int64 + dec *codec.Decoder + enc *codec.Encoder + maxKeySize uint32 + maxValueSize uint64 +} + +// NewDatafile opens an existing datafile +func NewDatafile(path string, id int, readonly bool, maxKeySize uint32, maxValueSize uint64, fileMode os.FileMode) (Datafile, error) { + var ( + r *os.File + ra *mmap.ReaderAt + w *os.File + err error + ) + + fn := filepath.Join(path, fmt.Sprintf(defaultDatafileFilename, id)) + + if !readonly { + w, err = os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, fileMode) + if err != nil { + return nil, err + } + } + + r, err = os.Open(fn) + if err != nil { + return nil, err + } + stat, err := r.Stat() + if err != nil { + return nil, errors.Wrap(err, "error calling Stat()") + } + + if readonly { + ra, err = mmap.Open(fn) + if err != nil { + return nil, err + } + } + + offset := stat.Size() + + dec := codec.NewDecoder(r, maxKeySize, maxValueSize) + enc := codec.NewEncoder(w) + + return &datafile{ + id: id, + r: r, + ra: ra, + w: w, + offset: offset, + dec: dec, + enc: enc, + maxKeySize: maxKeySize, + maxValueSize: maxValueSize, + }, nil +} + +func (df *datafile) FileID() int { + return df.id +} + +func (df *datafile) Name() string { + return df.r.Name() +} + +func (df *datafile) Close() error { + defer func() { + if df.ra != nil { + df.ra.Close() + } + df.r.Close() + }() + + // Readonly datafile -- Nothing further to close on the write side + if df.w == nil { + return nil + } + + err := df.Sync() + if err != nil { + return err + } + return df.w.Close() +} + +func (df *datafile) Sync() error { + if df.w == nil { + return nil + } + return df.w.Sync() +} + +func (df *datafile) Size() int64 { + df.RLock() + defer df.RUnlock() + return df.offset +} + +// Read reads the next entry from the datafile +func (df *datafile) Read() (e internal.Entry, n int64, err error) { + df.Lock() + defer df.Unlock() + + n, err = df.dec.Decode(&e) + if err != nil { + return + } + + return +} + +// ReadAt the entry located at index offset with expected serialized size +func (df *datafile) ReadAt(index, size int64) (e internal.Entry, err error) { + var n int + + b := make([]byte, size) + + df.RLock() + defer df.RUnlock() + + if df.ra != nil { + n, err = df.ra.ReadAt(b, index) + } else { + n, err = df.r.ReadAt(b, index) + } + if err != nil { + return + } + if int64(n) != size { + err = errReadError + return + } + + codec.DecodeEntry(b, &e, df.maxKeySize, df.maxValueSize) + + return +} + +func (df *datafile) Write(e internal.Entry) (int64, int64, error) { + if df.w == nil { + return -1, 0, errReadonly + } + + df.Lock() + defer df.Unlock() + + e.Offset = df.offset + + n, err := df.enc.Encode(e) + if err != nil { + return -1, 0, err + } + df.offset += n + + return e.Offset, n, nil +} diff --git a/v2/internal/data/recover.go b/v2/internal/data/recover.go new file mode 100644 index 0000000..bfd045b --- /dev/null +++ b/v2/internal/data/recover.go @@ -0,0 +1,95 @@ +package data + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "git.mills.io/prologic/bitcask/v2/internal" + "git.mills.io/prologic/bitcask/v2/internal/config" + "git.mills.io/prologic/bitcask/v2/internal/data/codec" +) + +// CheckAndRecover checks and recovers the last datafile. +// If the datafile isn't corrupted, this is a noop. If it is, +// the longest non-corrupted prefix will be kept and the rest +// will be *deleted*. Also, the index file is also *deleted* which +// will be automatically recreated on next startup. 
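Since recovery deletes data (the corrupted tail of the last datafile and the index), a quick sketch of how an embedding application might drive it is useful; this is not part of the diff, and the directory and config-file paths are made-up examples. It only assumes config.Load, the AutoRecovery flag, and data.CheckAndRecover as added by this patch.

package main

import (
    "log"
    "path/filepath"

    "git.mills.io/prologic/bitcask/v2/internal/config"
    "git.mills.io/prologic/bitcask/v2/internal/data"
)

func main() {
    dbPath := "/tmp/mydb" // hypothetical datastore directory

    cfg, err := config.Load(filepath.Join(dbPath, "config.json")) // hypothetical config location
    if err != nil {
        log.Fatalf("loading config: %s", err)
    }

    if cfg.AutoRecovery {
        // Truncates the last datafile to its longest valid prefix and
        // deletes the index file so it is rebuilt on the next open.
        if err := data.CheckAndRecover(dbPath, cfg); err != nil {
            log.Fatalf("recovery failed: %s", err)
        }
    }
}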
+func CheckAndRecover(path string, cfg *config.Config) error { + dfs, err := internal.GetDatafiles(path) + if err != nil { + return fmt.Errorf("scanning datafiles: %s", err) + } + if len(dfs) == 0 { + return nil + } + f := dfs[len(dfs)-1] + recovered, err := recoverDatafile(f, cfg) + if err != nil { + return fmt.Errorf("error recovering data file: %s", err) + } + if recovered { + if err := os.Remove(filepath.Join(path, "index")); err != nil { + return fmt.Errorf("error deleting the index on recovery: %s", err) + } + } + return nil +} + +func recoverDatafile(path string, cfg *config.Config) (recovered bool, err error) { + f, err := os.Open(path) + if err != nil { + return false, fmt.Errorf("opening the datafile: %s", err) + } + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } + }() + dir, file := filepath.Split(path) + rPath := filepath.Join(dir, fmt.Sprintf("%s.recovered", file)) + fr, err := os.OpenFile(rPath, os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return false, fmt.Errorf("creating the recovered datafile: %w", err) + } + defer func() { + closeErr := fr.Close() + if err == nil { + err = closeErr + } + }() + + dec := codec.NewDecoder(f, cfg.MaxKeySize, cfg.MaxValueSize) + enc := codec.NewEncoder(fr) + e := internal.Entry{} + + corrupted := false + for !corrupted { + _, err = dec.Decode(&e) + if err == io.EOF { + break + } + if codec.IsCorruptedData(err) { + corrupted = true + continue + } + if err != nil { + return false, fmt.Errorf("unexpected error while reading datafile: %w", err) + } + if _, err := enc.Encode(e); err != nil { + return false, fmt.Errorf("writing to recovered datafile: %w", err) + } + } + if !corrupted { + if err := os.Remove(fr.Name()); err != nil { + return false, fmt.Errorf("can't remove temporal recovered datafile: %w", err) + } + return false, nil + } + if err := os.Rename(rPath, path); err != nil { + return false, fmt.Errorf("removing corrupted file: %s", err) + } + return true, nil +} diff --git a/v2/internal/entry.go b/v2/internal/entry.go new file mode 100644 index 0000000..090f2f5 --- /dev/null +++ b/v2/internal/entry.go @@ -0,0 +1,27 @@ +package internal + +import ( + "hash/crc32" + "time" +) + +// Entry represents a key/value in the database +type Entry struct { + Checksum uint32 + Key []byte + Offset int64 + Value []byte + Expiry *time.Time +} + +// NewEntry creates a new `Entry` with the given `key` and `value` +func NewEntry(key, value []byte, expiry *time.Time) Entry { + checksum := crc32.ChecksumIEEE(value) + + return Entry{ + Checksum: checksum, + Key: key, + Value: value, + Expiry: expiry, + } +} diff --git a/v2/internal/index/codec_index.go b/v2/internal/index/codec_index.go new file mode 100644 index 0000000..28338d9 --- /dev/null +++ b/v2/internal/index/codec_index.go @@ -0,0 +1,134 @@ +package index + +import ( + "encoding/binary" + "io" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/pkg/errors" + art "github.com/plar/go-adaptive-radix-tree" +) + +var ( + errTruncatedKeySize = errors.New("key size is truncated") + errTruncatedKeyData = errors.New("key data is truncated") + errTruncatedData = errors.New("data is truncated") + errKeySizeTooLarge = errors.New("key size too large") +) + +const ( + int32Size = 4 + int64Size = 8 + fileIDSize = int32Size + offsetSize = int64Size + sizeSize = int64Size +) + +func readKeyBytes(r io.Reader, maxKeySize uint32) ([]byte, error) { + s := make([]byte, int32Size) + _, err := io.ReadFull(r, s) + if err != nil { + if err == io.EOF { + return nil, err + } 
+ return nil, errors.Wrap(errTruncatedKeySize, err.Error()) + } + size := binary.BigEndian.Uint32(s) + if maxKeySize > 0 && size > uint32(maxKeySize) { + return nil, errKeySizeTooLarge + } + + b := make([]byte, size) + _, err = io.ReadFull(r, b) + if err != nil { + return nil, errors.Wrap(errTruncatedKeyData, err.Error()) + } + return b, nil +} + +func writeBytes(b []byte, w io.Writer) error { + s := make([]byte, int32Size) + binary.BigEndian.PutUint32(s, uint32(len(b))) + _, err := w.Write(s) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +func readItem(r io.Reader) (internal.Item, error) { + buf := make([]byte, (fileIDSize + offsetSize + sizeSize)) + _, err := io.ReadFull(r, buf) + if err != nil { + return internal.Item{}, errors.Wrap(errTruncatedData, err.Error()) + } + + return internal.Item{ + FileID: int(binary.BigEndian.Uint32(buf[:fileIDSize])), + Offset: int64(binary.BigEndian.Uint64(buf[fileIDSize:(fileIDSize + offsetSize)])), + Size: int64(binary.BigEndian.Uint64(buf[(fileIDSize + offsetSize):])), + }, nil +} + +func writeItem(item internal.Item, w io.Writer) error { + buf := make([]byte, (fileIDSize + offsetSize + sizeSize)) + binary.BigEndian.PutUint32(buf[:fileIDSize], uint32(item.FileID)) + binary.BigEndian.PutUint64(buf[fileIDSize:(fileIDSize+offsetSize)], uint64(item.Offset)) + binary.BigEndian.PutUint64(buf[(fileIDSize+offsetSize):], uint64(item.Size)) + _, err := w.Write(buf) + if err != nil { + return err + } + return nil +} + +// ReadIndex reads a persisted from a io.Reader into a Tree +func readIndex(r io.Reader, t art.Tree, maxKeySize uint32) error { + for { + key, err := readKeyBytes(r, maxKeySize) + if err != nil { + if err == io.EOF { + break + } + return err + } + + item, err := readItem(r) + if err != nil { + return err + } + + t.Insert(key, item) + } + + return nil +} + +func writeIndex(t art.Tree, w io.Writer) (err error) { + t.ForEach(func(node art.Node) bool { + err = writeBytes(node.Key(), w) + if err != nil { + return false + } + + item := node.Value().(internal.Item) + err := writeItem(item, w) + return err == nil + }) + return +} + +// IsIndexCorruption returns a boolean indicating whether the error +// is known to report a corruption data issue +func IsIndexCorruption(err error) bool { + cause := errors.Cause(err) + switch cause { + case errKeySizeTooLarge, errTruncatedData, errTruncatedKeyData, errTruncatedKeySize: + return true + } + return false +} diff --git a/v2/internal/index/codec_index_test.go b/v2/internal/index/codec_index_test.go new file mode 100644 index 0000000..6be6945 --- /dev/null +++ b/v2/internal/index/codec_index_test.go @@ -0,0 +1,126 @@ +package index + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "testing" + + "git.mills.io/prologic/bitcask/v2/internal" + "github.com/pkg/errors" + art "github.com/plar/go-adaptive-radix-tree" +) + +const ( + base64SampleTree = "AAAABGFiY2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARhYmNlAAAAAQAAAAAAAAABAAAAAAAAAAEAAAAEYWJjZgAAAAIAAAAAAAAAAgAAAAAAAAACAAAABGFiZ2QAAAADAAAAAAAAAAMAAAAAAAAAAw==" +) + +func TestWriteIndex(t *testing.T) { + at, expectedSerializedSize := getSampleTree() + + var b bytes.Buffer + err := writeIndex(at, &b) + if err != nil { + t.Fatalf("writing index failed: %v", err) + } + if b.Len() != expectedSerializedSize { + t.Fatalf("incorrect size of serialied index: expected %d, got: %d", expectedSerializedSize, b.Len()) + } + sampleTreeBytes, _ := base64.StdEncoding.DecodeString(base64SampleTree) + if 
!bytes.Equal(b.Bytes(), sampleTreeBytes) { + t.Fatalf("unexpected serialization of the tree") + } +} + +func TestReadIndex(t *testing.T) { + sampleTreeBytes, _ := base64.StdEncoding.DecodeString(base64SampleTree) + b := bytes.NewBuffer(sampleTreeBytes) + + at := art.New() + err := readIndex(b, at, 1024) + if err != nil { + t.Fatalf("error while deserializing correct sample tree: %v", err) + } + + atsample, _ := getSampleTree() + if atsample.Size() != at.Size() { + t.Fatalf("trees aren't the same size, expected %v, got %v", atsample.Size(), at.Size()) + } + atsample.ForEach(func(node art.Node) bool { + _, found := at.Search(node.Key()) + if !found { + t.Fatalf("expected node wasn't found: %s", node.Key()) + } + return true + }) +} + +func TestReadCorruptedData(t *testing.T) { + sampleBytes, _ := base64.StdEncoding.DecodeString(base64SampleTree) + + t.Run("truncated", func(t *testing.T) { + table := []struct { + name string + err error + data []byte + }{ + {name: "key-size-first-item", err: errTruncatedKeySize, data: sampleBytes[:2]}, + {name: "key-data-second-item", err: errTruncatedKeyData, data: sampleBytes[:6]}, + {name: "key-size-second-item", err: errTruncatedKeySize, data: sampleBytes[:(int32Size+4+fileIDSize+offsetSize+sizeSize)+2]}, + {name: "key-data-second-item", err: errTruncatedKeyData, data: sampleBytes[:(int32Size+4+fileIDSize+offsetSize+sizeSize)+6]}, + {name: "data", err: errTruncatedData, data: sampleBytes[:int32Size+4+(fileIDSize+offsetSize+sizeSize-3)]}, + } + + for i := range table { + t.Run(table[i].name, func(t *testing.T) { + bf := bytes.NewBuffer(table[i].data) + + if err := readIndex(bf, art.New(), 1024); !IsIndexCorruption(err) || errors.Cause(err) != table[i].err { + t.Fatalf("expected %v, got %v", table[i].err, err) + } + }) + } + }) + + t.Run("overflow", func(t *testing.T) { + overflowKeySize := make([]byte, len(sampleBytes)) + copy(overflowKeySize, sampleBytes) + binary.BigEndian.PutUint32(overflowKeySize, 1025) + + overflowDataSize := make([]byte, len(sampleBytes)) + copy(overflowDataSize, sampleBytes) + binary.BigEndian.PutUint32(overflowDataSize[int32Size+4+fileIDSize+offsetSize:], 1025) + + table := []struct { + name string + err error + maxKeySize uint32 + data []byte + }{ + {name: "key-data-overflow", err: errKeySizeTooLarge, maxKeySize: 1024, data: overflowKeySize}, + } + + for i := range table { + t.Run(table[i].name, func(t *testing.T) { + bf := bytes.NewBuffer(table[i].data) + + if err := readIndex(bf, art.New(), table[i].maxKeySize); !IsIndexCorruption(err) || errors.Cause(err) != table[i].err { + t.Fatalf("expected %v, got %v", table[i].err, err) + } + }) + } + }) + +} + +func getSampleTree() (art.Tree, int) { + at := art.New() + keys := [][]byte{[]byte("abcd"), []byte("abce"), []byte("abcf"), []byte("abgd")} + expectedSerializedSize := 0 + for i := range keys { + at.Insert(keys[i], internal.Item{FileID: i, Offset: int64(i), Size: int64(i)}) + expectedSerializedSize += int32Size + len(keys[i]) + fileIDSize + offsetSize + sizeSize + } + + return at, expectedSerializedSize +} diff --git a/v2/internal/index/index.go b/v2/internal/index/index.go new file mode 100644 index 0000000..88be466 --- /dev/null +++ b/v2/internal/index/index.go @@ -0,0 +1,59 @@ +package index + +import ( + "os" + + "git.mills.io/prologic/bitcask/v2/internal" + art "github.com/plar/go-adaptive-radix-tree" +) + +// Indexer is an interface for loading and saving the index (an Adaptive Radix Tree) +type Indexer interface { + Load(path string, maxkeySize uint32) (art.Tree, bool, 
error) + Save(t art.Tree, path string) error +} + +// NewIndexer returns an instance of the default `Indexer` implemtnation +// which perists the index (an Adaptive Radix Tree) as a binary blob on file +func NewIndexer() Indexer { + return &indexer{} +} + +type indexer struct{} + +func (i *indexer) Load(path string, maxKeySize uint32) (art.Tree, bool, error) { + t := art.New() + + if !internal.Exists(path) { + return t, false, nil + } + + f, err := os.Open(path) + if err != nil { + return t, true, err + } + defer f.Close() + + if err := readIndex(f, t, maxKeySize); err != nil { + return t, true, err + } + return t, true, nil +} + +func (i *indexer) Save(t art.Tree, path string) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + if err := writeIndex(t, f); err != nil { + return err + } + + if err := f.Sync(); err != nil { + return err + } + + return f.Close() +} diff --git a/v2/internal/index/ttl_index.go b/v2/internal/index/ttl_index.go new file mode 100644 index 0000000..d2e1808 --- /dev/null +++ b/v2/internal/index/ttl_index.go @@ -0,0 +1,71 @@ +package index + +import ( + "encoding/binary" + "io" + "os" + "time" + + "git.mills.io/prologic/bitcask/v2/internal" + art "github.com/plar/go-adaptive-radix-tree" +) + +type ttlIndexer struct{} + +func NewTTLIndexer() Indexer { + return ttlIndexer{} +} + +func (i ttlIndexer) Save(t art.Tree, path string) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + buf := make([]byte, int64Size) + for it := t.Iterator(); it.HasNext(); { + node, err := it.Next() + if err != nil { + return err + } + // save key + err = writeBytes(node.Key(), f) + if err != nil { + return err + } + // save key ttl + binary.BigEndian.PutUint64(buf, uint64(node.Value().(time.Time).Unix())) + _, err = f.Write(buf) + if err != nil { + return err + } + } + return f.Sync() +} + +func (i ttlIndexer) Load(path string, maxKeySize uint32) (art.Tree, bool, error) { + t := art.New() + if !internal.Exists(path) { + return t, false, nil + } + f, err := os.Open(path) + if err != nil { + return t, true, err + } + buf := make([]byte, int64Size) + for { + key, err := readKeyBytes(f, maxKeySize) + if err != nil { + if err == io.EOF { + break + } + return t, true, err + } + _, err = io.ReadFull(f, buf) + if err != nil { + return t, true, err + } + expiry := time.Unix(int64(binary.BigEndian.Uint64(buf)), 0).UTC() + t.Insert(key, expiry) + } + return t, true, nil +} diff --git a/v2/internal/index/ttl_index_test.go b/v2/internal/index/ttl_index_test.go new file mode 100644 index 0000000..c456f0b --- /dev/null +++ b/v2/internal/index/ttl_index_test.go @@ -0,0 +1,54 @@ +package index + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + art "github.com/plar/go-adaptive-radix-tree" + assert2 "github.com/stretchr/testify/assert" +) + +func Test_TTLIndexer(t *testing.T) { + assert := assert2.New(t) + tempDir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(tempDir) + + currTime := time.Date(2020, 12, 27, 0, 0, 0, 0, time.UTC) + trie := art.New() + + t.Run("LoadEmpty", func(t *testing.T) { + newTrie, found, err := NewTTLIndexer().Load(filepath.Join(tempDir, "ttl_index"), 4) + assert.NoError(err) + assert.False(found) + assert.Equal(trie, newTrie) + }) + + t.Run("Save", func(t *testing.T) { + trie.Insert([]byte("key"), currTime) + err := NewTTLIndexer().Save(trie, filepath.Join(tempDir, "ttl_index")) + 
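	// Each record Save writes is the same 4-byte-length-prefixed key framing
	// as the main index, followed by the expiry as 8 big-endian bytes of Unix
	// seconds. A hedged sketch of how a store typically consumes the loaded
	// TTL index (hypothetical helper, not part of this patch):
	//
	//	func isExpired(ttlIndex art.Tree, key []byte) bool {
	//		expiry, found := ttlIndex.Search(key)
	//		if !found {
	//			return false
	//		}
	//		return expiry.(time.Time).Before(time.Now().UTC())
	//	}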
assert.NoError(err) + trie.Insert([]byte("foo"), currTime.Add(24*time.Hour)) + err = NewTTLIndexer().Save(trie, filepath.Join(tempDir, "ttl_index")) + assert.NoError(err) + trie.Insert([]byte("key"), currTime.Add(-24*time.Hour)) + err = NewTTLIndexer().Save(trie, filepath.Join(tempDir, "ttl_index")) + assert.NoError(err) + }) + + t.Run("Load", func(t *testing.T) { + newTrie, found, err := NewTTLIndexer().Load(filepath.Join(tempDir, "ttl_index"), 4) + assert.NoError(err) + assert.True(found) + assert.Equal(2, newTrie.Size()) + value, found := newTrie.Search([]byte("key")) + assert.True(found) + assert.Equal(currTime.Add(-24*time.Hour), value) + value, found = newTrie.Search([]byte("foo")) + assert.True(found) + assert.Equal(currTime.Add(24*time.Hour), value) + }) +} diff --git a/v2/internal/item.go b/v2/internal/item.go new file mode 100644 index 0000000..6d245de --- /dev/null +++ b/v2/internal/item.go @@ -0,0 +1,10 @@ +package internal + +// Item represents the location of the value on disk. This is used by the +// internal Adaptive Radix Tree to hold an in-memory structure mapping keys to +// locations on disk of where the value(s) can be read from. +type Item struct { + FileID int `json:"fileid"` + Offset int64 `json:"offset"` + Size int64 `json:"size"` +} diff --git a/v2/internal/metadata/metadata.go b/v2/internal/metadata/metadata.go new file mode 100644 index 0000000..b7d0499 --- /dev/null +++ b/v2/internal/metadata/metadata.go @@ -0,0 +1,22 @@ +package metadata + +import ( + "os" + + "git.mills.io/prologic/bitcask/v2/internal" +) + +type MetaData struct { + IndexUpToDate bool `json:"index_up_to_date"` + ReclaimableSpace int64 `json:"reclaimable_space"` +} + +func (m *MetaData) Save(path string, mode os.FileMode) error { + return internal.SaveJsonToFile(m, path, mode) +} + +func Load(path string) (*MetaData, error) { + var m MetaData + err := internal.LoadFromJsonFile(path, &m) + return &m, err +} diff --git a/v2/internal/mocks/datafile.go b/v2/internal/mocks/datafile.go new file mode 100644 index 0000000..bd1eda3 --- /dev/null +++ b/v2/internal/mocks/datafile.go @@ -0,0 +1,158 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
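// The mocks in this package are generated by mockery from the data.Datafile
// and index.Indexer interfaces and are driven through testify's expectation
// API. A hedged sketch of stubbing the Datafile mock in a test (illustrative
// only; mock.Anything comes from github.com/stretchr/testify/mock):
//
//	df := new(mocks.Datafile)
//	df.On("FileID").Return(0)
//	df.On("Size").Return(int64(0))
//	df.On("Write", mock.Anything).Return(int64(0), int64(42), nil)
//	// ...hand df to code that expects a data.Datafile, then:
//	df.AssertExpectations(t)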
+ +package mocks + +import internal "git.mills.io/prologic/bitcask/v2/internal" +import mock "github.com/stretchr/testify/mock" + +// Datafile is an autogenerated mock type for the Datafile type +type Datafile struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Datafile) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FileID provides a mock function with given fields: +func (_m *Datafile) FileID() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *Datafile) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Read provides a mock function with given fields: +func (_m *Datafile) Read() (internal.Entry, int64, error) { + ret := _m.Called() + + var r0 internal.Entry + if rf, ok := ret.Get(0).(func() internal.Entry); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(internal.Entry) + } + + var r1 int64 + if rf, ok := ret.Get(1).(func() int64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(int64) + } + + var r2 error + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ReadAt provides a mock function with given fields: index, size +func (_m *Datafile) ReadAt(index int64, size int64) (internal.Entry, error) { + ret := _m.Called(index, size) + + var r0 internal.Entry + if rf, ok := ret.Get(0).(func(int64, int64) internal.Entry); ok { + r0 = rf(index, size) + } else { + r0 = ret.Get(0).(internal.Entry) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64, int64) error); ok { + r1 = rf(index, size) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Size provides a mock function with given fields: +func (_m *Datafile) Size() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// Sync provides a mock function with given fields: +func (_m *Datafile) Sync() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: _a0 +func (_m *Datafile) Write(_a0 internal.Entry) (int64, int64, error) { + ret := _m.Called(_a0) + + var r0 int64 + if rf, ok := ret.Get(0).(func(internal.Entry) int64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(int64) + } + + var r1 int64 + if rf, ok := ret.Get(1).(func(internal.Entry) int64); ok { + r1 = rf(_a0) + } else { + r1 = ret.Get(1).(int64) + } + + var r2 error + if rf, ok := ret.Get(2).(func(internal.Entry) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} diff --git a/v2/internal/mocks/indexer.go b/v2/internal/mocks/indexer.go new file mode 100644 index 0000000..7680aac --- /dev/null +++ b/v2/internal/mocks/indexer.go @@ -0,0 +1,56 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
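// The Indexer mock below pairs with the Datafile mock for fault injection:
// returning an error from Load is an easy way to exercise index-rebuild and
// error paths that are awkward to trigger with the real file-backed indexer.
// A hedged sketch (illustrative only):
//
//	idx := new(mocks.Indexer)
//	idx.On("Load", mock.Anything, mock.Anything).
//		Return(nil, false, errors.New("simulated index load failure"))
//	_, _, err := idx.Load("index", 64)
//	// err is the simulated failure, and the call is recorded for
//	// idx.AssertExpectations(t).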
+ +package mocks + +import art "github.com/plar/go-adaptive-radix-tree" + +import mock "github.com/stretchr/testify/mock" + +// Indexer is an autogenerated mock type for the Indexer type +type Indexer struct { + mock.Mock +} + +// Load provides a mock function with given fields: path, maxkeySize +func (_m *Indexer) Load(path string, maxkeySize uint32) (art.Tree, bool, error) { + ret := _m.Called(path, maxkeySize) + + var r0 art.Tree + if rf, ok := ret.Get(0).(func(string, uint32) art.Tree); ok { + r0 = rf(path, maxkeySize) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(art.Tree) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(string, uint32) bool); ok { + r1 = rf(path, maxkeySize) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(string, uint32) error); ok { + r2 = rf(path, maxkeySize) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Save provides a mock function with given fields: t, path +func (_m *Indexer) Save(t art.Tree, path string) error { + ret := _m.Called(t, path) + + var r0 error + if rf, ok := ret.Get(0).(func(art.Tree, string) error); ok { + r0 = rf(t, path) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/v2/internal/utils.go b/v2/internal/utils.go new file mode 100644 index 0000000..1524300 --- /dev/null +++ b/v2/internal/utils.go @@ -0,0 +1,112 @@ +package internal + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" +) + +// Exists returns `true` if the given `path` on the current file system exists +func Exists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +// DirSize returns the space occupied by the given `path` on disk on the current +// file system. +func DirSize(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return err + }) + return size, err +} + +// GetDatafiles returns a list of all data files stored in the database path +// given by `path`. All datafiles are identified by the the glob `*.data` and +// the basename is represented by a monotonic increasing integer. +// The returned files are *sorted* in increasing order. +func GetDatafiles(path string) ([]string, error) { + fns, err := filepath.Glob(fmt.Sprintf("%s/*.data", path)) + if err != nil { + return nil, err + } + sort.Strings(fns) + return fns, nil +} + +// ParseIds will parse a list of datafiles as returned by `GetDatafiles` and +// extract the id part and return a slice of ints. 
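// For example, datafiles created by this package use zero-padded numeric
// basenames ("000000000.data", "000000001.data", ...), so a hedged
// illustration:
//
//	ids, _ := ParseIds([]string{"/db/000000000.data", "/db/000000010.data"})
//	// ids == []int{0, 10}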
+func ParseIds(fns []string) ([]int, error) { + var ids []int + for _, fn := range fns { + fn = filepath.Base(fn) + ext := filepath.Ext(fn) + if ext != ".data" { + continue + } + id, err := strconv.ParseInt(strings.TrimSuffix(fn, ext), 10, 32) + if err != nil { + return nil, err + } + ids = append(ids, int(id)) + } + sort.Ints(ids) + return ids, nil +} + +// Copy copies source contents to destination +func Copy(src, dst string, exclude []string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + relPath := strings.Replace(path, src, "", 1) + if relPath == "" { + return nil + } + for _, e := range exclude { + matched, err := filepath.Match(e, info.Name()) + if err != nil { + return err + } + if matched { + return nil + } + } + if info.IsDir() { + return os.Mkdir(filepath.Join(dst, relPath), info.Mode()) + } + var data, err1 = ioutil.ReadFile(filepath.Join(src, relPath)) + if err1 != nil { + return err1 + } + return ioutil.WriteFile(filepath.Join(dst, relPath), data, info.Mode()) + }) +} + +// SaveJsonToFile converts v into json and store in file identified by path +func SaveJsonToFile(v interface{}, path string, mode os.FileMode) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + return ioutil.WriteFile(path, b, mode) +} + +// LoadFromJsonFile reads file located at `path` and put its content in json format in v +func LoadFromJsonFile(path string, v interface{}) error { + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} diff --git a/v2/internal/utils_test.go b/v2/internal/utils_test.go new file mode 100644 index 0000000..4ad1645 --- /dev/null +++ b/v2/internal/utils_test.go @@ -0,0 +1,108 @@ +package internal + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Copy(t *testing.T) { + assert := assert.New(t) + t.Run("CopyDir", func(t *testing.T) { + tempsrc, err := ioutil.TempDir("", "test") + assert.NoError(err) + defer os.RemoveAll(tempsrc) + var f *os.File + + tempdir, err := ioutil.TempDir(tempsrc, "") + assert.NoError(err) + + f, err = os.OpenFile(filepath.Join(tempsrc, "file1"), os.O_WRONLY|os.O_CREATE, 0755) + assert.NoError(err) + n, err := f.WriteString("test123") + assert.Equal(7, n) + assert.NoError(err) + f.Close() + + f, err = os.OpenFile(filepath.Join(tempsrc, "file2"), os.O_WRONLY|os.O_CREATE, 0755) + assert.NoError(err) + n, err = f.WriteString("test1234") + assert.Equal(8, n) + assert.NoError(err) + f.Close() + + f, err = os.OpenFile(filepath.Join(tempsrc, "file3"), os.O_WRONLY|os.O_CREATE, 0755) + assert.NoError(err) + f.Close() + + tempdst, err := ioutil.TempDir("", "backup") + assert.NoError(err) + defer os.RemoveAll(tempdst) + err = Copy(tempsrc, tempdst, []string{"file3"}) + assert.NoError(err) + buf := make([]byte, 10) + + exists := Exists(filepath.Join(tempdst, filepath.Base(tempdir))) + assert.Equal(true, exists) + + f, err = os.Open(filepath.Join(tempdst, "file1")) + assert.NoError(err) + n, err = f.Read(buf[:7]) + assert.NoError(err) + assert.Equal(7, n) + assert.Equal([]byte("test123"), buf[:7]) + _, err = f.Read(buf) + assert.Equal(io.EOF, err) + f.Close() + + f, err = os.Open(filepath.Join(tempdst, "file2")) + assert.NoError(err) + n, err = f.Read(buf[:8]) + assert.NoError(err) + assert.Equal(8, n) + assert.Equal([]byte("test1234"), buf[:8]) + _, err = f.Read(buf) + assert.Equal(io.EOF, err) + f.Close() + + exists = Exists(filepath.Join(tempdst, "file3")) + assert.Equal(false, 
exists) + }) +} + +func Test_SaveAndLoad(t *testing.T) { + assert := assert.New(t) + t.Run("save and load", func(t *testing.T) { + tempdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(tempdir) + type test struct { + Value bool `json:"value"` + } + m := test{Value: true} + err = SaveJsonToFile(&m, filepath.Join(tempdir, "meta.json"), 0755) + assert.NoError(err) + m1 := test{} + err = LoadFromJsonFile(filepath.Join(tempdir, "meta.json"), &m1) + assert.NoError(err) + assert.Equal(m, m1) + }) + + t.Run("save and load error", func(t *testing.T) { + tempdir, err := ioutil.TempDir("", "bitcask") + assert.NoError(err) + defer os.RemoveAll(tempdir) + type test struct { + Value bool `json:"value"` + } + err = SaveJsonToFile(make(chan int), filepath.Join(tempdir, "meta.json"), 0755) + assert.Error(err) + m1 := test{} + err = LoadFromJsonFile(filepath.Join(tempdir, "meta.json"), &m1) + assert.Error(err) + }) +} diff --git a/v2/internal/version.go b/v2/internal/version.go new file mode 100644 index 0000000..79a1bbf --- /dev/null +++ b/v2/internal/version.go @@ -0,0 +1,18 @@ +package internal + +import ( + "fmt" +) + +var ( + // Version release version + Version = "0.0.1" + + // Commit will be overwritten automatically by the build system + Commit = "HEAD" +) + +// FullVersion returns the full version and commit hash +func FullVersion() string { + return fmt.Sprintf("%s@%s", Version, Commit) +} diff --git a/v2/internal/version_test.go b/v2/internal/version_test.go new file mode 100644 index 0000000..4d66e4e --- /dev/null +++ b/v2/internal/version_test.go @@ -0,0 +1,15 @@ +package internal + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFullVersion(t *testing.T) { + assert := assert.New(t) + + expected := fmt.Sprintf("%s@%s", Version, Commit) + assert.Equal(expected, FullVersion()) +} diff --git a/v2/options.go b/v2/options.go new file mode 100644 index 0000000..66dec71 --- /dev/null +++ b/v2/options.go @@ -0,0 +1,118 @@ +package bitcask + +import ( + "os" + + "git.mills.io/prologic/bitcask/v2/internal/config" +) + +const ( + // DefaultDirFileModeBeforeUmask is the default os.FileMode used when creating directories + DefaultDirFileModeBeforeUmask = os.FileMode(0700) + + // DefaultFileFileModeBeforeUmask is the default os.FileMode used when creating files + DefaultFileFileModeBeforeUmask = os.FileMode(0600) + + // DefaultMaxDatafileSize is the default maximum datafile size in bytes + DefaultMaxDatafileSize = 1 << 20 // 1MB + + // DefaultMaxKeySize is the default maximum key size in bytes + DefaultMaxKeySize = uint32(64) // 64 bytes + + // DefaultMaxValueSize is the default value size in bytes + DefaultMaxValueSize = uint64(1 << 16) // 65KB + + // DefaultSync is the default file synchronization action + DefaultSync = false + + // DefaultAutoRecovery is the default auto-recovery action. + + CurrentDBVersion = uint32(1) +) + +// Option is a function that takes a config struct and modifies it +type Option func(*config.Config) error + +func withConfig(src *config.Config) Option { + return func(cfg *config.Config) error { + cfg.MaxDatafileSize = src.MaxDatafileSize + cfg.MaxKeySize = src.MaxKeySize + cfg.MaxValueSize = src.MaxValueSize + cfg.Sync = src.Sync + cfg.AutoRecovery = src.AutoRecovery + cfg.DirFileModeBeforeUmask = src.DirFileModeBeforeUmask + cfg.FileFileModeBeforeUmask = src.FileFileModeBeforeUmask + return nil + } +} + +// WithAutoRecovery sets auto recovery of data and index file recreation. 
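// When enabled, the recovery pass (CheckAndRecover in internal/data/recover.go
// above) rewrites a damaged last datafile up to its last decodable entry and
// deletes the on-disk index so it can be rebuilt. A hedged usage sketch,
// assuming the package's usual Open(path, ...Option) constructor applies this
// option:
//
//	db, err := bitcask.Open("/tmp/db", bitcask.WithAutoRecovery(true))
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()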
+// IMPORTANT: This flag MUST BE used only if a proper backup was made of all +// the existing datafiles. +func WithAutoRecovery(enabled bool) Option { + return func(cfg *config.Config) error { + cfg.AutoRecovery = enabled + return nil + } +} + +// WithDirFileModeBeforeUmask sets the FileMode used for each new file created. +func WithDirFileModeBeforeUmask(mode os.FileMode) Option { + return func(cfg *config.Config) error { + cfg.DirFileModeBeforeUmask = mode + return nil + } +} + +// WithFileFileModeBeforeUmask sets the FileMode used for each new file created. +func WithFileFileModeBeforeUmask(mode os.FileMode) Option { + return func(cfg *config.Config) error { + cfg.FileFileModeBeforeUmask = mode + return nil + } +} + +// WithMaxDatafileSize sets the maximum datafile size option +func WithMaxDatafileSize(size int) Option { + return func(cfg *config.Config) error { + cfg.MaxDatafileSize = size + return nil + } +} + +// WithMaxKeySize sets the maximum key size option +func WithMaxKeySize(size uint32) Option { + return func(cfg *config.Config) error { + cfg.MaxKeySize = size + return nil + } +} + +// WithMaxValueSize sets the maximum value size option +func WithMaxValueSize(size uint64) Option { + return func(cfg *config.Config) error { + cfg.MaxValueSize = size + return nil + } +} + +// WithSync causes Sync() to be called on every key/value written increasing +// durability and safety at the expense of performance +func WithSync(sync bool) Option { + return func(cfg *config.Config) error { + cfg.Sync = sync + return nil + } +} + +func newDefaultConfig() *config.Config { + return &config.Config{ + MaxDatafileSize: DefaultMaxDatafileSize, + MaxKeySize: DefaultMaxKeySize, + MaxValueSize: DefaultMaxValueSize, + Sync: DefaultSync, + DirFileModeBeforeUmask: DefaultDirFileModeBeforeUmask, + FileFileModeBeforeUmask: DefaultFileFileModeBeforeUmask, + DBVersion: CurrentDBVersion, + } +} diff --git a/v2/scripts/migrations/v0_to_v1.go b/v2/scripts/migrations/v0_to_v1.go new file mode 100644 index 0000000..8d0e9aa --- /dev/null +++ b/v2/scripts/migrations/v0_to_v1.go @@ -0,0 +1,159 @@ +package migrations + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + + "git.mills.io/prologic/bitcask/v2/internal" +) + +const ( + keySize = 4 + valueSize = 8 + checksumSize = 4 + ttlSize = 8 + defaultDatafileFilename = "%09d.data" +) + +func ApplyV0ToV1(dir string, maxDatafileSize int) error { + temp, err := prepare(dir) + if err != nil { + return err + } + defer os.RemoveAll(temp) + err = apply(dir, temp, maxDatafileSize) + if err != nil { + return err + } + return cleanup(dir, temp) +} + +func prepare(dir string) (string, error) { + return ioutil.TempDir(dir, "migration") +} + +func apply(dir, temp string, maxDatafileSize int) error { + datafilesPath, err := internal.GetDatafiles(dir) + if err != nil { + return err + } + var id, newOffset int + datafile, err := getNewDatafile(temp, id) + if err != nil { + return err + } + id++ + for _, p := range datafilesPath { + df, err := os.Open(p) + if err != nil { + return err + } + var off int64 + for { + entry, err := getSingleEntry(df, off) + if err == io.EOF { + break + } + if err != nil { + return err + } + if newOffset+len(entry) > maxDatafileSize { + err = datafile.Sync() + if err != nil { + return err + } + datafile, err = getNewDatafile(temp, id) + if err != nil { + return err + } + id++ + newOffset = 0 + } + newEntry := make([]byte, len(entry)+ttlSize) + copy(newEntry[:len(entry)], entry) + n, err := 
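			// newEntry is the original v0 record with ttlSize (8) extra zero
			// bytes appended: the zero bytes fill the TTL field that v1
			// records carry and v0 records lacked, since migrated entries
			// have no expiry to preserve. The write just below appends it to
			// the current output datafile, and newOffset accumulates the
			// bytes written so the maxDatafileSize check above knows when to
			// rotate to a fresh datafile.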
datafile.Write(newEntry) + if err != nil { + return err + } + newOffset += n + off += int64(len(entry)) + } + } + return datafile.Sync() +} + +func cleanup(dir, temp string) error { + files, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + for _, file := range files { + if !file.IsDir() { + err := os.RemoveAll(path.Join([]string{dir, file.Name()}...)) + if err != nil { + return err + } + } + } + files, err = ioutil.ReadDir(temp) + if err != nil { + return err + } + for _, file := range files { + err := os.Rename( + path.Join([]string{temp, file.Name()}...), + path.Join([]string{dir, file.Name()}...), + ) + if err != nil { + return err + } + } + return nil +} + +func getNewDatafile(path string, id int) (*os.File, error) { + fn := filepath.Join(path, fmt.Sprintf(defaultDatafileFilename, id)) + return os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) +} + +func getSingleEntry(f *os.File, offset int64) ([]byte, error) { + prefixBuf, err := readPrefix(f, offset) + if err != nil { + return nil, err + } + actualKeySize, actualValueSize := getKeyValueSize(prefixBuf) + entryBuf, err := read(f, uint64(actualKeySize)+actualValueSize+checksumSize, offset+keySize+valueSize) + if err != nil { + return nil, err + } + return append(prefixBuf, entryBuf...), nil +} + +func readPrefix(f *os.File, offset int64) ([]byte, error) { + prefixBuf := make([]byte, keySize+valueSize) + _, err := f.ReadAt(prefixBuf, offset) + if err != nil { + return nil, err + } + return prefixBuf, nil +} + +func read(f *os.File, bufSize uint64, offset int64) ([]byte, error) { + buf := make([]byte, bufSize) + _, err := f.ReadAt(buf, offset) + if err != nil { + return nil, err + } + return buf, nil +} + +func getKeyValueSize(buf []byte) (uint32, uint64) { + actualKeySize := binary.BigEndian.Uint32(buf[:keySize]) + actualValueSize := binary.BigEndian.Uint64(buf[keySize:]) + return actualKeySize, actualValueSize +} diff --git a/v2/scripts/migrations/v0_to_v1_test.go b/v2/scripts/migrations/v0_to_v1_test.go new file mode 100644 index 0000000..0dcb0c7 --- /dev/null +++ b/v2/scripts/migrations/v0_to_v1_test.go @@ -0,0 +1,58 @@ +package migrations + +import ( + "encoding/binary" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ApplyV0ToV1(t *testing.T) { + assert := assert.New(t) + testdir, err := ioutil.TempDir("/tmp", "bitcask") + assert.NoError(err) + defer os.RemoveAll(testdir) + w0, err := os.OpenFile(filepath.Join(testdir, "000000000.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + assert.NoError(err) + w1, err := os.OpenFile(filepath.Join(testdir, "000000001.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + assert.NoError(err) + w2, err := os.OpenFile(filepath.Join(testdir, "000000002.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + assert.NoError(err) + defer w0.Close() + defer w1.Close() + defer w2.Close() + buf := make([]byte, 104) + binary.BigEndian.PutUint32(buf[:4], 5) + binary.BigEndian.PutUint64(buf[4:12], 7) + copy(buf[12:28], "mykeymyvalue0AAA") + binary.BigEndian.PutUint32(buf[28:32], 3) + binary.BigEndian.PutUint64(buf[32:40], 5) + copy(buf[40:52], "keyvalue0BBB") + _, err = w0.Write(buf[:52]) + assert.NoError(err) + _, err = w1.Write(buf[:52]) + assert.NoError(err) + _, err = w2.Write(buf[:52]) + assert.NoError(err) + err = ApplyV0ToV1(testdir, 104) + assert.NoError(err) + r0, err := os.Open(filepath.Join(testdir, "000000000.data")) + assert.NoError(err) + defer r0.Close() + n, err := io.ReadFull(r0, buf) 
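	// What the hex assertions below encode: a v0 record is a 4-byte key size,
	// an 8-byte value size, the key, the value and a 4-byte checksum, and the
	// migration appends an 8-byte zeroed TTL. The two sample records
	// ("mykey"/"myvalue" and "key"/"value") therefore grow from 28 and 24
	// bytes to 36 and 32 bytes. With maxDatafileSize = 104, the rotation in
	// apply packs 36 + 32 + 36 = 104 bytes into 000000000.data (read here)
	// and the remaining 32 + 36 + 32 = 100 bytes into 000000001.data.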
+ assert.NoError(err) + assert.Equal(104, n) + assert.Equal("0000000500000000000000076d796b65796d7976616c75653041414100000000000000000000000300000000000000056b657976616c75653042424200000000000000000000000500000000000000076d796b65796d7976616c7565304141410000000000000000", hex.EncodeToString(buf)) + r1, err := os.Open(filepath.Join(testdir, "000000001.data")) + assert.NoError(err) + defer r1.Close() + n, err = io.ReadFull(r1, buf[:100]) + assert.NoError(err) + assert.Equal(100, n) + assert.Equal("0000000300000000000000056b657976616c75653042424200000000000000000000000500000000000000076d796b65796d7976616c75653041414100000000000000000000000300000000000000056b657976616c7565304242420000000000000000", hex.EncodeToString(buf[:100])) +}
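// A hedged end-to-end sketch tying the pieces of this patch together: run the
// v0-to-v1 migration once for a store created with the pre-v1 format, then
// open and use the v2 store. ApplyV0ToV1 and WithMaxDatafileSize are defined
// in this patch; Open, Put, Get and Close come from the package's public API
// in bitcask.go (only partially shown here), so treat the exact wiring below
// as illustrative rather than authoritative.
package main

import (
	"log"

	bitcask "git.mills.io/prologic/bitcask/v2"
	"git.mills.io/prologic/bitcask/v2/scripts/migrations"
)

func main() {
	dir := "/tmp/bitcask-db"

	// One-off upgrade of a v0 store: every entry is rewritten with the extra
	// 8-byte TTL field, honouring the configured maximum datafile size.
	if err := migrations.ApplyV0ToV1(dir, 1<<20); err != nil {
		log.Fatalf("migration failed: %s", err)
	}

	db, err := bitcask.Open(dir, bitcask.WithMaxDatafileSize(1<<20))
	if err != nil {
		log.Fatalf("open failed: %s", err)
	}
	defer db.Close()

	if err := db.Put([]byte("hello"), []byte("world")); err != nil {
		log.Fatalf("put failed: %s", err)
	}
	value, err := db.Get([]byte("hello"))
	if err != nil {
		log.Fatalf("get failed: %s", err)
	}
	log.Printf("hello = %s", value)
}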