Mirror of https://github.com/taigrr/bitcask, synced 2025-01-18 04:03:17 -08:00
Add support for keys with ttl (#177)
* ttl support first commit
* imports fix
* put api args correction
* put options added
* upgrade method added
* upgrade log added
* v0 to v1 migration script added
* error assertion added
* temp migration dir fix

Co-authored-by: yash <yash.chandra@grabpay.com>
committed by GitHub
parent f397bec88f
commit 5c6ceadac1
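For context, here is a minimal sketch of how the new migration entry point might be invoked, assuming the migrations package is importable from its scripts/migrations path; the directory and max-datafile-size values below are hypothetical and should match your store's configuration:

    package main

    import (
        "log"

        "github.com/prologic/bitcask/scripts/migrations"
    )

    func main() {
        // Hypothetical v0 datafile directory and max datafile size;
        // run this once before reopening the store as v1.
        if err := migrations.ApplyV0ToV1("/tmp/bitcask-db", 1<<20); err != nil {
            log.Fatalf("migration failed: %v", err)
        }
    }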
scripts/migrations/v0_to_v1.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package migrations

import (
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"

	"github.com/prologic/bitcask/internal"
)

const (
	keySize                 = 4
	valueSize               = 8
	checksumSize            = 4
	ttlSize                 = 8
	defaultDatafileFilename = "%09d.data"
)

// ApplyV0ToV1 rewrites every datafile in dir from the v0 entry format
// (key size, value size, key, value, checksum) to the v1 format, which
// appends an 8-byte TTL field to each entry. Migrated datafiles are
// staged in a temporary directory and swapped in on success.
func ApplyV0ToV1(dir string, maxDatafileSize int) error {
	temp, err := prepare(dir)
	if err != nil {
		return err
	}
	defer os.RemoveAll(temp)

	err = apply(dir, temp, maxDatafileSize)
	if err != nil {
		return err
	}

	return cleanup(dir, temp)
}

func prepare(dir string) (string, error) {
	return ioutil.TempDir(dir, "migration")
}

func apply(dir, temp string, maxDatafileSize int) error {
	datafilesPath, err := internal.GetDatafiles(dir)
	if err != nil {
		return err
	}
	var id, newOffset int
	datafile, err := getNewDatafile(temp, id)
	if err != nil {
		return err
	}
	id++
	for _, p := range datafilesPath {
		df, err := os.Open(p)
		if err != nil {
			return err
		}
		var off int64
		for {
			entry, err := getSingleEntry(df, off)
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			// Roll over to a new output datafile once this one is full.
			if newOffset+len(entry) > maxDatafileSize {
				err = datafile.Sync()
				if err != nil {
					return err
				}
				datafile, err = getNewDatafile(temp, id)
				if err != nil {
					return err
				}
				id++
				newOffset = 0
			}
			// Extend the entry with ttlSize zero bytes: a TTL of zero
			// means the key never expires.
			newEntry := make([]byte, len(entry)+ttlSize)
			copy(newEntry[:len(entry)], entry)
			n, err := datafile.Write(newEntry)
			if err != nil {
				return err
			}
			newOffset += n
			off += int64(len(entry))
		}
	}
	return datafile.Sync()
}

func cleanup(dir, temp string) error {
	// Remove the old v0 datafiles...
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, file := range files {
		if !file.IsDir() {
			err := os.RemoveAll(path.Join([]string{dir, file.Name()}...))
			if err != nil {
				return err
			}
		}
	}
	// ...then move the migrated datafiles into place.
	files, err = ioutil.ReadDir(temp)
	if err != nil {
		return err
	}
	for _, file := range files {
		err := os.Rename(
			path.Join([]string{temp, file.Name()}...),
			path.Join([]string{dir, file.Name()}...),
		)
		if err != nil {
			return err
		}
	}
	return nil
}

func getNewDatafile(path string, id int) (*os.File, error) {
	fn := filepath.Join(path, fmt.Sprintf(defaultDatafileFilename, id))
	return os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
}

// getSingleEntry reads one complete v0 entry starting at offset: the
// fixed-size key/value-size prefix followed by key, value, and checksum.
func getSingleEntry(f *os.File, offset int64) ([]byte, error) {
	prefixBuf, err := readPrefix(f, offset)
	if err != nil {
		return nil, err
	}
	actualKeySize, actualValueSize := getKeyValueSize(prefixBuf)
	entryBuf, err := read(f, uint64(actualKeySize)+actualValueSize+checksumSize, offset+keySize+valueSize)
	if err != nil {
		return nil, err
	}
	return append(prefixBuf, entryBuf...), nil
}

func readPrefix(f *os.File, offset int64) ([]byte, error) {
	prefixBuf := make([]byte, keySize+valueSize)
	_, err := f.ReadAt(prefixBuf, offset)
	if err != nil {
		return nil, err
	}
	return prefixBuf, nil
}

func read(f *os.File, bufSize uint64, offset int64) ([]byte, error) {
	buf := make([]byte, bufSize)
	_, err := f.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}
	return buf, nil
}

// getKeyValueSize decodes the big-endian key and value sizes from the
// entry prefix.
func getKeyValueSize(buf []byte) (uint32, uint64) {
	actualKeySize := binary.BigEndian.Uint32(buf[:keySize])
	actualValueSize := binary.BigEndian.Uint64(buf[keySize:])
	return actualKeySize, actualValueSize
}
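The transformation is purely structural: a v0 entry is keySize(4) + valueSize(8) + key + value + checksum(4), and v1 appends an 8-byte TTL. A minimal standalone sketch of the two layouts follows; the dummy key, value, and zeroed checksum here are illustrative, not part of the commit:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // v0 layout: [keySize:4][valueSize:8][key][value][checksum:4]
        key, value, checksum := []byte("k"), []byte("v"), []byte{0, 0, 0, 0}
        sizes := make([]byte, 12)
        binary.BigEndian.PutUint32(sizes[0:4], uint32(len(key)))
        binary.BigEndian.PutUint64(sizes[4:12], uint64(len(value)))
        v0 := append(append(append(sizes, key...), value...), checksum...)

        // v1 layout: the same bytes with an 8-byte TTL appended; the
        // migration writes zeros, i.e. "never expires".
        v1 := append(v0, make([]byte, 8)...)
        fmt.Printf("v0: %d bytes, v1: %d bytes\n", len(v0), len(v1)) // v0: 18 bytes, v1: 26 bytes
    }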
scripts/migrations/v0_to_v1_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package migrations

import (
	"encoding/binary"
	"encoding/hex"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_ApplyV0ToV1(t *testing.T) {
	assert := assert.New(t)
	testdir, err := ioutil.TempDir("/tmp", "bitcask")
	assert.NoError(err)
	defer os.RemoveAll(testdir)

	w0, err := os.OpenFile(filepath.Join(testdir, "000000000.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
	assert.NoError(err)
	w1, err := os.OpenFile(filepath.Join(testdir, "000000001.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
	assert.NoError(err)
	w2, err := os.OpenFile(filepath.Join(testdir, "000000002.data"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
	assert.NoError(err)
	defer w0.Close()
	defer w1.Close()
	defer w2.Close()

	// Build two v0 entries (28 and 24 bytes) and write the same 52-byte
	// pair into each of the three input datafiles.
	buf := make([]byte, 104)
	binary.BigEndian.PutUint32(buf[:4], 5)    // key size ("mykey")
	binary.BigEndian.PutUint64(buf[4:12], 7)  // value size ("myvalue")
	copy(buf[12:28], "mykeymyvalue0AAA")      // key, value, checksum
	binary.BigEndian.PutUint32(buf[28:32], 3) // key size ("key")
	binary.BigEndian.PutUint64(buf[32:40], 5) // value size ("value")
	copy(buf[40:52], "keyvalue0BBB")          // key, value, checksum
	_, err = w0.Write(buf[:52])
	assert.NoError(err)
	_, err = w1.Write(buf[:52])
	assert.NoError(err)
	_, err = w2.Write(buf[:52])
	assert.NoError(err)

	err = ApplyV0ToV1(testdir, 104)
	assert.NoError(err)

	// After migration each entry gains an 8-byte zero TTL, so the first
	// output datafile holds exactly three entries (36+32+36 = 104 bytes)...
	r0, err := os.Open(filepath.Join(testdir, "000000000.data"))
	assert.NoError(err)
	defer r0.Close()
	n, err := io.ReadFull(r0, buf)
	assert.NoError(err)
	assert.Equal(104, n)
	assert.Equal("0000000500000000000000076d796b65796d7976616c75653041414100000000000000000000000300000000000000056b657976616c75653042424200000000000000000000000500000000000000076d796b65796d7976616c7565304141410000000000000000", hex.EncodeToString(buf))

	// ...and the second holds the remaining three (32+36+32 = 100 bytes).
	r1, err := os.Open(filepath.Join(testdir, "000000001.data"))
	assert.NoError(err)
	defer r1.Close()
	n, err = io.ReadFull(r1, buf[:100])
	assert.NoError(err)
	assert.Equal(100, n)
	assert.Equal("0000000300000000000000056b657976616c75653042424200000000000000000000000500000000000000076d796b65796d7976616c75653041414100000000000000000000000300000000000000056b657976616c7565304242420000000000000000", hex.EncodeToString(buf[:100]))
}
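To follow the size assertions: each v0 entry is 4 + 8 + len(key) + len(value) + 4 bytes, so the two test entries are 28 and 24 bytes (52 per input file, 156 across the three files). Migration appends an 8-byte TTL to each of the six entries, giving alternating sizes of 36 and 32 bytes (204 bytes total). With maxDatafileSize = 104, the first output file fills exactly (36 + 32 + 36 = 104) and the rollover check sends the remaining three entries to the second file (32 + 36 + 32 = 100), which is precisely what the two hex assertions verify.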