From a2b5ae2287fabbbce1ba94e5e933663d9bf5058c Mon Sep 17 00:00:00 2001
From: Ignacio Hagopian
Date: Wed, 4 Sep 2019 09:42:32 -0300
Subject: [PATCH] fix: check of persisted index values (#91)

---
 bitcask.go                   |  2 +-
 internal/codec_index.go      | 13 ++++---------
 internal/codec_index_test.go | 18 ++++++++----------
 3 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/bitcask.go b/bitcask.go
index 082f3e9..e37ce9c 100644
--- a/bitcask.go
+++ b/bitcask.go
@@ -337,7 +337,7 @@ func (b *Bitcask) reopen() error {
 		}
 		defer f.Close()
 
-		if err := internal.ReadIndex(f, t, b.config.maxKeySize, b.config.maxValueSize); err != nil {
+		if err := internal.ReadIndex(f, t, b.config.maxKeySize); err != nil {
 			return err
 		}
 	} else {
diff --git a/internal/codec_index.go b/internal/codec_index.go
index ba0e959..f163005 100644
--- a/internal/codec_index.go
+++ b/internal/codec_index.go
@@ -13,7 +13,6 @@ var (
 	errTruncatedKeyData = errors.New("key data is truncated")
 	errTruncatedData    = errors.New("data is truncated")
 	errKeySizeTooLarge  = errors.New("key size too large")
-	errDataSizeTooLarge = errors.New("data size too large")
 )
 
 const (
@@ -60,21 +59,17 @@ func writeBytes(b []byte, w io.Writer) error {
 	return nil
 }
 
-func readItem(r io.Reader, maxValueSize int) (Item, error) {
+func readItem(r io.Reader) (Item, error) {
 	buf := make([]byte, (fileIDSize + offsetSize + sizeSize))
 	_, err := io.ReadFull(r, buf)
 	if err != nil {
 		return Item{}, errors.Wrap(errTruncatedData, err.Error())
 	}
 
-	size := int64(binary.BigEndian.Uint64(buf[(fileIDSize + offsetSize):]))
-	if size > int64(maxValueSize) {
-		return Item{}, errDataSizeTooLarge
-	}
 	return Item{
 		FileID: int(binary.BigEndian.Uint32(buf[:fileIDSize])),
 		Offset: int64(binary.BigEndian.Uint64(buf[fileIDSize:(fileIDSize + offsetSize)])),
-		Size:   size,
+		Size:   int64(binary.BigEndian.Uint64(buf[(fileIDSize + offsetSize):])),
 	}, nil
 }
 
@@ -91,7 +86,7 @@ func writeItem(item Item, w io.Writer) error {
 }
 
 // ReadIndex reads a persisted from a io.Reader into a Tree
-func ReadIndex(r io.Reader, t art.Tree, maxKeySize, maxValueSize int) error {
+func ReadIndex(r io.Reader, t art.Tree, maxKeySize int) error {
 	for {
 		key, err := readKeyBytes(r, maxKeySize)
 		if err != nil {
@@ -101,7 +96,7 @@ func ReadIndex(r io.Reader, t art.Tree, maxKeySize, maxValueSize int) error {
 			return err
 		}
 
-		item, err := readItem(r, maxValueSize)
+		item, err := readItem(r)
 		if err != nil {
 			return err
 		}
diff --git a/internal/codec_index_test.go b/internal/codec_index_test.go
index b465e3c..f78cc19 100644
--- a/internal/codec_index_test.go
+++ b/internal/codec_index_test.go
@@ -36,7 +36,7 @@ func TestReadIndex(t *testing.T) {
 	b := bytes.NewBuffer(sampleTreeBytes)
 
 	at := art.New()
-	err := ReadIndex(b, at, 1024, 1024)
+	err := ReadIndex(b, at, 1024)
 	if err != nil {
 		t.Fatalf("error while deserializing correct sample tree: %v", err)
 	}
@@ -74,7 +74,7 @@ func TestReadCorruptedData(t *testing.T) {
 		t.Run(table[i].name, func(t *testing.T) {
 			bf := bytes.NewBuffer(table[i].data)
 
-			if err := ReadIndex(bf, art.New(), 1024, 1024); errors.Cause(err) != table[i].err {
+			if err := ReadIndex(bf, art.New(), 1024); errors.Cause(err) != table[i].err {
 				t.Fatalf("expected %v, got %v", table[i].err, err)
 			}
 		})
@@ -91,21 +91,19 @@ func TestReadCorruptedData(t *testing.T) {
 	binary.BigEndian.PutUint32(overflowDataSize[int32Size+4+fileIDSize+offsetSize:], 1025)
 
 	table := []struct {
-		name         string
-		err          error
-		maxKeySize   int
-		maxValueSize int
-		data         []byte
+		name       string
+		err        error
+		maxKeySize int
+		data       []byte
 	}{
-		{name: "key-data-overflow", err: errKeySizeTooLarge, maxKeySize: 1024, maxValueSize: 1024, data: overflowKeySize},
-		{name: "item-data-overflow", err: errDataSizeTooLarge, maxKeySize: 1024, maxValueSize: 1024, data: overflowDataSize},
+		{name: "key-data-overflow", err: errKeySizeTooLarge, maxKeySize: 1024, data: overflowKeySize},
 	}
 
 	for i := range table {
 		t.Run(table[i].name, func(t *testing.T) {
 			bf := bytes.NewBuffer(table[i].data)
 
-			if err := ReadIndex(bf, art.New(), table[i].maxKeySize, table[i].maxValueSize); errors.Cause(err) != table[i].err {
+			if err := ReadIndex(bf, art.New(), table[i].maxKeySize); errors.Cause(err) != table[i].err {
				t.Fatalf("expected %v, got %v", table[i].err, err)
			}
		})
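
Editor's note, not part of the patch: a minimal Go sketch of the narrowed API after this change. ReadIndex now takes only maxKeySize, so an item whose persisted size exceeds the former maxValueSize bound is loaded rather than rejected. The entry layout (uint32 key length, key bytes, uint32 file ID, uint64 offset, uint64 size) follows readKeyBytes/readItem above; the test name, the "abcd" key, the 1<<20 size, and the art import path (taken to match the existing test file) are illustrative assumptions.

// Sketch only: exercises the updated ReadIndex(r, t, maxKeySize) signature.
package internal

import (
	"bytes"
	"encoding/binary"
	"testing"

	art "github.com/plar/go-adaptive-radix-tree"
)

func TestReadIndexSketch(t *testing.T) {
	var buf bytes.Buffer

	// One serialized index entry: key length, key, file ID, offset, size.
	key := []byte("abcd")
	binary.Write(&buf, binary.BigEndian, uint32(len(key))) // uint32 key length
	buf.Write(key)                                         // key bytes
	binary.Write(&buf, binary.BigEndian, uint32(0))        // file ID
	binary.Write(&buf, binary.BigEndian, uint64(0))        // offset
	binary.Write(&buf, binary.BigEndian, uint64(1<<20))    // size well above the old 1024 test limit

	tree := art.New()
	// Only the key size is bounds-checked on load now; the large item size is accepted.
	if err := ReadIndex(&buf, tree, 1024); err != nil {
		t.Fatalf("ReadIndex: %v", err)
	}
	if tree.Size() != 1 {
		t.Fatalf("expected 1 entry in the index, got %d", tree.Size())
	}
}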