
Fix gofmt/golint issues

James Mills 2019-10-14 16:55:47 +10:00
parent c4e12e0019
commit 65e9317d26
GPG Key ID: AC4C014F1440EBD6 (no known key found for this signature in database)
6 changed files with 24 additions and 8 deletions

View File

@@ -1196,10 +1196,10 @@ func BenchmarkPut(b *testing.B) {
 	}
 	variants := map[string][]Option{
-		"NoSync": []Option{
+		"NoSync": {
 			WithSync(false),
 		},
-		"Sync": []Option{
+		"Sync": {
 			WithSync(true),
 		},
 	}
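The change in this hunk is the gofmt -s / golint composite-literal simplification: because the map's value type is already []Option, repeating the element type inside each value is redundant. A minimal standalone sketch of the same simplification; the Option type and withSync constructor below are illustrative stand-ins, not bitcask's own:

package main

import "fmt"

// Option is a stand-in functional-option type used only for this illustration.
type Option func() string

// withSync mimics an option constructor such as the WithSync seen in the diff above.
func withSync(enabled bool) Option {
	return func() string { return fmt.Sprintf("sync=%v", enabled) }
}

func main() {
	// gofmt -s / golint: the inner []Option{...} can be written as {...}
	// because the map's value type already declares []Option.
	variants := map[string][]Option{
		"NoSync": {withSync(false)},
		"Sync":   {withSync(true)},
	}
	for name, opts := range variants {
		for _, opt := range opts {
			fmt.Println(name, opt())
		}
	}
}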

View File

@@ -13,16 +13,17 @@ import (
 )
 
 const (
-	DefaultDatafileFilename = "%09d.data"
+	defaultDatafileFilename = "%09d.data"
 )
 
 var (
-	ErrReadonly  = errors.New("error: read only datafile")
-	ErrReadError = errors.New("error: read error")
+	errReadonly  = errors.New("error: read only datafile")
+	errReadError = errors.New("error: read error")
 
 	mxMemPool sync.RWMutex
 )
 
+// Datafile is an interface that represents a readable and writeable datafile
 type Datafile interface {
 	FileID() int
 	Name() string

@@ -57,7 +58,7 @@ func NewDatafile(path string, id int, readonly bool, maxKeySize uint32, maxValue
 		err error
 	)
 
-	fn := filepath.Join(path, fmt.Sprintf(DefaultDatafileFilename, id))
+	fn := filepath.Join(path, fmt.Sprintf(defaultDatafileFilename, id))
 
 	if !readonly {
 		w, err = os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)

@@ -165,7 +166,7 @@ func (df *datafile) ReadAt(index, size int64) (e internal.Entry, err error) {
 		return
 	}
 	if int64(n) != size {
-		err = ErrReadError
+		err = errReadError
 		return
 	}

@@ -176,7 +177,7 @@ func (df *datafile) ReadAt(index, size int64) (e internal.Entry, err error) {
 
 func (df *datafile) Write(e internal.Entry) (int64, int64, error) {
 	if df.w == nil {
-		return -1, 0, ErrReadonly
+		return -1, 0, errReadonly
 	}
 
 	df.Lock()
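For context, the NewDatafile hunk above opens the active datafile append-only (created if missing), while a datafile marked readonly gets no write handle. A minimal sketch of that open pattern, assuming only the flags and the "%09d.data" naming visible in the diff; everything else here is illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "bitcask-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Datafile names follow the "%09d.data" pattern shown in the diff.
	fn := filepath.Join(dir, fmt.Sprintf("%09d.data", 0))

	// Active (writable) datafile: append-only, created if missing, mode 0640.
	w, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
	if err != nil {
		panic(err)
	}
	defer w.Close()

	// Separate read-only handle, as an already-rotated (immutable) datafile would presumably use.
	r, err := os.Open(fn)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	fmt.Println("opened", fn)
}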

View File

@@ -12,6 +12,7 @@ type Entry struct {
 	Value []byte
 }
 
+// NewEntry creates a new `Entry` with the given `key` and `value`
 func NewEntry(key, value []byte) Entry {
 	checksum := crc32.ChecksumIEEE(value)
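The newly documented NewEntry computes a CRC-32 (IEEE) checksum over the value, which lets a later read detect on-disk corruption. A self-contained sketch of that idea using only the standard library; the entry struct and helper names below are illustrative stand-ins, not the package's internal.Entry:

package main

import (
	"fmt"
	"hash/crc32"
)

// entry is an illustrative stand-in for an on-disk record.
type entry struct {
	Checksum uint32
	Key      []byte
	Value    []byte
}

// newEntry mirrors the documented behaviour: checksum the value with CRC-32 (IEEE).
func newEntry(key, value []byte) entry {
	return entry{
		Checksum: crc32.ChecksumIEEE(value),
		Key:      key,
		Value:    value,
	}
}

func main() {
	e := newEntry([]byte("hello"), []byte("world"))
	// Re-checksumming the value on read detects corruption.
	ok := crc32.ChecksumIEEE(e.Value) == e.Checksum
	fmt.Println("checksum ok:", ok)
}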

View File

@@ -7,11 +7,14 @@ import (
 	"github.com/prologic/bitcask/internal"
 )
 
+// Indexer is an interface for loading and saving the index (an Adaptive Radix Tree)
 type Indexer interface {
 	Load(path string, maxkeySize uint32) (art.Tree, bool, error)
 	Save(t art.Tree, path string) error
 }
 
+// NewIndexer returns an instance of the default `Indexer` implemtnation
+// which perists the index (an Adaptive Radix Tree) as a binary blob on file
 func NewIndexer() Indexer {
 	return &indexer{}
 }
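The comments added here describe the default Indexer as persisting the index to disk as a binary blob. As a rough illustration of that save/load shape only, the sketch below uses a plain map plus encoding/gob; it is not the package's Adaptive Radix Tree or its actual on-disk format:

package main

import (
	"encoding/gob"
	"fmt"
	"os"
	"path/filepath"
)

// item is an illustrative stand-in for a disk location (file id, offset, size).
type item struct {
	FileID int
	Offset int64
	Size   int64
}

// saveIndex writes the in-memory index to disk as a single binary blob.
func saveIndex(idx map[string]item, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return gob.NewEncoder(f).Encode(idx)
}

// loadIndex reads the blob back into an in-memory index.
func loadIndex(path string) (map[string]item, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	idx := make(map[string]item)
	return idx, gob.NewDecoder(f).Decode(&idx)
}

func main() {
	path := filepath.Join(os.TempDir(), "index.gob")
	if err := saveIndex(map[string]item{"foo": {FileID: 0, Offset: 42, Size: 10}}, path); err != nil {
		panic(err)
	}
	idx, err := loadIndex(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(idx["foo"])
}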

View File

@@ -1,5 +1,8 @@
 package internal
 
+// Item represents the location of the value on disk. This is used by the
+// internal Adaptive Radix Tree to hold an in-memory structure mapping keys to
+// locations on disk of where the value(s) can be read from.
 type Item struct {
 	FileID int `json:"fileid"`
 	Offset int64 `json:"offset"`
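To make the new Item comment concrete: on a read, the key is looked up in the in-memory index, and the resulting (file id, offset, size) triple drives a single ReadAt against the corresponding datafile. The sketch below is a simplified illustration with made-up names, not the library's actual read path:

package main

import (
	"fmt"
	"os"
)

// item is an illustrative stand-in for a value's location on disk.
type item struct {
	FileID int
	Offset int64
	Size   int64
}

// readValue resolves a key through the in-memory index and reads the value
// bytes back from the referenced datafile with a single ReadAt call.
func readValue(index map[string]item, datafiles map[int]*os.File, key string) ([]byte, error) {
	it, ok := index[key]
	if !ok {
		return nil, fmt.Errorf("key not found: %q", key)
	}
	buf := make([]byte, it.Size)
	_, err := datafiles[it.FileID].ReadAt(buf, it.Offset)
	return buf, err
}

func main() {
	// Write a tiny fake datafile so the example is self-contained.
	f, err := os.CreateTemp("", "000000000-*.data")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("hello world"); err != nil {
		panic(err)
	}

	index := map[string]item{"greeting": {FileID: 0, Offset: 6, Size: 5}}
	val, err := readValue(index, map[int]*os.File{0: f}, "greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // "world"
}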

View File

@@ -9,11 +9,14 @@ import (
 	"strings"
 )
 
+// Exists returns `true` if the given `path` on the current file system exists
 func Exists(path string) bool {
 	_, err := os.Stat(path)
 	return err == nil
 }
 
+// DirSize returns the space occupied by the given `path` on disk on the current
+// file system.
 func DirSize(path string) (int64, error) {
 	var size int64
 	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {

@@ -28,6 +31,9 @@ func DirSize(path string) (int64, error) {
 	return size, err
 }
 
+// GetDatafiles returns a list of all data files stored in the database path
+// given by `path`. All datafiles are identified by the the glob `*.data` and
+// the basename is represented by an monotomic increasing integer.
 func GetDatafiles(path string) ([]string, error) {
 	fns, err := filepath.Glob(fmt.Sprintf("%s/*.data", path))
 	if err != nil {

@@ -37,6 +43,8 @@ func GetDatafiles(path string) ([]string, error) {
 	return fns, nil
 }
 
+// ParseIds will parse a list of datafiles as returned by `GetDatafiles` and
+// extract the id part and return a slice of ints.
 func ParseIds(fns []string) ([]int, error) {
 	var ids []int
 	for _, fn := range fns {
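The comments added above describe datafiles whose basenames are zero-padded, monotonically increasing integers following the "%09d.data" pattern. A hedged sketch of how such basenames can be parsed back into integer ids; this is an illustration under that naming assumption, not necessarily the package's exact implementation:

package main

import (
	"fmt"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
)

// parseIds extracts the integer id from datafile names like "000000001.data".
func parseIds(fns []string) ([]int, error) {
	ids := make([]int, 0, len(fns))
	for _, fn := range fns {
		base := filepath.Base(fn)
		id, err := strconv.Atoi(strings.TrimSuffix(base, ".data"))
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	sort.Ints(ids)
	return ids, nil
}

func main() {
	ids, err := parseIds([]string{"db/000000000.data", "db/000000002.data", "db/000000001.data"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ids) // [0 1 2]
}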