mirror of
https://github.com/gogrlx/bitcask.git
synced 2026-04-03 03:29:11 -07:00
Compare commits
116 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0338755f8c | ||
|
|
877bf982b1 | ||
|
|
abbbeb8e1d | ||
|
|
36bc134b22 | ||
|
|
ea96b8afc0 | ||
|
|
b3d6f734b6 | ||
|
|
55459a5c93 | ||
|
|
a20ee3e3d4 | ||
|
|
cd27b84069 | ||
|
|
b28353de02 | ||
|
|
e8bee948bc | ||
|
|
156d29e344 | ||
|
|
c5a565cd82 | ||
|
|
8f56cffd86 | ||
|
|
7204a33512 | ||
|
|
c7d101d34f | ||
|
|
af43cfa8f1 | ||
|
|
110c5024ee | ||
|
|
1f10b4026d | ||
|
|
fd179b4a86 | ||
|
|
755b1879b5 | ||
|
|
d0c913ccee | ||
|
|
6b372d8334 | ||
|
|
3c1808cad3 | ||
|
|
5d1dd6657a | ||
|
|
1ba9ca46e3 | ||
|
|
2a419c46d2 | ||
|
|
e543fc38fb | ||
|
|
82e26449fa | ||
|
|
bce2721be4 | ||
|
|
f2b5515e03 | ||
|
|
8b684b635d | ||
|
|
a407905ae2 | ||
|
|
6ceeccfd64 | ||
|
|
35dc7e70d2 | ||
|
|
6cc1154611 | ||
|
|
8aa66c66da | ||
|
|
e3242c8426 | ||
|
|
912371645d | ||
|
|
bc782a3083 | ||
|
|
a2161179ef | ||
|
|
51bac21c0a | ||
|
|
b7ac95d66a | ||
|
|
c28c72108f | ||
|
|
a74203b99e | ||
|
|
5ee0f8e0df | ||
|
|
479cabcc8e | ||
|
|
3b63388e79 | ||
|
|
fd2023ee38 | ||
|
|
47ad6601f3 | ||
|
|
057c147f89 | ||
|
|
9fafcad9a6 | ||
|
|
c4faac9f7c | ||
|
|
43334647a6 | ||
|
|
f26a1b1727 | ||
|
|
1fca55d268 | ||
|
|
c640f7f7e7 | ||
|
|
b6c9867e7b | ||
|
|
ed6283dca4 | ||
|
|
f44b6249ac | ||
|
|
2be3a63153 | ||
|
|
836deeb0ff | ||
|
|
b29b4c5422 | ||
|
|
f397a73abd | ||
|
|
53dc013215 | ||
|
|
711d08ce91 | ||
|
|
c3b1a02371 | ||
|
|
7149cb9afe | ||
|
|
c593bc966f | ||
|
|
2400dd86d5 | ||
|
|
27eb922ba2 | ||
|
|
34ad78efc0 | ||
|
|
352c32ee12 | ||
|
|
aaea7273c3 | ||
|
|
01cb269a51 | ||
|
|
962e53af17 | ||
|
|
7a427a237a | ||
|
|
8bf169c96f | ||
|
|
c1488fed2a | ||
|
|
d6e806e655 | ||
|
|
2d9bfbb408 | ||
|
|
d8a48f9eea | ||
|
|
65e7877bdf | ||
|
|
5711478dd6 | ||
|
|
336795285e | ||
|
|
7fba9bd4b7 | ||
|
|
e117ffd2e9 | ||
|
|
ebefd0abf4 | ||
|
|
52dfec6760 | ||
|
|
1298240f53 | ||
|
|
2a35976cdd | ||
|
|
6fe6fe0689 | ||
|
|
e83608b903 | ||
|
|
67ab944db7 | ||
|
|
cb00b11dd7 | ||
|
|
e9c858d43f | ||
|
|
120e854444 | ||
|
|
d2f44d1513 | ||
|
|
c0f178c4f7 | ||
|
|
2585222830 | ||
|
|
3f1d6635c4 | ||
|
|
67840ffb57 | ||
|
|
9f0a357ca0 | ||
|
|
52b6c74a21 | ||
|
|
d24a01797a | ||
|
|
bc8f6c6718 | ||
|
|
b6c212d60c | ||
|
|
3f1b90eb23 | ||
|
|
71a42800fe | ||
|
|
3b9627aeb8 | ||
|
|
e0c4c4fdae | ||
|
|
fb50eb2f82 | ||
|
|
fb2335e3c1 | ||
|
|
9a8aca55ba | ||
|
|
32b782b229 | ||
|
|
146f777683 |
10
.drone.yml
10
.drone.yml
@@ -5,18 +5,10 @@ steps:
|
||||
- name: build
|
||||
image: golang:latest
|
||||
commands:
|
||||
- go test -v -short -cover -coverprofile=coverage.txt ./...
|
||||
- go test -v -short -cover -coverprofile=coverage.txt -coverpkg=$(go list) .
|
||||
|
||||
- name: coverage
|
||||
image: plugins/codecov
|
||||
settings:
|
||||
token:
|
||||
from_secret: codecov-token
|
||||
|
||||
- name: notify
|
||||
image: plugins/webhook
|
||||
urls: https://msgbus.mills.io/ci.mills.io
|
||||
when:
|
||||
status:
|
||||
- success
|
||||
- failure
|
||||
|
||||
2
.github/FUNDING.yml
vendored
Normal file
2
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
github: prologic
|
||||
patreon: prologic
|
||||
@@ -3,14 +3,14 @@ builds:
|
||||
binary: bitcask
|
||||
main: ./cmd/bitcask
|
||||
flags: -tags "static_build"
|
||||
ldflags: -w -X .Version={{.Version}} -X .Commit={{.Commit}}
|
||||
ldflags: -w -X github.com/prologic/bitcask/internal.Version={{.Version}} -X github.com/prologic/bitcask/internal.Commit={{.Commit}}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
-
|
||||
binary: bitcaskd
|
||||
main: ./cmd/bitcaskd
|
||||
flags: -tags "static_build"
|
||||
ldflags: -w -X .Version={{.Version}} -X .Commit={{.Commit}}
|
||||
ldflags: -w -X github.com/prologic/bitcask/internal.Version={{.Version}} -X github.com/prologic/bitcask/internal.Commit={{.Commit}}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
sign:
|
||||
|
||||
13
AUTHORS
Normal file
13
AUTHORS
Normal file
@@ -0,0 +1,13 @@
|
||||
# Entries should be added alphabetically in the form:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Awn Umar <awn@spacetime.dev>
|
||||
Christian Muehlhaeuser <muesli@gmail.com>
|
||||
Ignacio Hagopian <jsign.uy@gmail.com>
|
||||
James Mills <prologic@shortcircuit.net.au>
|
||||
Jesse Donat <donatj@gmail.com>
|
||||
Kebert Xela kebertxela
|
||||
panyun panyun
|
||||
Whemoon Jang <palindrom615@gmail.com>
|
||||
Yury Fedorov orlangure
|
||||
12
CONTRIBUTING.md
Normal file
12
CONTRIBUTING.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# Contributing
|
||||
|
||||
No preference. If you know how to use Github and have contributed to open source projects before then:
|
||||
|
||||
* File an issue
|
||||
* Submit a pull request
|
||||
* File an issue + Submit a pull request
|
||||
* Use this project somewhere :)
|
||||
|
||||
Be sure to add yourself to the [AUTHORS](/AUTHORS) file when you submit your PR(s). Every contribution counts no how big or small!
|
||||
|
||||
Thanks for using Bitcask!
|
||||
14
Dockerfile
Normal file
14
Dockerfile
Normal file
@@ -0,0 +1,14 @@
|
||||
# Build
|
||||
FROM prologic/go-builder:latest AS build
|
||||
|
||||
# Runtime
|
||||
FROM alpine
|
||||
|
||||
COPY --from=build /src/bitcaskd /bitcaskd
|
||||
|
||||
EXPOSE 6379/tcp
|
||||
|
||||
VOLUME /data
|
||||
|
||||
ENTRYPOINT ["/bitcaskd"]
|
||||
CMD ["/data"]
|
||||
16
Makefile
16
Makefile
@@ -1,6 +1,7 @@
|
||||
.PHONY: dev build generate install image release profile bench test clean
|
||||
|
||||
CGO_ENABLED=0
|
||||
VERSION=$(shell git describe --abbrev=0 --tags)
|
||||
COMMIT=$(shell git rev-parse --short HEAD)
|
||||
|
||||
all: dev
|
||||
@@ -12,11 +13,11 @@ dev: build
|
||||
build: clean generate
|
||||
@go build \
|
||||
-tags "netgo static_build" -installsuffix netgo \
|
||||
-ldflags "-w -X $(shell go list)/.Commit=$(COMMIT)" \
|
||||
-ldflags "-w -X $(shell go list)/internal.Version=$(VERSION) -X $(shell go list)/internal.Commit=$(COMMIT)" \
|
||||
./cmd/bitcask/...
|
||||
@go build \
|
||||
-tags "netgo static_build" -installsuffix netgo \
|
||||
-ldflags "-w -X $(shell go list)/.Commit=$(COMMIT)" \
|
||||
-ldflags "-w -X $(shell go list)/internal.Version=$(VERSION) -X $(shell go list)/internal.Commit=$(COMMIT)" \
|
||||
./cmd/bitcaskd/...
|
||||
|
||||
generate:
|
||||
@@ -24,6 +25,7 @@ generate:
|
||||
|
||||
install: build
|
||||
@go install ./cmd/bitcask/...
|
||||
@go install ./cmd/bitcaskd/...
|
||||
|
||||
image:
|
||||
@docker build -t prologic/bitcask .
|
||||
@@ -32,13 +34,17 @@ release:
|
||||
@./tools/release.sh
|
||||
|
||||
profile: build
|
||||
@go test -cpuprofile cpu.prof -memprofile mem.prof -v -bench ./...
|
||||
@go test -cpuprofile cpu.prof -memprofile mem.prof -v -bench .
|
||||
|
||||
bench: build
|
||||
@go test -v -benchmem -bench=. ./...
|
||||
@go test -v -benchmem -bench=. .
|
||||
|
||||
test: build
|
||||
@go test -v -cover -coverprofile=coverage.txt -covermode=atomic -coverpkg=./... -race ./...
|
||||
@go test -v \
|
||||
-cover -coverprofile=coverage.txt -covermode=atomic \
|
||||
-coverpkg=$(shell go list) \
|
||||
-race \
|
||||
.
|
||||
|
||||
clean:
|
||||
@git clean -f -d -X
|
||||
|
||||
146
README.md
146
README.md
@@ -4,21 +4,32 @@
|
||||
[](https://codecov.io/gh/prologic/bitcask)
|
||||
[](https://goreportcard.com/report/prologic/bitcask)
|
||||
[](https://godoc.org/github.com/prologic/bitcask)
|
||||
[](https://github.com/prologic/bitcask)
|
||||
[](https://sourcegraph.com/github.com/prologic/bitcask?badge)
|
||||
|
||||
A Bitcask (LSM+WAL) Key/Value Store written in Go.
|
||||
A high performance Key/Value store written in [Go](https://golang.org) with a predictable read/write performance and high throughput. Uses a [Bitcask](https://en.wikipedia.org/wiki/Bitcask) on-disk layout (LSM+WAL) similar to [Riak](https://riak.com/)
|
||||
|
||||
For a more feature-complete Redis-compatible server, distributed key/value store have a look at [Bitraft](https://github.com/prologic/bitraft) which uses this library as its backend. Use [Bitcask](https://github.com/prologic/bitcask) as a starting point or if you want to embed in your application, use [Bitraft](https://github.com/prologic/bitraft) if you need a complete server/client solution with high availability with a Redis-compatible API.
|
||||
|
||||
## Features
|
||||
|
||||
* Embeddable
|
||||
* Builtin CLI
|
||||
* Embeddable (`import "github.com/prologic/bitcask"`)
|
||||
* Builtin CLI (`bitcask`)
|
||||
* Builtin Redis-compatible server (`bitcaskd`)
|
||||
* Predictable read/write performance
|
||||
* Low latecny
|
||||
* High throughput (See: [Performance](README.md#Performance)
|
||||
* Low latency
|
||||
* High throughput (See: [Performance](README.md#Performance) )
|
||||
|
||||
## Development
|
||||
|
||||
```#!sh
|
||||
$ git clone https://github.com/prologic/bitcask.git
|
||||
$ make
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
```#!bash
|
||||
```#!sh
|
||||
$ go get github.com/prologic/bitcask
|
||||
```
|
||||
|
||||
@@ -26,23 +37,20 @@ $ go get github.com/prologic/bitcask
|
||||
|
||||
Install the package into your project:
|
||||
|
||||
```#!bash
|
||||
```#!sh
|
||||
$ go get github.com/prologic/bitcask
|
||||
```
|
||||
|
||||
```#!go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
import "github.com/prologic/bitcask"
|
||||
|
||||
func main() {
|
||||
db, _ := bitcask.Open("/tmp/db")
|
||||
db.Set("Hello", []byte("World"))
|
||||
db.Close()
|
||||
defer db.Close()
|
||||
db.Put("Hello", []byte("World"))
|
||||
val, _ := db.Get("Hello")
|
||||
}
|
||||
```
|
||||
|
||||
@@ -51,26 +59,122 @@ documentation and other examples.
|
||||
|
||||
## Usage (tool)
|
||||
|
||||
```#!bash
|
||||
```#!sh
|
||||
$ bitcask -p /tmp/db set Hello World
|
||||
$ bitcask -p /tmp/db get Hello
|
||||
World
|
||||
```
|
||||
|
||||
## Usage (server)
|
||||
|
||||
There is also a builtin very simple Redis-compatible server called `bitcaskd`:
|
||||
|
||||
```#!sh
|
||||
$ ./bitcaskd ./tmp
|
||||
INFO[0000] starting bitcaskd v0.0.7@146f777 bind=":6379" path=./tmp
|
||||
```
|
||||
|
||||
Example session:
|
||||
|
||||
```#!sh
|
||||
$ telnet localhost 6379
|
||||
Trying ::1...
|
||||
Connected to localhost.
|
||||
Escape character is '^]'.
|
||||
SET foo bar
|
||||
+OK
|
||||
GET foo
|
||||
$3
|
||||
bar
|
||||
DEL foo
|
||||
:1
|
||||
GET foo
|
||||
$-1
|
||||
PING
|
||||
+PONG
|
||||
QUIT
|
||||
+OK
|
||||
Connection closed by foreign host.
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
You can also use the [Bitcask Docker Image](https://cloud.docker.com/u/prologic/repository/docker/prologic/bitcask):
|
||||
|
||||
```#!sh
|
||||
$ docker pull prologic/bitcask
|
||||
$ docker run -d -p 6379:6379 prologic/bitcask
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
Benchmarks run on a 11" Macbook with a 1.4Ghz Intel Core i7:
|
||||
|
||||
```
|
||||
```#!sh
|
||||
$ make bench
|
||||
...
|
||||
BenchmarkGet-4 300000 5065 ns/op 144 B/op 4 allocs/op
|
||||
BenchmarkPut-4 100000 14640 ns/op 699 B/op 7 allocs/op
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/prologic/bitcask
|
||||
|
||||
BenchmarkGet/128B-4 300000 3913 ns/op 32.71 MB/s 387 B/op 4 allocs/op
|
||||
BenchmarkGet/128BWithPool-4 300000 4143 ns/op 30.89 MB/s 227 B/op 3 allocs/op
|
||||
BenchmarkGet/256B-4 300000 3919 ns/op 65.31 MB/s 643 B/op 4 allocs/op
|
||||
BenchmarkGet/256BWithPool-4 300000 4270 ns/op 59.95 MB/s 355 B/op 3 allocs/op
|
||||
BenchmarkGet/512B-4 300000 4248 ns/op 120.52 MB/s 1187 B/op 4 allocs/op
|
||||
BenchmarkGet/512BWithPool-4 300000 4676 ns/op 109.48 MB/s 611 B/op 3 allocs/op
|
||||
BenchmarkGet/1K-4 200000 5248 ns/op 195.10 MB/s 2275 B/op 4 allocs/op
|
||||
BenchmarkGet/1KWithPool-4 200000 5270 ns/op 194.28 MB/s 1123 B/op 3 allocs/op
|
||||
BenchmarkGet/2K-4 200000 6229 ns/op 328.74 MB/s 4451 B/op 4 allocs/op
|
||||
BenchmarkGet/2KWithPool-4 200000 6282 ns/op 325.99 MB/s 2147 B/op 3 allocs/op
|
||||
BenchmarkGet/4K-4 200000 9027 ns/op 453.74 MB/s 9059 B/op 4 allocs/op
|
||||
BenchmarkGet/4KWithPool-4 200000 8906 ns/op 459.87 MB/s 4195 B/op 3 allocs/op
|
||||
BenchmarkGet/8K-4 100000 12024 ns/op 681.28 MB/s 17763 B/op 4 allocs/op
|
||||
BenchmarkGet/8KWithPool-4 200000 11103 ns/op 737.79 MB/s 8291 B/op 3 allocs/op
|
||||
BenchmarkGet/16K-4 100000 16844 ns/op 972.65 MB/s 34915 B/op 4 allocs/op
|
||||
BenchmarkGet/16KWithPool-4 100000 14575 ns/op 1124.10 MB/s 16483 B/op 3 allocs/op
|
||||
BenchmarkGet/32K-4 50000 27770 ns/op 1179.97 MB/s 73827 B/op 4 allocs/op
|
||||
BenchmarkGet/32KWithPool-4 100000 24495 ns/op 1337.74 MB/s 32867 B/op 3 allocs/op
|
||||
|
||||
BenchmarkPut/128B-4 100000 17492 ns/op 7.32 MB/s 441 B/op 6 allocs/op
|
||||
BenchmarkPut/256B-4 100000 17234 ns/op 14.85 MB/s 571 B/op 6 allocs/op
|
||||
BenchmarkPut/512B-4 100000 22837 ns/op 22.42 MB/s 861 B/op 6 allocs/op
|
||||
BenchmarkPut/1K-4 50000 30333 ns/op 33.76 MB/s 1443 B/op 6 allocs/op
|
||||
BenchmarkPut/2K-4 30000 45304 ns/op 45.21 MB/s 2606 B/op 6 allocs/op
|
||||
BenchmarkPut/4K-4 20000 83953 ns/op 48.79 MB/s 5187 B/op 6 allocs/op
|
||||
BenchmarkPut/8K-4 10000 142142 ns/op 57.63 MB/s 9845 B/op 6 allocs/op
|
||||
BenchmarkPut/16K-4 5000 206722 ns/op 79.26 MB/s 18884 B/op 6 allocs/op
|
||||
BenchmarkPut/32K-4 5000 361108 ns/op 90.74 MB/s 41582 B/op 7 allocs/op
|
||||
|
||||
BenchmarkScan-4 1000000 1679 ns/op 408 B/op 16 allocs/op
|
||||
PASS
|
||||
```
|
||||
|
||||
* ~180,000 reads/sec
|
||||
* ~60,000 writes/sec
|
||||
For 128B values:
|
||||
|
||||
* ~200,000 reads/sec
|
||||
* ~50,000 writes/sec
|
||||
|
||||
The full benchmark above shows linear performance as you increase key/value sizes. Memory pooling starts to become advantageous for larger values.
|
||||
|
||||
## Stargazers over time
|
||||
|
||||
[](https://starcharts.herokuapp.com/prologic/bitcask)
|
||||
|
||||
## Support
|
||||
|
||||
Support the ongoing development of Bitcask!
|
||||
|
||||
**Sponser**
|
||||
|
||||
- Become a [Sponser](https://www.patreon.com/prologic)
|
||||
|
||||
## Contributors
|
||||
|
||||
Thank you to all those that have contributed to this project, battle-tested it, used it in their own projects or products, fixed bugs, improved performance and even fix tiny typos in documentation! Thank you and keep contributing!
|
||||
|
||||
You can find an [AUTHORS](/AUTHORS) file where we keep a list of contributors to the project. If you contriibute a PR please consider adding your name there. There is also Github's own [Contributors](https://github.com/prologic/bitcask/graphs/contributors) statistics.
|
||||
|
||||
## License
|
||||
|
||||
bitcask is licensed under the [MIT License](https://github.com/prologic/bitcask/blob/master/LICENSE)
|
||||
bitcask is licensed under the term of the [MIT License](https://github.com/prologic/bitcask/blob/master/LICENSE)
|
||||
|
||||
621
bitcask.go
621
bitcask.go
@@ -1,342 +1,378 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"sync"
|
||||
|
||||
"github.com/gofrs/flock"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultMaxDatafileSize = 1 << 20 // 1MB
|
||||
art "github.com/plar/go-adaptive-radix-tree"
|
||||
"github.com/prologic/bitcask/internal"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyNotFound = errors.New("error: key not found")
|
||||
ErrCannotAcquireLock = errors.New("error: cannot acquire lock")
|
||||
// ErrKeyNotFound is the error returned when a key is not found
|
||||
ErrKeyNotFound = errors.New("error: key not found")
|
||||
|
||||
// ErrKeyTooLarge is the error returned for a key that exceeds the
|
||||
// maximum allowed key size (configured with WithMaxKeySize).
|
||||
ErrKeyTooLarge = errors.New("error: key too large")
|
||||
|
||||
// ErrValueTooLarge is the error returned for a value that exceeds the
|
||||
// maximum allowed value size (configured with WithMaxValueSize).
|
||||
ErrValueTooLarge = errors.New("error: value too large")
|
||||
|
||||
// ErrChecksumFailed is the error returned if a key/value retrieved does
|
||||
// not match its CRC checksum
|
||||
ErrChecksumFailed = errors.New("error: checksum failed")
|
||||
|
||||
// ErrDatabaseLocked is the error returned if the database is locked
|
||||
// (typically opened by another process)
|
||||
ErrDatabaseLocked = errors.New("error: database locked")
|
||||
)
|
||||
|
||||
// Bitcask is a struct that represents a on-disk LSM and WAL data structure
|
||||
// and in-memory hash of key/value pairs as per the Bitcask paper and seen
|
||||
// in the Riak database.
|
||||
type Bitcask struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
*flock.Flock
|
||||
|
||||
config *config
|
||||
options []Option
|
||||
path string
|
||||
curr *Datafile
|
||||
keydir *Keydir
|
||||
datafiles []*Datafile
|
||||
|
||||
maxDatafileSize int64
|
||||
curr *internal.Datafile
|
||||
datafiles map[int]*internal.Datafile
|
||||
trie art.Tree
|
||||
}
|
||||
|
||||
// Stats is a struct returned by Stats() on an open Bitcask instance
|
||||
type Stats struct {
|
||||
Datafiles int
|
||||
Keys int
|
||||
Size int64
|
||||
}
|
||||
|
||||
// Stats returns statistics about the database including the number of
|
||||
// data files, keys and overall size on disk of the data
|
||||
func (b *Bitcask) Stats() (stats Stats, err error) {
|
||||
var size int64
|
||||
|
||||
size, err = internal.DirSize(b.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stats.Datafiles = len(b.datafiles)
|
||||
b.mu.RLock()
|
||||
stats.Keys = b.trie.Size()
|
||||
b.mu.RUnlock()
|
||||
stats.Size = size
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the database and removes the lock. It is important to call
|
||||
// Close() as this is the only way to cleanup the lock held by the open
|
||||
// database.
|
||||
func (b *Bitcask) Close() error {
|
||||
defer func() {
|
||||
b.Flock.Unlock()
|
||||
os.Remove(b.Flock.Path())
|
||||
}()
|
||||
|
||||
for _, df := range b.datafiles {
|
||||
df.Close()
|
||||
f, err := os.OpenFile(filepath.Join(b.path, "index"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := internal.WriteIndex(b.trie, f); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, df := range b.datafiles {
|
||||
if err := df.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return b.curr.Close()
|
||||
}
|
||||
|
||||
// Sync flushes all buffers to disk ensuring all data is written
|
||||
func (b *Bitcask) Sync() error {
|
||||
return b.curr.Sync()
|
||||
}
|
||||
|
||||
func (b *Bitcask) Get(key string) ([]byte, error) {
|
||||
var df *Datafile
|
||||
// Get retrieves the value of the given key. If the key is not found or an/I/O
|
||||
// error occurs a null byte slice is returned along with the error.
|
||||
func (b *Bitcask) Get(key []byte) ([]byte, error) {
|
||||
var df *internal.Datafile
|
||||
|
||||
item, ok := b.keydir.Get(key)
|
||||
if !ok {
|
||||
b.mu.RLock()
|
||||
value, found := b.trie.Search(key)
|
||||
b.mu.RUnlock()
|
||||
if !found {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
|
||||
if item.FileID == b.curr.id {
|
||||
item := value.(internal.Item)
|
||||
|
||||
if item.FileID == b.curr.FileID() {
|
||||
df = b.curr
|
||||
} else {
|
||||
df = b.datafiles[item.FileID]
|
||||
}
|
||||
|
||||
e, err := df.ReadAt(item.Index)
|
||||
e, err := df.ReadAt(item.Offset, item.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
checksum := crc32.ChecksumIEEE(e.Value)
|
||||
if checksum != e.Checksum {
|
||||
return nil, ErrChecksumFailed
|
||||
}
|
||||
|
||||
return e.Value, nil
|
||||
}
|
||||
|
||||
func (b *Bitcask) Put(key string, value []byte) error {
|
||||
index, err := b.put(key, value)
|
||||
// Has returns true if the key exists in the database, false otherwise.
|
||||
func (b *Bitcask) Has(key []byte) bool {
|
||||
b.mu.RLock()
|
||||
_, found := b.trie.Search(key)
|
||||
b.mu.RUnlock()
|
||||
return found
|
||||
}
|
||||
|
||||
// Put stores the key and value in the database.
|
||||
func (b *Bitcask) Put(key, value []byte) error {
|
||||
if len(key) > b.config.maxKeySize {
|
||||
return ErrKeyTooLarge
|
||||
}
|
||||
if len(value) > b.config.maxValueSize {
|
||||
return ErrValueTooLarge
|
||||
}
|
||||
|
||||
offset, n, err := b.put(key, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.keydir.Add(key, b.curr.id, index, time.Now().Unix())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bitcask) Delete(key string) error {
|
||||
_, err := b.put(key, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.keydir.Delete(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bitcask) Fold(f func(key string) error) error {
|
||||
for key := range b.keydir.Keys() {
|
||||
if err := f(key); err != nil {
|
||||
if b.config.sync {
|
||||
if err := b.curr.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
item := internal.Item{FileID: b.curr.FileID(), Offset: offset, Size: n}
|
||||
b.mu.Lock()
|
||||
b.trie.Insert(key, item)
|
||||
b.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bitcask) put(key string, value []byte) (int64, error) {
|
||||
size, err := b.curr.Size()
|
||||
// Delete deletes the named key. If the key doesn't exist or an I/O error
|
||||
// occurs the error is returned.
|
||||
func (b *Bitcask) Delete(key []byte) error {
|
||||
_, _, err := b.put(key, []byte{})
|
||||
if err != nil {
|
||||
return -1, err
|
||||
return err
|
||||
}
|
||||
|
||||
if size >= b.maxDatafileSize {
|
||||
err := b.curr.Close()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
b.mu.Lock()
|
||||
b.trie.Delete(key)
|
||||
b.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Scan performs a prefix scan of keys matching the given prefix and calling
|
||||
// the function `f` with the keys found. If the function returns an error
|
||||
// no further keys are processed and the first error returned.
|
||||
func (b *Bitcask) Scan(prefix []byte, f func(key []byte) error) (err error) {
|
||||
b.trie.ForEachPrefix(prefix, func(node art.Node) bool {
|
||||
// Skip the root node
|
||||
if len(node.Key()) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
df, err := NewDatafile(b.path, b.curr.id, true)
|
||||
b.datafiles = append(b.datafiles, df)
|
||||
if err = f(node.Key()); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
id := b.curr.id + 1
|
||||
curr, err := NewDatafile(b.path, id, false)
|
||||
// Len returns the total number of keys in the database
|
||||
func (b *Bitcask) Len() int {
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
return b.trie.Size()
|
||||
}
|
||||
|
||||
// Keys returns all keys in the database as a channel of keys
|
||||
func (b *Bitcask) Keys() chan []byte {
|
||||
ch := make(chan []byte)
|
||||
go func() {
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
for it := b.trie.Iterator(); it.HasNext(); {
|
||||
node, _ := it.Next()
|
||||
|
||||
// Skip the root node
|
||||
if len(node.Key()) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- node.Key()
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// Fold iterates over all keys in the database calling the function `f` for
|
||||
// each key. If the function returns an error, no further keys are processed
|
||||
// and the error returned.
|
||||
func (b *Bitcask) Fold(f func(key []byte) error) error {
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
b.trie.ForEach(func(node art.Node) bool {
|
||||
if err := f(node.Key()); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bitcask) put(key, value []byte) (int64, int64, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
size := b.curr.Size()
|
||||
if size >= int64(b.config.maxDatafileSize) {
|
||||
err := b.curr.Close()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
return -1, 0, err
|
||||
}
|
||||
|
||||
id := b.curr.FileID()
|
||||
|
||||
df, err := internal.NewDatafile(b.path, id, true)
|
||||
if err != nil {
|
||||
return -1, 0, err
|
||||
}
|
||||
|
||||
b.datafiles[id] = df
|
||||
|
||||
id = b.curr.FileID() + 1
|
||||
curr, err := internal.NewDatafile(b.path, id, false)
|
||||
if err != nil {
|
||||
return -1, 0, err
|
||||
}
|
||||
b.curr = curr
|
||||
}
|
||||
|
||||
e := NewEntry(key, value)
|
||||
e := internal.NewEntry(key, value)
|
||||
return b.curr.Write(e)
|
||||
}
|
||||
|
||||
func (b *Bitcask) setMaxDatafileSize(size int64) error {
|
||||
b.maxDatafileSize = size
|
||||
func (b *Bitcask) readConfig() error {
|
||||
if internal.Exists(filepath.Join(b.path, "config.json")) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(b.path, "config.json"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &b.config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithMaxDatafileSize(size int64) func(*Bitcask) error {
|
||||
return func(b *Bitcask) error {
|
||||
return b.setMaxDatafileSize(size)
|
||||
}
|
||||
}
|
||||
|
||||
func getDatafiles(path string) ([]string, error) {
|
||||
fns, err := filepath.Glob(fmt.Sprintf("%s/*.data", path))
|
||||
func (b *Bitcask) writeConfig() error {
|
||||
data, err := json.Marshal(b.config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
sort.Strings(fns)
|
||||
return fns, nil
|
||||
return ioutil.WriteFile(filepath.Join(b.path, "config.json"), data, 0600)
|
||||
}
|
||||
|
||||
func parseIds(fns []string) ([]int, error) {
|
||||
var ids []int
|
||||
for _, fn := range fns {
|
||||
fn = filepath.Base(fn)
|
||||
ext := filepath.Ext(fn)
|
||||
if ext != ".data" {
|
||||
continue
|
||||
}
|
||||
id, err := strconv.ParseInt(strings.TrimSuffix(fn, ext), 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
return ids, nil
|
||||
}
|
||||
func (b *Bitcask) reopen() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
func Merge(path string, force bool) error {
|
||||
fns, err := getDatafiles(path)
|
||||
fns, err := internal.GetDatafiles(b.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ids, err := parseIds(fns)
|
||||
ids, err := internal.ParseIds(fns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not merge if we only have 1 Datafile
|
||||
if len(ids) <= 1 {
|
||||
return nil
|
||||
}
|
||||
datafiles := make(map[int]*internal.Datafile, len(ids))
|
||||
|
||||
// Don't merge the Active Datafile (the last one)
|
||||
fns = fns[:len(fns)-1]
|
||||
ids = ids[:len(ids)-1]
|
||||
|
||||
temp, err := ioutil.TempDir("", "bitcask")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, fn := range fns {
|
||||
// Don't merge Datafiles whose .hint files we've already generated
|
||||
// (they are already merged); unless we set the force flag to true
|
||||
// (forcing a re-merge).
|
||||
if filepath.Ext(fn) == ".hint" && !force {
|
||||
// Already merged
|
||||
continue
|
||||
}
|
||||
|
||||
id := ids[i]
|
||||
|
||||
keydir := NewKeydir()
|
||||
|
||||
df, err := NewDatafile(path, id, true)
|
||||
for _, id := range ids {
|
||||
df, err := internal.NewDatafile(b.path, id, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer df.Close()
|
||||
datafiles[id] = df
|
||||
}
|
||||
|
||||
for {
|
||||
e, err := df.Read()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
t := art.New()
|
||||
|
||||
// Tombstone value (deleted key)
|
||||
if len(e.Value) == 0 {
|
||||
keydir.Delete(e.Key)
|
||||
continue
|
||||
}
|
||||
|
||||
keydir.Add(e.Key, ids[i], e.Index, e.Timestamp)
|
||||
}
|
||||
|
||||
tempdf, err := NewDatafile(temp, id, false)
|
||||
if internal.Exists(path.Join(b.path, "index")) {
|
||||
f, err := os.Open(path.Join(b.path, "index"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tempdf.Close()
|
||||
defer f.Close()
|
||||
|
||||
for key := range keydir.Keys() {
|
||||
item, _ := keydir.Get(key)
|
||||
e, err := df.ReadAt(item.Index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tempdf.Write(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = tempdf.Close()
|
||||
if err != nil {
|
||||
if err := internal.ReadIndex(f, t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = df.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.Rename(tempdf.Name(), df.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hint := strings.TrimSuffix(df.Name(), ".data") + ".hint"
|
||||
err = keydir.Save(hint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Open(path string, options ...func(*Bitcask) error) (*Bitcask, error) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := Merge(path, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fns, err := getDatafiles(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ids, err := parseIds(fns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keydir := NewKeydir()
|
||||
var datafiles []*Datafile
|
||||
|
||||
for i, fn := range fns {
|
||||
df, err := NewDatafile(path, ids[i], true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
datafiles = append(datafiles, df)
|
||||
|
||||
if filepath.Ext(fn) == ".hint" {
|
||||
f, err := os.Open(filepath.Join(path, fn))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
hint, err := NewKeydirFromBytes(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for key := range hint.Keys() {
|
||||
item, _ := hint.Get(key)
|
||||
keydir.Add(key, item.FileID, item.Index, item.Timestamp)
|
||||
}
|
||||
} else {
|
||||
} else {
|
||||
for i, df := range datafiles {
|
||||
for {
|
||||
e, err := df.Read()
|
||||
e, n, err := df.Read()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Tombstone value (deleted key)
|
||||
if len(e.Value) == 0 {
|
||||
keydir.Delete(e.Key)
|
||||
t.Delete(e.Key)
|
||||
continue
|
||||
}
|
||||
|
||||
keydir.Add(e.Key, ids[i], e.Index, e.Timestamp)
|
||||
item := internal.Item{FileID: ids[i], Offset: e.Offset, Size: n}
|
||||
t.Insert(e.Key, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -346,24 +382,125 @@ func Open(path string, options ...func(*Bitcask) error) (*Bitcask, error) {
|
||||
id = ids[(len(ids) - 1)]
|
||||
}
|
||||
|
||||
curr, err := NewDatafile(path, id, false)
|
||||
curr, err := internal.NewDatafile(b.path, id, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.trie = t
|
||||
b.curr = curr
|
||||
b.datafiles = datafiles
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge merges all datafiles in the database. Old keys are squashed
|
||||
// and deleted keys removes. Duplicate key/value pairs are also removed.
|
||||
// Call this function periodically to reclaim disk space.
|
||||
func (b *Bitcask) Merge() error {
|
||||
// Temporary merged database path
|
||||
temp, err := ioutil.TempDir(b.path, "merge")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(temp)
|
||||
|
||||
// Create a merged database
|
||||
mdb, err := Open(temp, b.options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rewrite all key/value pairs into merged database
|
||||
// Doing this automatically strips deleted keys and
|
||||
// old key/value pairs
|
||||
err = b.Fold(func(key []byte) error {
|
||||
value, err := b.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := mdb.Put(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mdb.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the database
|
||||
err = b.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove all data files
|
||||
files, err := ioutil.ReadDir(b.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, file := range files {
|
||||
if !file.IsDir() {
|
||||
err := os.RemoveAll(path.Join([]string{b.path, file.Name()}...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Rename all merged data files
|
||||
files, err = ioutil.ReadDir(mdb.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, file := range files {
|
||||
err := os.Rename(
|
||||
path.Join([]string{mdb.path, file.Name()}...),
|
||||
path.Join([]string{b.path, file.Name()}...),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally reopen the database
|
||||
return b.reopen()
|
||||
}
|
||||
|
||||
// Open opens the database at the given path with optional options.
|
||||
// Options can be provided with the `WithXXX` functions that provide
|
||||
// configuration options as functions.
|
||||
func Open(path string, options ...Option) (*Bitcask, error) {
|
||||
var (
|
||||
cfg *config
|
||||
err error
|
||||
)
|
||||
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bitcask := &Bitcask{
|
||||
Flock: flock.New(filepath.Join(path, "lock")),
|
||||
path: path,
|
||||
curr: curr,
|
||||
keydir: keydir,
|
||||
datafiles: datafiles,
|
||||
|
||||
maxDatafileSize: DefaultMaxDatafileSize,
|
||||
cfg, err = getConfig(path)
|
||||
if err != nil {
|
||||
cfg = newDefaultConfig()
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
err = option(bitcask)
|
||||
if err != nil {
|
||||
bitcask := &Bitcask{
|
||||
Flock: flock.New(filepath.Join(path, "lock")),
|
||||
config: cfg,
|
||||
options: options,
|
||||
path: path,
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
if err := opt(bitcask.config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -374,7 +511,15 @@ func Open(path string, options ...func(*Bitcask) error) (*Bitcask, error) {
|
||||
}
|
||||
|
||||
if !locked {
|
||||
return nil, ErrCannotAcquireLock
|
||||
return nil, ErrDatabaseLocked
|
||||
}
|
||||
|
||||
if err := bitcask.writeConfig(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := bitcask.reopen(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bitcask, nil
|
||||
|
||||
529
bitcask_test.go
529
bitcask_test.go
@@ -1,14 +1,45 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type sortByteArrays [][]byte
|
||||
|
||||
func (b sortByteArrays) Len() int {
|
||||
return len(b)
|
||||
}
|
||||
|
||||
func (b sortByteArrays) Less(i, j int) bool {
|
||||
switch bytes.Compare(b[i], b[j]) {
|
||||
case -1:
|
||||
return true
|
||||
case 0, 1:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b sortByteArrays) Swap(i, j int) {
|
||||
b[j], b[i] = b[i], b[j]
|
||||
}
|
||||
|
||||
func SortByteArrays(src [][]byte) [][]byte {
|
||||
sorted := sortByteArrays(src)
|
||||
sort.Sort(sorted)
|
||||
return sorted
|
||||
}
|
||||
|
||||
func TestAll(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
@@ -27,22 +58,58 @@ func TestAll(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
err = db.Put("foo", []byte("bar"))
|
||||
err = db.Put([]byte([]byte("foo")), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
val, err := db.Get("foo")
|
||||
val, err := db.Get([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte("bar"), val)
|
||||
})
|
||||
|
||||
t.Run("Delete", func(t *testing.T) {
|
||||
err := db.Delete("foo")
|
||||
t.Run("Len", func(t *testing.T) {
|
||||
assert.Equal(1, db.Len())
|
||||
})
|
||||
|
||||
t.Run("Has", func(t *testing.T) {
|
||||
assert.True(db.Has([]byte("foo")))
|
||||
})
|
||||
|
||||
t.Run("Keys", func(t *testing.T) {
|
||||
keys := make([][]byte, 0)
|
||||
for key := range db.Keys() {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
assert.Equal([][]byte{[]byte("foo")}, keys)
|
||||
})
|
||||
|
||||
t.Run("Fold", func(t *testing.T) {
|
||||
var (
|
||||
keys [][]byte
|
||||
values [][]byte
|
||||
)
|
||||
|
||||
err := db.Fold(func(key []byte) error {
|
||||
value, err := db.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keys = append(keys, key)
|
||||
values = append(values, value)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(err)
|
||||
_, err = db.Get("foo")
|
||||
assert.Equal([][]byte{[]byte("foo")}, keys)
|
||||
assert.Equal([][]byte{[]byte("bar")}, values)
|
||||
})
|
||||
|
||||
t.Run("Delete", func(t *testing.T) {
|
||||
err := db.Delete([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
_, err = db.Get([]byte("foo"))
|
||||
assert.Error(err)
|
||||
assert.Equal(err.Error(), "error: key not found")
|
||||
assert.Equal(ErrKeyNotFound, err)
|
||||
})
|
||||
|
||||
t.Run("Sync", func(t *testing.T) {
|
||||
@@ -74,22 +141,22 @@ func TestDeletedKeys(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
err = db.Put("foo", []byte("bar"))
|
||||
err = db.Put([]byte("foo"), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
val, err := db.Get("foo")
|
||||
val, err := db.Get([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte("bar"), val)
|
||||
})
|
||||
|
||||
t.Run("Delete", func(t *testing.T) {
|
||||
err := db.Delete("foo")
|
||||
err := db.Delete([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
_, err = db.Get("foo")
|
||||
_, err = db.Get([]byte("foo"))
|
||||
assert.Error(err)
|
||||
assert.Equal("error: key not found", err.Error())
|
||||
assert.Equal(ErrKeyNotFound, err)
|
||||
})
|
||||
|
||||
t.Run("Sync", func(t *testing.T) {
|
||||
@@ -115,9 +182,9 @@ func TestDeletedKeys(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
_, err = db.Get("foo")
|
||||
_, err = db.Get([]byte("foo"))
|
||||
assert.Error(err)
|
||||
assert.Equal("error: key not found", err.Error())
|
||||
assert.Equal(ErrKeyNotFound, err)
|
||||
})
|
||||
|
||||
t.Run("Close", func(t *testing.T) {
|
||||
@@ -127,38 +194,83 @@ func TestDeletedKeys(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
func TestMaxKeySize(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
var db *Bitcask
|
||||
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir, WithMaxKeySize(16))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
key := []byte(strings.Repeat(" ", 17))
|
||||
value := []byte("foobar")
|
||||
err = db.Put(key, value)
|
||||
assert.Error(err)
|
||||
assert.Equal(ErrKeyTooLarge, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMaxValueSize(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
var db *Bitcask
|
||||
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir, WithMaxValueSize(16))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
key := []byte("foo")
|
||||
value := []byte(strings.Repeat(" ", 17))
|
||||
err = db.Put(key, value)
|
||||
assert.Error(err)
|
||||
assert.Equal(ErrValueTooLarge, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStats(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
err error
|
||||
)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
t.Run("Setup", func(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
err error
|
||||
)
|
||||
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir, MaxDatafileSize(1024))
|
||||
db, err = Open(testdir)
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
for i := 0; i < 1024; i++ {
|
||||
err = db.Put(string(i), []byte(strings.Repeat(" ", 1024)))
|
||||
assert.NoError(err)
|
||||
}
|
||||
err := db.Put([]byte("foo"), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
for i := 0; i < 32; i++ {
|
||||
err = db.Put(string(i), []byte(strings.Repeat(" ", 1024)))
|
||||
assert.NoError(err)
|
||||
val, err := db.Get(string(i))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte(strings.Repeat(" ", 1024)), val)
|
||||
}
|
||||
val, err := db.Get([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte("bar"), val)
|
||||
})
|
||||
|
||||
t.Run("Stats", func(t *testing.T) {
|
||||
stats, err := db.Stats()
|
||||
assert.NoError(err)
|
||||
assert.Equal(stats.Datafiles, 0)
|
||||
assert.Equal(stats.Keys, 1)
|
||||
})
|
||||
|
||||
t.Run("Sync", func(t *testing.T) {
|
||||
@@ -171,26 +283,65 @@ func TestMerge(t *testing.T) {
|
||||
assert.NoError(err)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("Merge", func(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
err error
|
||||
)
|
||||
func TestMerge(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
err error
|
||||
)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
t.Run("Setup", func(t *testing.T) {
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir)
|
||||
db, err = Open(testdir, WithMaxDatafileSize(32))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
for i := 0; i < 32; i++ {
|
||||
val, err := db.Get(string(i))
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
err := db.Put([]byte("foo"), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
s1, err := db.Stats()
|
||||
assert.NoError(err)
|
||||
assert.Equal(0, s1.Datafiles)
|
||||
assert.Equal(1, s1.Keys)
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
err := db.Put([]byte("foo"), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte(strings.Repeat(" ", 1024)), val)
|
||||
}
|
||||
})
|
||||
|
||||
s2, err := db.Stats()
|
||||
assert.NoError(err)
|
||||
assert.Equal(5, s2.Datafiles)
|
||||
assert.Equal(1, s2.Keys)
|
||||
assert.True(s2.Size > s1.Size)
|
||||
|
||||
t.Run("Merge", func(t *testing.T) {
|
||||
err := db.Merge()
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
s3, err := db.Stats()
|
||||
assert.NoError(err)
|
||||
assert.Equal(1, s3.Datafiles)
|
||||
assert.Equal(1, s3.Keys)
|
||||
assert.True(s3.Size > s1.Size)
|
||||
assert.True(s3.Size < s2.Size)
|
||||
|
||||
t.Run("Sync", func(t *testing.T) {
|
||||
err = db.Sync()
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Close", func(t *testing.T) {
|
||||
err = db.Close()
|
||||
assert.NoError(err)
|
||||
@@ -198,6 +349,136 @@ func TestMerge(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestConcurrent(t *testing.T) {
|
||||
var (
|
||||
db *Bitcask
|
||||
err error
|
||||
)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
t.Run("Setup", func(t *testing.T) {
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir)
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
err = db.Put([]byte("foo"), []byte("bar"))
|
||||
assert.NoError(err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Concurrent", func(t *testing.T) {
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
f := func(wg *sync.WaitGroup, x int) {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
}()
|
||||
for i := 0; i <= 100; i++ {
|
||||
if i%x == 0 {
|
||||
key := []byte(fmt.Sprintf("k%d", i))
|
||||
value := []byte(fmt.Sprintf("v%d", i))
|
||||
err := db.Put(key, value)
|
||||
assert.NoError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
|
||||
go f(wg, 2)
|
||||
go f(wg, 3)
|
||||
go f(wg, 5)
|
||||
wg.Add(3)
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
f := func(wg *sync.WaitGroup, N int) {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
}()
|
||||
for i := 0; i <= N; i++ {
|
||||
value, err := db.Get([]byte("foo"))
|
||||
assert.NoError(err)
|
||||
assert.Equal([]byte("bar"), value)
|
||||
}
|
||||
}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
|
||||
go f(wg, 100)
|
||||
go f(wg, 100)
|
||||
go f(wg, 100)
|
||||
wg.Add(3)
|
||||
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("Close", func(t *testing.T) {
|
||||
err = db.Close()
|
||||
assert.NoError(err)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestScan(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
assert.NoError(err)
|
||||
|
||||
var db *Bitcask
|
||||
|
||||
t.Run("Setup", func(t *testing.T) {
|
||||
t.Run("Open", func(t *testing.T) {
|
||||
db, err = Open(testdir)
|
||||
assert.NoError(err)
|
||||
})
|
||||
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
var items = map[string][]byte{
|
||||
"1": []byte("1"),
|
||||
"2": []byte("2"),
|
||||
"3": []byte("3"),
|
||||
"food": []byte("pizza"),
|
||||
"foo": []byte([]byte("foo")),
|
||||
"fooz": []byte("fooz ball"),
|
||||
"hello": []byte("world"),
|
||||
}
|
||||
for k, v := range items {
|
||||
err = db.Put([]byte(k), v)
|
||||
assert.NoError(err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Scan", func(t *testing.T) {
|
||||
var (
|
||||
vals [][]byte
|
||||
expected = [][]byte{
|
||||
[]byte("foo"),
|
||||
[]byte("fooz ball"),
|
||||
[]byte("pizza"),
|
||||
}
|
||||
)
|
||||
|
||||
err = db.Scan([]byte("fo"), func(key []byte) error {
|
||||
val, err := db.Get(key)
|
||||
assert.NoError(err)
|
||||
vals = append(vals, val)
|
||||
return nil
|
||||
})
|
||||
vals = SortByteArrays(vals)
|
||||
assert.Equal(expected, vals)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLocking(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
@@ -210,55 +491,181 @@ func TestLocking(t *testing.T) {
|
||||
|
||||
_, err = Open(testdir)
|
||||
assert.Error(err)
|
||||
assert.Equal("error: cannot acquire lock", err.Error())
|
||||
assert.Equal(ErrDatabaseLocked, err)
|
||||
}
|
||||
|
||||
type benchmarkTestCase struct {
|
||||
name string
|
||||
size int
|
||||
}
|
||||
|
||||
func BenchmarkGet(b *testing.B) {
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
db, err := Open(testdir)
|
||||
testdir, err := ioutil.TempDir(currentDir, "bitcask_bench")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
defer os.RemoveAll(testdir)
|
||||
|
||||
err = db.Put("foo", []byte("bar"))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
tests := []benchmarkTestCase{
|
||||
{"128B", 128},
|
||||
{"256B", 256},
|
||||
{"512B", 512},
|
||||
{"1K", 1024},
|
||||
{"2K", 2048},
|
||||
{"4K", 4096},
|
||||
{"8K", 8192},
|
||||
{"16K", 16384},
|
||||
{"32K", 32768},
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
val, err := db.Get("foo")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if string(val) != "bar" {
|
||||
b.Errorf("expected val=bar got=%s", val)
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name, func(b *testing.B) {
|
||||
b.SetBytes(int64(tt.size))
|
||||
|
||||
key := []byte("foo")
|
||||
value := []byte(strings.Repeat(" ", tt.size))
|
||||
|
||||
options := []Option{
|
||||
WithMaxKeySize(len(key)),
|
||||
WithMaxValueSize(tt.size),
|
||||
}
|
||||
db, err := Open(testdir, options...)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
err = db.Put(key, value)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
val, err := db.Get(key)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(val, value) {
|
||||
b.Errorf("unexpected value")
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
db.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPut(b *testing.B) {
|
||||
testdir, err := ioutil.TempDir("", "bitcask")
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []benchmarkTestCase{
|
||||
{"128B", 128},
|
||||
{"256B", 256},
|
||||
{"1K", 1024},
|
||||
{"2K", 2048},
|
||||
{"4K", 4096},
|
||||
{"8K", 8192},
|
||||
{"16K", 16384},
|
||||
{"32K", 32768},
|
||||
}
|
||||
|
||||
variants := map[string][]Option{
|
||||
"NoSync": []Option{
|
||||
WithSync(false),
|
||||
},
|
||||
"Sync": []Option{
|
||||
WithSync(true),
|
||||
},
|
||||
}
|
||||
|
||||
for name, options := range variants {
|
||||
testdir, err := ioutil.TempDir(currentDir, "bitcask_bench")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(testdir)
|
||||
|
||||
db, err := Open(testdir, options...)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name+name, func(b *testing.B) {
|
||||
b.SetBytes(int64(tt.size))
|
||||
|
||||
key := []byte("foo")
|
||||
value := []byte(strings.Repeat(" ", tt.size))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := db.Put(key, value)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkScan(b *testing.B) {
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
testdir, err := ioutil.TempDir(currentDir, "bitcask_bench")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(testdir)
|
||||
|
||||
db, err := Open(testdir)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := db.Put(fmt.Sprintf("key%d", i), []byte("bar"))
|
||||
var items = map[string][]byte{
|
||||
"1": []byte("1"),
|
||||
"2": []byte("2"),
|
||||
"3": []byte("3"),
|
||||
"food": []byte("pizza"),
|
||||
"foo": []byte([]byte("foo")),
|
||||
"fooz": []byte("fooz ball"),
|
||||
"hello": []byte("world"),
|
||||
}
|
||||
for k, v := range items {
|
||||
err := db.Put([]byte(k), v)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
var expected = [][]byte{[]byte("foo"), []byte("food"), []byte("fooz")}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
var keys [][]byte
|
||||
err = db.Scan([]byte("fo"), func(key []byte) error {
|
||||
keys = append(keys, key)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
keys = SortByteArrays(keys)
|
||||
if !reflect.DeepEqual(expected, keys) {
|
||||
b.Fatal(fmt.Errorf("expected keys=#%v got=%#v", expected, keys))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,8 +35,9 @@ func del(path, key string) int {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
err = db.Delete(key)
|
||||
err = db.Delete([]byte(key))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error deleting key")
|
||||
return 1
|
||||
|
||||
142
cmd/bitcask/export.go
Normal file
142
cmd/bitcask/export.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var errNotAllDataWritten = errors.New("error: not all data written")
|
||||
|
||||
var exportCmd = &cobra.Command{
|
||||
Use: "export",
|
||||
Aliases: []string{"backup", "dump"},
|
||||
Short: "Export a database",
|
||||
Long: `This command allows you to export or dump/backup a database's
|
||||
key/values into a long-term portable archival format suitable for backup and
|
||||
restore purposes or migrating from older on-disk formats of Bitcask.
|
||||
|
||||
All key/value pairs are base64 encoded and serialized as JSON one pair per
|
||||
line to form an output stream to either standard output or a file. You can
|
||||
optionally compress the output with standard compression tools such as gzip.`,
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var output string
|
||||
|
||||
path := viper.GetString("path")
|
||||
|
||||
if len(args) == 1 {
|
||||
output = args[0]
|
||||
} else {
|
||||
output = "-"
|
||||
}
|
||||
|
||||
os.Exit(export(path, output))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(exportCmd)
|
||||
|
||||
exportCmd.PersistentFlags().IntP(
|
||||
"with-max-datafile-size", "", bitcask.DefaultMaxDatafileSize,
|
||||
"Maximum size of each datafile",
|
||||
)
|
||||
exportCmd.PersistentFlags().IntP(
|
||||
"with-max-key-size", "", bitcask.DefaultMaxKeySize,
|
||||
"Maximum size of each key",
|
||||
)
|
||||
exportCmd.PersistentFlags().IntP(
|
||||
"with-max-value-size", "", bitcask.DefaultMaxValueSize,
|
||||
"Maximum size of each value",
|
||||
)
|
||||
}
|
||||
|
||||
type kvPair struct {
|
||||
Key string `json:"key"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
func export(path, output string) int {
|
||||
var (
|
||||
err error
|
||||
w io.WriteCloser
|
||||
)
|
||||
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if output == "-" {
|
||||
w = os.Stdout
|
||||
} else {
|
||||
w, err = os.OpenFile(output, os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_TRUNC, 0755)
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("output", output).
|
||||
Error("error opening output for writing")
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
err = db.Fold(func(key []byte) error {
|
||||
value, err := db.Get(key)
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("key", key).
|
||||
Error("error reading key")
|
||||
return err
|
||||
}
|
||||
|
||||
kv := kvPair{
|
||||
Key: base64.StdEncoding.EncodeToString([]byte(key)),
|
||||
Value: base64.StdEncoding.EncodeToString(value),
|
||||
}
|
||||
|
||||
data, err := json.Marshal(&kv)
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("key", key).
|
||||
Error("error serialzing key")
|
||||
return err
|
||||
}
|
||||
|
||||
if n, err := w.Write(data); err != nil || n != len(data) {
|
||||
if err == nil && n != len(data) {
|
||||
err = errNotAllDataWritten
|
||||
}
|
||||
log.WithError(err).
|
||||
WithField("key", key).
|
||||
WithField("n", n).
|
||||
Error("error writing key")
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write([]byte("\n")); err != nil {
|
||||
log.WithError(err).Error("error writing newline")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("path", path).
|
||||
WithField("output", output).
|
||||
Error("error exporting keys")
|
||||
return 2
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -36,8 +36,9 @@ func get(path, key string) int {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
value, err := db.Get(key)
|
||||
value, err := db.Get([]byte(key))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error reading key")
|
||||
return 1
|
||||
|
||||
106
cmd/bitcask/import.go
Normal file
106
cmd/bitcask/import.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var importCmd = &cobra.Command{
|
||||
Use: "import",
|
||||
Aliases: []string{"restore", "read"},
|
||||
Short: "Import a database",
|
||||
Long: `This command allows you to import or restore a database from a
|
||||
previous export/dump using the export command either creating a new database
|
||||
or adding additional key/value pairs to an existing one.`,
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var input string
|
||||
|
||||
path := viper.GetString("path")
|
||||
|
||||
if len(args) == 1 {
|
||||
input = args[0]
|
||||
} else {
|
||||
input = "-"
|
||||
}
|
||||
|
||||
os.Exit(_import(path, input))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(importCmd)
|
||||
}
|
||||
|
||||
func _import(path, input string) int {
|
||||
var (
|
||||
err error
|
||||
r io.ReadCloser
|
||||
)
|
||||
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if input == "-" {
|
||||
r = os.Stdin
|
||||
} else {
|
||||
r, err = os.Open(input)
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("input", input).
|
||||
Error("error opening input for reading")
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
var kv kvPair
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
if err := json.Unmarshal(scanner.Bytes(), &kv); err != nil {
|
||||
log.WithError(err).
|
||||
WithField("input", input).
|
||||
Error("error reading input")
|
||||
return 2
|
||||
}
|
||||
|
||||
key, err := base64.StdEncoding.DecodeString(kv.Key)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error decoding key")
|
||||
return 2
|
||||
}
|
||||
|
||||
value, err := base64.StdEncoding.DecodeString(kv.Value)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error decoding value")
|
||||
return 2
|
||||
}
|
||||
|
||||
if err := db.Put(key, value); err != nil {
|
||||
log.WithError(err).Error("error writing key/value")
|
||||
return 2
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.WithError(err).
|
||||
WithField("input", input).
|
||||
Error("error reading input")
|
||||
return 2
|
||||
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
67
cmd/bitcask/initdb.go
Normal file
67
cmd/bitcask/initdb.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var initdbCmd = &cobra.Command{
|
||||
Use: "initdb",
|
||||
Aliases: []string{"create", "init"},
|
||||
Short: "Initialize a new database",
|
||||
Long: `This initializes a new database with persisted options`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("with-max-datafile-size", cmd.Flags().Lookup("with-max-datafile-size"))
|
||||
viper.SetDefault("with-max-datafile-size", bitcask.DefaultMaxDatafileSize)
|
||||
|
||||
viper.BindPFlag("with-max-key-size", cmd.Flags().Lookup("with-max-key-size"))
|
||||
viper.SetDefault("with-max-key-size", bitcask.DefaultMaxKeySize)
|
||||
|
||||
viper.BindPFlag("with-max-value-size", cmd.Flags().Lookup("with-max-value-size"))
|
||||
viper.SetDefault("with-max-value-size", bitcask.DefaultMaxValueSize)
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
path := viper.GetString("path")
|
||||
|
||||
maxDatafileSize := viper.GetInt("with-max-datafile-size")
|
||||
maxKeySize := viper.GetInt("with-max-key-size")
|
||||
maxValueSize := viper.GetInt("with-max-value-size")
|
||||
|
||||
db, err := bitcask.Open(
|
||||
path,
|
||||
bitcask.WithMaxDatafileSize(maxDatafileSize),
|
||||
bitcask.WithMaxKeySize(maxKeySize),
|
||||
bitcask.WithMaxValueSize(maxValueSize),
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
os.Exit(0)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(initdbCmd)
|
||||
|
||||
initdbCmd.PersistentFlags().IntP(
|
||||
"with-max-datafile-size", "", bitcask.DefaultMaxDatafileSize,
|
||||
"Maximum size of each datafile",
|
||||
)
|
||||
initdbCmd.PersistentFlags().IntP(
|
||||
"with-max-key-size", "", bitcask.DefaultMaxKeySize,
|
||||
"Maximum size of each key",
|
||||
)
|
||||
initdbCmd.PersistentFlags().IntP(
|
||||
"with-max-value-size", "", bitcask.DefaultMaxValueSize,
|
||||
"Maximum size of each value",
|
||||
)
|
||||
}
|
||||
@@ -34,9 +34,10 @@ func keys(path string) int {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
err = db.Fold(func(key string) error {
|
||||
fmt.Printf("%s\n", key)
|
||||
err = db.Fold(func(key []byte) error {
|
||||
fmt.Printf("%s\n", string(key))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -20,28 +20,23 @@ keys.`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
path := viper.GetString("path")
|
||||
force, err := cmd.Flags().GetBool("force")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error parsing force flag")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
os.Exit(merge(path, force))
|
||||
os.Exit(merge(path))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(mergeCmd)
|
||||
|
||||
mergeCmd.Flags().BoolP(
|
||||
"force", "f", false,
|
||||
"Force a re-merge even if .hint files exist",
|
||||
)
|
||||
}
|
||||
|
||||
func merge(path string, force bool) int {
|
||||
err := bitcask.Merge(path, force)
|
||||
func merge(path string) int {
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
|
||||
if err = db.Merge(); err != nil {
|
||||
log.WithError(err).Error("error merging database")
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -13,11 +13,11 @@ import (
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var setCmd = &cobra.Command{
|
||||
Use: "set <key> [<value>]",
|
||||
Aliases: []string{"add"},
|
||||
Short: "Add/Set a new Key/Value pair",
|
||||
Long: `This adds or sets a new key/value pair.
|
||||
var putCmd = &cobra.Command{
|
||||
Use: "put <key> [<value>]",
|
||||
Aliases: []string{"add", "set", "store"},
|
||||
Short: "Adds a new Key/Value pair",
|
||||
Long: `This adds a new key/value pair or modifies an existing one.
|
||||
|
||||
If the value is not specified as an argument it is read from standard input.`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
@@ -33,20 +33,21 @@ If the value is not specified as an argument it is read from standard input.`,
|
||||
value = os.Stdin
|
||||
}
|
||||
|
||||
os.Exit(set(path, key, value))
|
||||
os.Exit(put(path, key, value))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(setCmd)
|
||||
RootCmd.AddCommand(putCmd)
|
||||
}
|
||||
|
||||
func set(path, key string, value io.Reader) int {
|
||||
func put(path, key string, value io.Reader) int {
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(value)
|
||||
if err != nil {
|
||||
@@ -54,7 +55,7 @@ func set(path, key string, value io.Reader) int {
|
||||
return 1
|
||||
}
|
||||
|
||||
err = db.Put(key, data)
|
||||
err = db.Put([]byte(key), data)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error writing key")
|
||||
return 1
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
"github.com/prologic/bitcask/internal"
|
||||
)
|
||||
|
||||
// RootCmd represents the base command when called without any subcommands
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "bitcask",
|
||||
Version: bitcask.FullVersion(),
|
||||
Version: internal.FullVersion(),
|
||||
Short: "Command-line tools for bitcask",
|
||||
Long: `This is the command-line tool to interact with a bitcask database.
|
||||
|
||||
|
||||
60
cmd/bitcask/scan.go
Normal file
60
cmd/bitcask/scan.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var scanCmd = &cobra.Command{
|
||||
Use: "scan <prefix>",
|
||||
Aliases: []string{"search", "find"},
|
||||
Short: "Perform a prefix scan for keys",
|
||||
Long: `This performa a prefix scan for keys starting with the given
|
||||
prefix. This uses a Trie to search for matching keys and returns all matched
|
||||
keys.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
path := viper.GetString("path")
|
||||
|
||||
prefix := args[0]
|
||||
|
||||
os.Exit(scan(path, prefix))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(scanCmd)
|
||||
}
|
||||
|
||||
func scan(path, prefix string) int {
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
err = db.Scan([]byte(prefix), func(key []byte) error {
|
||||
value, err := db.Get(key)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error reading key")
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", string(value))
|
||||
log.WithField("key", key).WithField("value", value).Debug("key/value")
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error scanning keys")
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
55
cmd/bitcask/stats.go
Normal file
55
cmd/bitcask/stats.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
)
|
||||
|
||||
var statsCmd = &cobra.Command{
|
||||
Use: "stats",
|
||||
Aliases: []string{},
|
||||
Short: "Display statis about the Database",
|
||||
Long: `This displays statistics about the Database"`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
path := viper.GetString("path")
|
||||
|
||||
os.Exit(stats(path))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(statsCmd)
|
||||
}
|
||||
|
||||
func stats(path string) int {
|
||||
db, err := bitcask.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error opening database")
|
||||
return 1
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
stats, err := db.Stats()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error getting stats")
|
||||
return 1
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(stats, "", " ")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error marshalling stats")
|
||||
return 1
|
||||
}
|
||||
|
||||
fmt.Println(string(data))
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -10,13 +10,14 @@ import (
|
||||
"github.com/tidwall/redcon"
|
||||
|
||||
"github.com/prologic/bitcask"
|
||||
"github.com/prologic/bitcask/internal"
|
||||
)
|
||||
|
||||
var (
|
||||
bind string
|
||||
debug bool
|
||||
version bool
|
||||
maxDatafileSize int64
|
||||
maxDatafileSize int
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -30,7 +31,7 @@ func init() {
|
||||
|
||||
flag.StringVarP(&bind, "bind", "b", ":6379", "interface and port to bind to")
|
||||
|
||||
flag.Int64Var(&maxDatafileSize, "max-datafile-size", 1<<20, "maximum datafile size in bytes")
|
||||
flag.IntVar(&maxDatafileSize, "max-datafile-size", 1<<20, "maximum datafile size in bytes")
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -43,7 +44,7 @@ func main() {
|
||||
}
|
||||
|
||||
if version {
|
||||
fmt.Printf("bitcaskd version %s", bitcask.FullVersion())
|
||||
fmt.Printf("bitcaskd version %s", internal.FullVersion())
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
@@ -60,7 +61,7 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
log.WithField("bind", bind).WithField("path", path).Infof("starting bitcaskd v%s", bitcask.FullVersion())
|
||||
log.WithField("bind", bind).WithField("path", path).Infof("starting bitcaskd v%s", internal.FullVersion())
|
||||
|
||||
err = redcon.ListenAndServe(bind,
|
||||
func(conn redcon.Conn, cmd redcon.Command) {
|
||||
@@ -75,8 +76,8 @@ func main() {
|
||||
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
|
||||
return
|
||||
}
|
||||
key := string(cmd.Args[0])
|
||||
value := cmd.Args[1]
|
||||
key := cmd.Args[1]
|
||||
value := cmd.Args[2]
|
||||
err = db.Put(key, value)
|
||||
if err != nil {
|
||||
conn.WriteString(fmt.Sprintf("ERR: %s", err))
|
||||
@@ -88,19 +89,35 @@ func main() {
|
||||
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
|
||||
return
|
||||
}
|
||||
key := string(cmd.Args[0])
|
||||
key := cmd.Args[1]
|
||||
value, err := db.Get(key)
|
||||
if err != nil {
|
||||
conn.WriteNull()
|
||||
} else {
|
||||
conn.WriteBulk(value)
|
||||
}
|
||||
case "keys":
|
||||
conn.WriteArray(db.Len())
|
||||
for key := range db.Keys() {
|
||||
conn.WriteBulk([]byte(key))
|
||||
}
|
||||
case "exists":
|
||||
if len(cmd.Args) != 2 {
|
||||
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
|
||||
return
|
||||
}
|
||||
key := cmd.Args[1]
|
||||
if db.Has(key) {
|
||||
conn.WriteInt(1)
|
||||
} else {
|
||||
conn.WriteInt(0)
|
||||
}
|
||||
case "del":
|
||||
if len(cmd.Args) != 2 {
|
||||
conn.WriteError("ERR wrong number of arguments for '" + string(cmd.Args[0]) + "' command")
|
||||
return
|
||||
}
|
||||
key := string(cmd.Args[0])
|
||||
key := cmd.Args[1]
|
||||
err := db.Delete(key)
|
||||
if err != nil {
|
||||
conn.WriteInt(0)
|
||||
|
||||
139
datafile.go
139
datafile.go
@@ -1,139 +0,0 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
pb "github.com/prologic/bitcask/proto"
|
||||
"github.com/prologic/bitcask/streampb"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultDatafileFilename = "%09d.data"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrReadonly = errors.New("error: read only datafile")
|
||||
)
|
||||
|
||||
type Datafile struct {
|
||||
id int
|
||||
r *os.File
|
||||
w *os.File
|
||||
dec *streampb.Decoder
|
||||
enc *streampb.Encoder
|
||||
}
|
||||
|
||||
func NewDatafile(path string, id int, readonly bool) (*Datafile, error) {
|
||||
var (
|
||||
r *os.File
|
||||
w *os.File
|
||||
err error
|
||||
)
|
||||
|
||||
fn := filepath.Join(path, fmt.Sprintf(DefaultDatafileFilename, id))
|
||||
|
||||
if !readonly {
|
||||
w, err = os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r, err = os.Open(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dec := streampb.NewDecoder(r)
|
||||
enc := streampb.NewEncoder(w)
|
||||
|
||||
return &Datafile{
|
||||
id: id,
|
||||
r: r,
|
||||
w: w,
|
||||
dec: dec,
|
||||
enc: enc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (df *Datafile) Name() string {
|
||||
return df.r.Name()
|
||||
}
|
||||
|
||||
func (df *Datafile) Close() error {
|
||||
if df.w == nil {
|
||||
return df.r.Close()
|
||||
}
|
||||
|
||||
err := df.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return df.w.Close()
|
||||
}
|
||||
|
||||
func (df *Datafile) Sync() error {
|
||||
if df.w == nil {
|
||||
return nil
|
||||
}
|
||||
return df.w.Sync()
|
||||
}
|
||||
|
||||
func (df *Datafile) Size() (int64, error) {
|
||||
var (
|
||||
stat os.FileInfo
|
||||
err error
|
||||
)
|
||||
|
||||
if df.w == nil {
|
||||
stat, err = df.r.Stat()
|
||||
} else {
|
||||
stat, err = df.w.Stat()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return stat.Size(), nil
|
||||
}
|
||||
|
||||
func (df *Datafile) Read() (pb.Entry, error) {
|
||||
var e pb.Entry
|
||||
return e, df.dec.Decode(&e)
|
||||
}
|
||||
|
||||
func (df *Datafile) ReadAt(index int64) (e pb.Entry, err error) {
|
||||
_, err = df.r.Seek(index, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return df.Read()
|
||||
}
|
||||
|
||||
func (df *Datafile) Write(e pb.Entry) (int64, error) {
|
||||
if df.w == nil {
|
||||
return -1, ErrReadonly
|
||||
}
|
||||
|
||||
stat, err := df.w.Stat()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
index := stat.Size()
|
||||
|
||||
e.Index = index
|
||||
e.Timestamp = time.Now().Unix()
|
||||
|
||||
err = df.enc.Encode(&e)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return index, nil
|
||||
}
|
||||
3
doc.go
Normal file
3
doc.go
Normal file
@@ -0,0 +1,3 @@
|
||||
// Package bitcask implements a high-performance key-value store based on a
|
||||
// WAL and LSM.
|
||||
package bitcask
|
||||
13
doc_test.go
Normal file
13
doc_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package bitcask
|
||||
|
||||
func Example() {
|
||||
_, _ = Open("path/to/db")
|
||||
}
|
||||
|
||||
func Example_withOptions() {
|
||||
opts := []Option{
|
||||
WithMaxKeySize(1024),
|
||||
WithMaxValueSize(4096),
|
||||
}
|
||||
_, _ = Open("path/to/db", opts...)
|
||||
}
|
||||
17
entry.go
17
entry.go
@@ -1,17 +0,0 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"hash/crc32"
|
||||
|
||||
pb "github.com/prologic/bitcask/proto"
|
||||
)
|
||||
|
||||
func NewEntry(key string, value []byte) pb.Entry {
|
||||
crc := crc32.ChecksumIEEE(value)
|
||||
|
||||
return pb.Entry{
|
||||
CRC: crc,
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
27
go.mod
27
go.mod
@@ -1,20 +1,23 @@
|
||||
module github.com/prologic/bitcask
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/gofrs/flock v0.7.1
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/protobuf v1.2.0
|
||||
github.com/gorilla/websocket v1.4.0 // indirect
|
||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/magiconair/properties v1.8.1 // indirect
|
||||
github.com/pelletier/go-toml v1.4.0 // indirect
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prologic/msgbus v0.1.1
|
||||
github.com/prometheus/client_golang v0.9.2 // indirect
|
||||
github.com/sirupsen/logrus v1.3.0
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/plar/go-adaptive-radix-tree v1.0.1
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/spf13/afero v1.2.2 // indirect
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.3
|
||||
github.com/spf13/viper v1.3.1
|
||||
github.com/spf13/viper v1.4.0
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/tidwall/redcon v0.9.0
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1
|
||||
github.com/tidwall/redcon v1.0.0
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect
|
||||
golang.org/x/text v0.3.2 // indirect
|
||||
)
|
||||
|
||||
160
go.sum
160
go.sum
@@ -1,89 +1,189 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=
|
||||
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
|
||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
|
||||
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/plar/go-adaptive-radix-tree v1.0.1 h1:J+2qrXaKWLACw59s8SlTVYYxWjlUr/BlCsfkAzn96/0=
|
||||
github.com/plar/go-adaptive-radix-tree v1.0.1/go.mod h1:Ot8d28EII3i7Lv4PSvBlF8ejiD/CtRYDuPsySJbSaK8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prologic/msgbus v0.1.1/go.mod h1:B3Qu4/U2FP08x93jUzp9E8bl155+cIgDH2DUGRK6OZk=
|
||||
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
|
||||
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.1 h1:5+8j8FTpnFV4nEImW/ofkzEt8VoOiLXxdYIDsB73T38=
|
||||
github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tidwall/redcon v0.9.0 h1:tiT9DLAoohsdNaFg9Si5dRsv9+FjvZYnhMOEtSFwBqA=
|
||||
github.com/tidwall/redcon v0.9.0/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
|
||||
github.com/tidwall/redcon v1.0.0 h1:D4AzzJ81Afeh144fgnj5H0aSVPBBJ5RI9Rzj0zThU+E=
|
||||
github.com/tidwall/redcon v1.0.0/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56 h1:estk1glOnSVeJ9tdEZZc5mAMDZk5lNJNyJ6DvrBkTEU=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38VS1jM=
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
105
internal/codec.go
Normal file
105
internal/codec.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// KeySize is the width in bytes of the big-endian key-length
	// prefix written at the start of each encoded entry.
	KeySize = 4
	// ValueSize is the width in bytes of the big-endian value-length
	// prefix written immediately after the key length.
	ValueSize = 8
	// checksumSize is the width in bytes of the CRC-32 checksum
	// (Entry.Checksum) appended after the value bytes.
	checksumSize = 4
)
|
||||
|
||||
// NewEncoder creates a streaming Entry encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
// Encoder wraps an underlying io.Writer and allows you to stream
// Entry encodings on it.
type Encoder struct {
	// w buffers writes to the underlying writer; Encode flushes it
	// after each complete entry frame.
	w *bufio.Writer
}
|
||||
|
||||
// Encode takes any Entry and streams it to the underlying writer.
|
||||
// Messages are framed with a key-length and value-length prefix.
|
||||
func (e *Encoder) Encode(msg Entry) (int64, error) {
|
||||
var bufKeyValue = make([]byte, KeySize+ValueSize)
|
||||
binary.BigEndian.PutUint32(bufKeyValue[:KeySize], uint32(len(msg.Key)))
|
||||
binary.BigEndian.PutUint64(bufKeyValue[KeySize:KeySize+ValueSize], uint64(len(msg.Value)))
|
||||
if _, err := e.w.Write(bufKeyValue); err != nil {
|
||||
return 0, errors.Wrap(err, "failed writing key & value length prefix")
|
||||
}
|
||||
|
||||
if _, err := e.w.Write(msg.Key); err != nil {
|
||||
return 0, errors.Wrap(err, "failed writing key data")
|
||||
}
|
||||
if _, err := e.w.Write(msg.Value); err != nil {
|
||||
return 0, errors.Wrap(err, "failed writing value data")
|
||||
}
|
||||
|
||||
bufChecksumSize := bufKeyValue[:checksumSize]
|
||||
binary.BigEndian.PutUint32(bufChecksumSize, msg.Checksum)
|
||||
if _, err := e.w.Write(bufChecksumSize); err != nil {
|
||||
return 0, errors.Wrap(err, "failed writing checksum data")
|
||||
}
|
||||
|
||||
if err := e.w.Flush(); err != nil {
|
||||
return 0, errors.Wrap(err, "failed flushing data")
|
||||
}
|
||||
|
||||
return int64(KeySize + ValueSize + len(msg.Key) + len(msg.Value) + checksumSize), nil
|
||||
}
|
||||
|
||||
// NewDecoder creates a streaming Entry decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
|
||||
// Decoder wraps an underlying io.Reader and allows you to stream
// Entry decodings on it.
type Decoder struct {
	// r supplies the sequence of frames; Decode consumes them in order.
	r io.Reader
}
|
||||
|
||||
func (d *Decoder) Decode(v *Entry) (int64, error) {
|
||||
prefixBuf := make([]byte, KeySize+ValueSize)
|
||||
|
||||
_, err := io.ReadFull(d.r, prefixBuf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
actualKeySize, actualValueSize := GetKeyValueSizes(prefixBuf)
|
||||
buf := make([]byte, actualKeySize+actualValueSize+checksumSize)
|
||||
if _, err = io.ReadFull(d.r, buf); err != nil {
|
||||
return 0, errors.Wrap(translateError(err), "failed reading saved data")
|
||||
}
|
||||
|
||||
DecodeWithoutPrefix(buf, actualKeySize, v)
|
||||
return int64(KeySize + ValueSize + actualKeySize + actualValueSize + checksumSize), nil
|
||||
}
|
||||
|
||||
func GetKeyValueSizes(buf []byte) (uint64, uint64) {
|
||||
actualKeySize := binary.BigEndian.Uint32(buf[:KeySize])
|
||||
actualValueSize := binary.BigEndian.Uint64(buf[KeySize:])
|
||||
|
||||
return uint64(actualKeySize), actualValueSize
|
||||
}
|
||||
|
||||
func DecodeWithoutPrefix(buf []byte, valueOffset uint64, v *Entry) {
|
||||
v.Key = buf[:valueOffset]
|
||||
v.Value = buf[valueOffset : len(buf)-checksumSize]
|
||||
v.Checksum = binary.BigEndian.Uint32(buf[len(buf)-checksumSize:])
|
||||
}
|
||||
|
||||
func translateError(err error) error {
|
||||
if err == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
110
internal/codec_index.go
Normal file
110
internal/codec_index.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
art "github.com/plar/go-adaptive-radix-tree"
|
||||
)
|
||||
|
||||
const (
	// Int32Size and Int64Size are the byte widths of the fixed-size
	// big-endian integers used by the index encoding.
	Int32Size = 4
	Int64Size = 8
	// FileIDSize, OffsetSize and SizeSize are the encoded widths of
	// the three Item fields, in the order they are written.
	FileIDSize = Int32Size
	OffsetSize = Int64Size
	SizeSize   = Int64Size
)
|
||||
|
||||
func ReadBytes(r io.Reader) ([]byte, error) {
|
||||
s := make([]byte, Int32Size)
|
||||
_, err := io.ReadFull(r, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
size := binary.BigEndian.Uint32(s)
|
||||
b := make([]byte, size)
|
||||
_, err = io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func WriteBytes(b []byte, w io.Writer) (int, error) {
|
||||
s := make([]byte, Int32Size)
|
||||
binary.BigEndian.PutUint32(s, uint32(len(b)))
|
||||
n, err := w.Write(s)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
m, err := w.Write(b)
|
||||
if err != nil {
|
||||
return (n + m), err
|
||||
}
|
||||
return (n + m), nil
|
||||
}
|
||||
|
||||
func ReadItem(r io.Reader) (Item, error) {
|
||||
buf := make([]byte, (FileIDSize + OffsetSize + SizeSize))
|
||||
_, err := io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return Item{}, err
|
||||
}
|
||||
|
||||
return Item{
|
||||
FileID: int(binary.BigEndian.Uint32(buf[:FileIDSize])),
|
||||
Offset: int64(binary.BigEndian.Uint64(buf[FileIDSize:(FileIDSize + OffsetSize)])),
|
||||
Size: int64(binary.BigEndian.Uint64(buf[(FileIDSize + OffsetSize):])),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func WriteItem(item Item, w io.Writer) (int, error) {
|
||||
buf := make([]byte, (FileIDSize + OffsetSize + SizeSize))
|
||||
binary.BigEndian.PutUint32(buf[:FileIDSize], uint32(item.FileID))
|
||||
binary.BigEndian.PutUint64(buf[FileIDSize:(FileIDSize+OffsetSize)], uint64(item.Offset))
|
||||
binary.BigEndian.PutUint64(buf[(FileIDSize+OffsetSize):], uint64(item.Size))
|
||||
n, err := w.Write(buf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func ReadIndex(r io.Reader, t art.Tree) error {
|
||||
for {
|
||||
key, err := ReadBytes(r)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
item, err := ReadItem(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Insert(key, item)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteIndex(t art.Tree, w io.Writer) (err error) {
|
||||
t.ForEach(func(node art.Node) bool {
|
||||
_, err = WriteBytes(node.Key(), w)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
item := node.Value().(Item)
|
||||
_, err := WriteItem(item, w)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
175
internal/datafile.go
Normal file
175
internal/datafile.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/exp/mmap"
|
||||
)
|
||||
|
||||
const (
	// DefaultDatafileFilename is the printf pattern for datafile
	// names: a zero-padded 9-digit numeric file ID plus ".data".
	DefaultDatafileFilename = "%09d.data"
)
|
||||
|
||||
var (
	// ErrReadonly is returned by Write when the datafile was opened
	// without a write handle (readonly mode).
	ErrReadonly = errors.New("error: read only datafile")
	// ErrReadError is returned by ReadAt when fewer bytes than
	// requested could be read.
	ErrReadError = errors.New("error: read error")

	// mxMemPool is not referenced anywhere in this file.
	// NOTE(review): possibly used elsewhere in the package, or dead
	// code — verify before removing.
	mxMemPool sync.RWMutex
)
|
||||
|
||||
// Datafile is a single append-only file of encoded entries. The
// embedded RWMutex guards offset and the shared encoder/decoder
// state.
type Datafile struct {
	sync.RWMutex

	id     int            // numeric file ID (encoded into the on-disk filename)
	r      *os.File       // plain read handle used by the streaming decoder
	ra     *mmap.ReaderAt // memory-mapped reader; ReadAt uses it when the file is read-only
	w      *os.File       // append-only write handle; nil for a read-only datafile
	offset int64          // current end-of-file offset, i.e. where the next entry lands
	dec    *Decoder       // streaming decoder over r
	enc    *Encoder       // streaming encoder over w
}
|
||||
|
||||
func NewDatafile(path string, id int, readonly bool) (*Datafile, error) {
|
||||
var (
|
||||
r *os.File
|
||||
ra *mmap.ReaderAt
|
||||
w *os.File
|
||||
err error
|
||||
)
|
||||
|
||||
fn := filepath.Join(path, fmt.Sprintf(DefaultDatafileFilename, id))
|
||||
|
||||
if !readonly {
|
||||
w, err = os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r, err = os.Open(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := r.Stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error calling Stat()")
|
||||
}
|
||||
|
||||
ra, err = mmap.Open(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
offset := stat.Size()
|
||||
|
||||
dec := NewDecoder(r)
|
||||
enc := NewEncoder(w)
|
||||
|
||||
return &Datafile{
|
||||
id: id,
|
||||
r: r,
|
||||
ra: ra,
|
||||
w: w,
|
||||
offset: offset,
|
||||
dec: dec,
|
||||
enc: enc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FileID returns the numeric id of the datafile (the integer encoded
// into its on-disk filename).
func (df *Datafile) FileID() int {
	return df.id
}
|
||||
|
||||
// Name returns the path of the underlying file, as passed to Open.
func (df *Datafile) Name() string {
	return df.r.Name()
}
|
||||
|
||||
func (df *Datafile) Close() error {
|
||||
defer func() {
|
||||
df.ra.Close()
|
||||
df.r.Close()
|
||||
}()
|
||||
|
||||
// Readonly Datafile -- Nothing further to close on the write side
|
||||
if df.w == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := df.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return df.w.Close()
|
||||
}
|
||||
|
||||
func (df *Datafile) Sync() error {
|
||||
if df.w == nil {
|
||||
return nil
|
||||
}
|
||||
return df.w.Sync()
|
||||
}
|
||||
|
||||
// Size returns the current size of the datafile in bytes — the
// offset at which the next entry would be appended.
func (df *Datafile) Size() int64 {
	df.RLock()
	defer df.RUnlock()
	return df.offset
}
|
||||
|
||||
func (df *Datafile) Read() (e Entry, n int64, err error) {
|
||||
df.Lock()
|
||||
defer df.Unlock()
|
||||
|
||||
n, err = df.dec.Decode(&e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (df *Datafile) ReadAt(index, size int64) (e Entry, err error) {
|
||||
var n int
|
||||
|
||||
b := make([]byte, size)
|
||||
|
||||
if df.w == nil {
|
||||
n, err = df.ra.ReadAt(b, index)
|
||||
} else {
|
||||
n, err = df.r.ReadAt(b, index)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if int64(n) != size {
|
||||
err = ErrReadError
|
||||
return
|
||||
}
|
||||
|
||||
valueOffset, _ := GetKeyValueSizes(b)
|
||||
DecodeWithoutPrefix(b[KeySize+ValueSize:], valueOffset, &e)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (df *Datafile) Write(e Entry) (int64, int64, error) {
|
||||
if df.w == nil {
|
||||
return -1, 0, ErrReadonly
|
||||
}
|
||||
|
||||
df.Lock()
|
||||
defer df.Unlock()
|
||||
|
||||
e.Offset = df.offset
|
||||
|
||||
n, err := df.enc.Encode(e)
|
||||
if err != nil {
|
||||
return -1, 0, err
|
||||
}
|
||||
df.offset += n
|
||||
|
||||
return e.Offset, n, nil
|
||||
}
|
||||
23
internal/entry.go
Normal file
23
internal/entry.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"hash/crc32"
|
||||
)
|
||||
|
||||
// Entry represents a key/value in the database
|
||||
type Entry struct {
|
||||
Checksum uint32
|
||||
Key []byte
|
||||
Offset int64
|
||||
Value []byte
|
||||
}
|
||||
|
||||
func NewEntry(key, value []byte) Entry {
|
||||
checksum := crc32.ChecksumIEEE(value)
|
||||
|
||||
return Entry{
|
||||
Checksum: checksum,
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
7
internal/item.go
Normal file
7
internal/item.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package internal
|
||||
|
||||
// Item records where a key's current value lives on disk: which
// datafile it is in, the byte offset of the entry within that file,
// and the entry's encoded size.
type Item struct {
	FileID int   `json:"fileid"`
	Offset int64 `json:"offset"`
	Size   int64 `json:"size"`
}
|
||||
56
internal/utils.go
Normal file
56
internal/utils.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func Exists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func DirSize(path string) (int64, error) {
|
||||
var size int64
|
||||
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
size += info.Size()
|
||||
}
|
||||
return err
|
||||
})
|
||||
return size, err
|
||||
}
|
||||
|
||||
func GetDatafiles(path string) ([]string, error) {
|
||||
fns, err := filepath.Glob(fmt.Sprintf("%s/*.data", path))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Strings(fns)
|
||||
return fns, nil
|
||||
}
|
||||
|
||||
// ParseIds extracts the numeric file IDs from a list of datafile
// paths. Entries whose extension is not ".data" are skipped; the
// returned IDs are sorted in ascending order. A non-numeric ".data"
// basename is an error.
func ParseIds(fns []string) ([]int, error) {
	var ids []int
	for _, fn := range fns {
		base := filepath.Base(fn)
		ext := filepath.Ext(base)
		if ext != ".data" {
			continue
		}
		id64, err := strconv.ParseInt(strings.TrimSuffix(base, ext), 10, 32)
		if err != nil {
			return nil, err
		}
		ids = append(ids, int(id64))
	}
	sort.Ints(ids)
	return ids, nil
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package bitcask
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -1,4 +1,4 @@
|
||||
package bitcask
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
92
keydir.go
92
keydir.go
@@ -1,92 +0,0 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Item locates a key's entry: the numeric datafile ID, the entry's
// index (byte position) within that file, and its write timestamp.
type Item struct {
	FileID    int
	Index     int64
	Timestamp int64
}
|
||||
|
||||
// Keydir is the in-memory map from key to the on-disk location of
// its latest value. The embedded RWMutex guards kv.
type Keydir struct {
	sync.RWMutex
	// kv maps key -> location of its most recently written entry.
	kv map[string]Item
}
|
||||
|
||||
func NewKeydir() *Keydir {
|
||||
return &Keydir{
|
||||
kv: make(map[string]Item),
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Keydir) Add(key string, fileid int, index, timestamp int64) {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
|
||||
k.kv[key] = Item{
|
||||
FileID: fileid,
|
||||
Index: index,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Keydir) Get(key string) (Item, bool) {
|
||||
k.RLock()
|
||||
defer k.RUnlock()
|
||||
|
||||
item, ok := k.kv[key]
|
||||
return item, ok
|
||||
}
|
||||
|
||||
func (k *Keydir) Delete(key string) {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
|
||||
delete(k.kv, key)
|
||||
}
|
||||
|
||||
func (k *Keydir) Keys() chan string {
|
||||
ch := make(chan string)
|
||||
go func() {
|
||||
for k := range k.kv {
|
||||
ch <- k
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
func (k *Keydir) Bytes() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
enc := gob.NewEncoder(&buf)
|
||||
err := enc.Encode(k.kv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (k *Keydir) Save(fn string) error {
|
||||
data, err := k.Bytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(fn, data, 0644)
|
||||
}
|
||||
|
||||
func NewKeydirFromBytes(r io.Reader) (*Keydir, error) {
|
||||
k := NewKeydir()
|
||||
dec := gob.NewDecoder(r)
|
||||
err := dec.Decode(&k.kv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return k, nil
|
||||
}
|
||||
110
options.go
Normal file
110
options.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package bitcask
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const (
	// DefaultMaxDatafileSize is the default maximum datafile size in bytes
	DefaultMaxDatafileSize = 1 << 20 // 1MB

	// DefaultMaxKeySize is the default maximum key size in bytes
	DefaultMaxKeySize = 64 // 64 bytes

	// DefaultMaxValueSize is the default maximum value size in bytes
	DefaultMaxValueSize = 1 << 16 // 64KB (1<<16 = 65536 bytes)
)
|
||||
|
||||
// Option is a functional option: it receives the config under
// construction and may mutate it, returning a non-nil error to
// abort configuration.
type Option func(*config) error
|
||||
|
||||
// config holds the tunable parameters for a database instance. It is
// unexported; callers set fields through the With* options.
type config struct {
	maxDatafileSize int  // maximum datafile size in bytes
	maxKeySize      int  // maximum key size in bytes
	maxValueSize    int  // maximum value size in bytes
	sync            bool // call Sync() on every write when true (see WithSync)
}
|
||||
|
||||
func (c *config) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
MaxDatafileSize int `json:"max_datafile_size"`
|
||||
MaxKeySize int `json:"max_key_size"`
|
||||
MaxValueSize int `json:"max_value_size"`
|
||||
Sync bool `json:"sync"`
|
||||
}{
|
||||
MaxDatafileSize: c.maxDatafileSize,
|
||||
MaxKeySize: c.maxKeySize,
|
||||
MaxValueSize: c.maxValueSize,
|
||||
Sync: c.sync,
|
||||
})
|
||||
}
|
||||
|
||||
func getConfig(path string) (*config, error) {
|
||||
type Config struct {
|
||||
MaxDatafileSize int `json:"max_datafile_size"`
|
||||
MaxKeySize int `json:"max_key_size"`
|
||||
MaxValueSize int `json:"max_value_size"`
|
||||
Sync bool `json:"sync"`
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
|
||||
data, err := ioutil.ReadFile(filepath.Join(path, "config.json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &config{
|
||||
maxDatafileSize: cfg.MaxDatafileSize,
|
||||
maxKeySize: cfg.MaxKeySize,
|
||||
maxValueSize: cfg.MaxValueSize,
|
||||
sync: cfg.Sync,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newDefaultConfig returns a config populated with the package
// defaults. sync is left at its zero value (false).
func newDefaultConfig() *config {
	return &config{
		maxDatafileSize: DefaultMaxDatafileSize,
		maxKeySize:      DefaultMaxKeySize,
		maxValueSize:    DefaultMaxValueSize,
	}
}
|
||||
|
||||
// WithMaxDatafileSize sets the maximum datafile size option.
// The returned Option never fails; the error return satisfies the
// Option signature.
func WithMaxDatafileSize(size int) Option {
	return func(cfg *config) error {
		cfg.maxDatafileSize = size
		return nil
	}
}
|
||||
|
||||
// WithMaxKeySize sets the maximum key size option.
// The returned Option never fails.
func WithMaxKeySize(size int) Option {
	return func(cfg *config) error {
		cfg.maxKeySize = size
		return nil
	}
}
|
||||
|
||||
// WithMaxValueSize sets the maximum value size option.
// The returned Option never fails.
func WithMaxValueSize(size int) Option {
	return func(cfg *config) error {
		cfg.maxValueSize = size
		return nil
	}
}
|
||||
|
||||
// WithSync causes Sync() to be called on every key/value written increasing
// durability and safety at the expense of performance.
// The returned Option never fails.
func WithSync(sync bool) Option {
	return func(cfg *config) error {
		cfg.sync = sync
		return nil
	}
}
|
||||
@@ -1,3 +0,0 @@
|
||||
package proto
|
||||
|
||||
//go:generate protoc --go_out=. entry.proto
|
||||
@@ -1,108 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: entry.proto
|
||||
|
||||
package proto
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Entry struct {
|
||||
CRC uint32 `protobuf:"varint,1,opt,name=CRC,proto3" json:"CRC,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=Key,proto3" json:"Key,omitempty"`
|
||||
Index int64 `protobuf:"varint,3,opt,name=Index,proto3" json:"Index,omitempty"`
|
||||
Value []byte `protobuf:"bytes,4,opt,name=Value,proto3" json:"Value,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,5,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Entry) Reset() { *m = Entry{} }
|
||||
func (m *Entry) String() string { return proto.CompactTextString(m) }
|
||||
func (*Entry) ProtoMessage() {}
|
||||
func (*Entry) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_entry_4f5906245d08394f, []int{0}
|
||||
}
|
||||
func (m *Entry) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Entry.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Entry) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Entry.Merge(dst, src)
|
||||
}
|
||||
func (m *Entry) XXX_Size() int {
|
||||
return xxx_messageInfo_Entry.Size(m)
|
||||
}
|
||||
func (m *Entry) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Entry.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Entry proto.InternalMessageInfo
|
||||
|
||||
func (m *Entry) GetCRC() uint32 {
|
||||
if m != nil {
|
||||
return m.CRC
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Entry) GetKey() string {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Entry) GetIndex() int64 {
|
||||
if m != nil {
|
||||
return m.Index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Entry) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entry) GetTimestamp() int64 {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Entry)(nil), "proto.Entry")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("entry.proto", fileDescriptor_entry_4f5906245d08394f) }
|
||||
|
||||
var fileDescriptor_entry_4f5906245d08394f = []byte{
|
||||
// 134 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29,
|
||||
0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, 0xa5, 0x5c, 0xac, 0xae,
|
||||
0x20, 0x51, 0x21, 0x01, 0x2e, 0x66, 0xe7, 0x20, 0x67, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xde, 0x20,
|
||||
0x10, 0x13, 0x24, 0xe2, 0x9d, 0x5a, 0x29, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a,
|
||||
0x89, 0x70, 0xb1, 0x7a, 0xe6, 0xa5, 0xa4, 0x56, 0x48, 0x30, 0x2b, 0x30, 0x6a, 0x30, 0x07, 0x41,
|
||||
0x38, 0x20, 0xd1, 0xb0, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x16, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x08,
|
||||
0x47, 0x48, 0x86, 0x8b, 0x33, 0x24, 0x33, 0x37, 0xb5, 0xb8, 0x24, 0x31, 0xb7, 0x40, 0x82, 0x15,
|
||||
0xac, 0x1e, 0x21, 0x90, 0xc4, 0x06, 0xb6, 0xdd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x07, 0x99,
|
||||
0x47, 0xb9, 0x93, 0x00, 0x00, 0x00,
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
syntax = "proto3";

package proto;

// Entry is the wire format for a single key/value record.
message Entry {
  uint32 CRC = 1;       // checksum; presumably CRC-32 of Value — verify against writer
  string Key = 2;
  int64 Index = 3;      // NOTE(review): presumably the entry's byte position in its datafile
  bytes Value = 4;
  int64 Timestamp = 5;  // NOTE(review): presumably the write time — confirm units
}
|
||||
@@ -1,89 +0,0 @@
|
||||
package streampb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// prefixSize is the number of bytes we preallocate for storing
	// our big endian length prefix buffer.
	prefixSize = 8
)
|
||||
|
||||
// NewEncoder creates a streaming protobuf encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w, prefixBuf: make([]byte, prefixSize)}
|
||||
}
|
||||
|
||||
// Encoder wraps an underlying io.Writer and allows you to stream
// proto encodings on it.
type Encoder struct {
	w io.Writer
	// prefixBuf is a reusable scratch buffer for the 8-byte length
	// prefix, avoiding an allocation per message.
	prefixBuf []byte
}
|
||||
|
||||
// Encode takes any proto.Message and streams it to the underlying writer.
|
||||
// Messages are framed with a length prefix.
|
||||
func (e *Encoder) Encode(msg proto.Message) error {
|
||||
buf, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
binary.BigEndian.PutUint64(e.prefixBuf, uint64(len(buf)))
|
||||
|
||||
if _, err := e.w.Write(e.prefixBuf); err != nil {
|
||||
return errors.Wrap(err, "failed writing length prefix")
|
||||
}
|
||||
|
||||
_, err = e.w.Write(buf)
|
||||
return errors.Wrap(err, "failed writing marshaled data")
|
||||
}
|
||||
|
||||
// NewDecoder creates a streaming protobuf decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
r: r,
|
||||
prefixBuf: make([]byte, prefixSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Decoder wraps an underlying io.Reader and allows you to stream
// proto decodings on it.
type Decoder struct {
	r io.Reader
	// prefixBuf is a reusable scratch buffer for the 8-byte length
	// prefix, avoiding an allocation per message.
	prefixBuf []byte
}
|
||||
|
||||
// Decode takes a proto.Message and unmarshals the next payload in the
|
||||
// underlying io.Reader. It returns an EOF when it's done.
|
||||
func (d *Decoder) Decode(v proto.Message) error {
|
||||
_, err := io.ReadFull(d.r, d.prefixBuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := binary.BigEndian.Uint64(d.prefixBuf)
|
||||
|
||||
buf := make([]byte, n)
|
||||
|
||||
idx := uint64(0)
|
||||
for idx < n {
|
||||
m, err := d.r.Read(buf[idx:n])
|
||||
if err != nil {
|
||||
return errors.Wrap(translateError(err), "failed reading marshaled data")
|
||||
}
|
||||
idx += uint64(m)
|
||||
}
|
||||
return proto.Unmarshal(buf[:n], v)
|
||||
}
|
||||
|
||||
func translateError(err error) error {
|
||||
if err == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -20,6 +20,6 @@ fi
|
||||
|
||||
echo "Releasing ${TAG} ..."
|
||||
|
||||
git tag -a -s -m "Relase ${TAG}" "${TAG}"
|
||||
git tag -a -s -m "Release ${TAG}" "${TAG}"
|
||||
git push --tags
|
||||
goreleaser release --rm-dist
|
||||
|
||||
Reference in New Issue
Block a user