JetStream major refactor for name changes.

MsgSet -> Stream
Observable -> Consumer

Signed-off-by: Derek Collison <derek@nats.io>
This commit is contained in:
Derek Collison
2020-01-15 21:53:29 -08:00
parent 23da0b08ac
commit 47c28b2fb0
15 changed files with 1573 additions and 1584 deletions

View File

@@ -130,7 +130,7 @@ type ServiceRespType uint8
// Service response types. Defaults to a singleton.
const (
Singleton ServiceRespType = iota
Stream
Streamed
Chunked
)
@@ -139,8 +139,8 @@ func (rt ServiceRespType) String() string {
switch rt {
case Singleton:
return "Singleton"
case Stream:
return "Stream"
case Streamed:
return "Streamed"
case Chunked:
return "Chunked"
}
@@ -1865,7 +1865,7 @@ func (s *Server) updateAccountClaims(a *Account, ac *jwt.AccountClaims) {
rt := Singleton
switch e.ResponseType {
case jwt.ResponseTypeStream:
rt = Stream
rt = Streamed
case jwt.ResponseTypeChunked:
rt = Chunked
}

View File

@@ -1,4 +1,4 @@
// Copyright 2018 The NATS Authors
// Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -443,8 +443,8 @@ func TestAccountParseConfigImportsExports(t *testing.T) {
if ea == nil {
t.Fatalf("Expected to get a non-nil exportAuth for service")
}
if ea.respType != Stream {
t.Fatalf("Expected to get a Stream response type, got %q", ea.respType)
if ea.respType != Streamed {
t.Fatalf("Expected to get a Streamed response type, got %q", ea.respType)
}
ea = natsAcc.exports.services["nats.photo"]
if ea == nil {
@@ -1686,7 +1686,7 @@ func TestCrossAccountServiceResponseTypes(t *testing.T) {
}
// Add in the service export for the requests. Make it public.
if err := cfoo.acc.AddServiceExportWithResponse("test.request", Stream, nil); err != nil {
if err := cfoo.acc.AddServiceExportWithResponse("test.request", Streamed, nil); err != nil {
t.Fatalf("Error adding account service export to client foo: %v", err)
}
// Now add in the route mapping for request to be routed to the foo account.
@@ -1824,7 +1824,7 @@ func TestCrossAccountServiceResponseLeaks(t *testing.T) {
}
// Add in the service export for the requests. Make it public.
if err := cfoo.acc.AddServiceExportWithResponse("test.request", Stream, nil); err != nil {
if err := cfoo.acc.AddServiceExportWithResponse("test.request", Streamed, nil); err != nil {
t.Fatalf("Error adding account service export to client foo: %v", err)
}
// Now add in the route mapping for request to be routed to the foo account.

View File

@@ -27,16 +27,16 @@ import (
"time"
)
type ObservableInfo struct {
Name string `json:"name"`
Config ObservableConfig `json:"configuration"`
State ObservableState `json:"state"`
type ConsumerInfo struct {
Name string `json:"name"`
Config ConsumerConfig `json:"config"`
State ConsumerState `json:"state"`
}
type ObservableConfig struct {
type ConsumerConfig struct {
Delivery string `json:"delivery_subject"`
Durable string `json:"durable_name,omitempty"`
MsgSetSeq uint64 `json:"msg_set_seq,omitempty"`
StreamSeq uint64 `json:"stream_seq,omitempty"`
StartTime time.Time `json:"start_time,omitempty"`
DeliverAll bool `json:"deliver_all,omitempty"`
DeliverLast bool `json:"deliver_last,omitempty"`
@@ -45,24 +45,24 @@ type ObservableConfig struct {
MaxDeliver int `json:"max_deliver,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
ReplayPolicy ReplayPolicy `json:"replay_policy"`
SampleFrequency string `json:"sample_frequency,omitempty"`
SampleFrequency string `json:"sample_freq,omitempty"`
}
type CreateObservableRequest struct {
MsgSet string `json:"msg_set_name"`
Config ObservableConfig `json:"config"`
type CreateConsumerRequest struct {
Stream string `json:"stream_name"`
Config ConsumerConfig `json:"config"`
}
type ObservableAckSampleEvent struct {
MsgSet string `json:"msg_set"`
Observable string `json:"obs"`
ObsSeq uint64 `json:"obs_seq"`
MsgSetSeq uint64 `json:"msg_set_seq"`
Delay int64 `json:"ack_time"`
Deliveries uint64 `json:"delivered"`
type ConsumerAckEvent struct {
Stream string `json:"stream"`
Consumer string `json:"consumer"`
ConsumerSeq uint64 `json:"consumer_seq"`
StreamSeq uint64 `json:"stream_seq"`
Delay int64 `json:"ack_time"`
Deliveries uint64 `json:"delivered"`
}
// AckPolicy determines how the observable should acknowledge delivered messages.
// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int
const (
@@ -85,7 +85,7 @@ func (a AckPolicy) String() string {
}
}
// ReplayPolicy determines how the observable should replay messages it already has queued in the message set.
// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int
const (
@@ -116,13 +116,13 @@ var (
AckNext = []byte("+NXT")
)
// Observable is a jetstream observable/subscriber.
type Observable struct {
// Consumer is a jetstream consumer.
type Consumer struct {
mu sync.Mutex
mset *MsgSet
mset *Stream
acc *Account
name string
msetName string
streamName string
sseq uint64
dseq uint64
adflr uint64
@@ -138,8 +138,8 @@ type Observable struct {
rdc map[uint64]uint64
maxdc uint64
waiting []string
config ObservableConfig
store ObservableStore
config ConsumerConfig
store ConsumerStore
active bool
replay bool
dtmr *time.Timer
@@ -159,9 +159,9 @@ const (
JsDeleteWaitTimeDefault = 5 * time.Second
)
func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error) {
func (mset *Stream) AddConsumer(config *ConsumerConfig) (*Consumer, error) {
if config == nil {
return nil, fmt.Errorf("observable config required")
return nil, fmt.Errorf("consumer config required")
}
var err error
@@ -169,20 +169,20 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
// For now expect a literal subject if its not empty. Empty means work queue mode (pull mode).
if config.Delivery != _EMPTY_ {
if !subjectIsLiteral(config.Delivery) {
return nil, fmt.Errorf("observable delivery subject has wildcards")
return nil, fmt.Errorf("consumer delivery subject has wildcards")
}
if mset.deliveryFormsCycle(config.Delivery) {
return nil, fmt.Errorf("observable delivery subject forms a cycle")
return nil, fmt.Errorf("consumer delivery subject forms a cycle")
}
} else {
// Pull mode / work queue mode require explicit ack.
if config.AckPolicy != AckExplicit {
return nil, fmt.Errorf("observable in pull mode requires explicit ack policy")
return nil, fmt.Errorf("consumer in pull mode requires explicit ack policy")
}
// They are also required to be durable since otherwise we will not know when to
// clean them up.
if config.Durable == _EMPTY_ {
return nil, fmt.Errorf("observable in pull mode requires a durable name")
return nil, fmt.Errorf("consumer in pull mode requires a durable name")
}
}
@@ -198,25 +198,25 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
// Make sure any partition subject is also a literal.
if config.FilterSubject != "" {
if !subjectIsLiteral(config.FilterSubject) {
return nil, fmt.Errorf("observable filter subject has wildcards")
return nil, fmt.Errorf("consumer filter subject has wildcards")
}
// Make sure this is a valid partition of the interest subjects.
if !mset.validSubject(config.FilterSubject) {
return nil, fmt.Errorf("observable filter subject is not a valid subset of the interest subjects")
return nil, fmt.Errorf("consumer filter subject is not a valid subset of the interest subjects")
}
if config.AckPolicy == AckAll {
return nil, fmt.Errorf("observable with filter subject can not have an ack policy of ack all")
return nil, fmt.Errorf("consumer with filter subject can not have an ack policy of ack all")
}
}
// Check on start position conflicts.
noTime := time.Time{}
if config.MsgSetSeq > 0 && (config.StartTime != noTime || config.DeliverAll || config.DeliverLast) {
return nil, fmt.Errorf("observable starting position conflict")
if config.StreamSeq > 0 && (config.StartTime != noTime || config.DeliverAll || config.DeliverLast) {
return nil, fmt.Errorf("consumer starting position conflict")
} else if config.StartTime != noTime && (config.DeliverAll || config.DeliverLast) {
return nil, fmt.Errorf("observable starting position conflict")
return nil, fmt.Errorf("consumer starting position conflict")
} else if config.DeliverAll && config.DeliverLast {
return nil, fmt.Errorf("observable starting position conflict")
return nil, fmt.Errorf("consumer starting position conflict")
}
sampleFreq := 0
@@ -224,7 +224,7 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
s := strings.TrimSuffix(config.SampleFrequency, "%")
sampleFreq, err = strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("failed to parse observable sampling configuration: %v", err)
return nil, fmt.Errorf("failed to parse consumer sampling configuration: %v", err)
}
}
@@ -232,38 +232,38 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
mset.mu.Lock()
// Check for any limits.
if mset.config.MaxObservables > 0 && len(mset.obs) >= mset.config.MaxObservables {
if mset.config.MaxConsumers > 0 && len(mset.consumers) >= mset.config.MaxConsumers {
mset.mu.Unlock()
return nil, fmt.Errorf("maximum observables limit reached")
return nil, fmt.Errorf("maximum consumers limit reached")
}
// Check on msgset type conflicts.
// Check on stream type conflicts.
switch mset.config.Retention {
case WorkQueuePolicy:
// Force explicit acks here.
if config.AckPolicy != AckExplicit {
mset.mu.Unlock()
return nil, fmt.Errorf("workqueue message set requires explicit ack")
return nil, fmt.Errorf("workqueue stream requires explicit ack")
}
if len(mset.obs) > 0 {
if len(mset.consumers) > 0 {
if config.FilterSubject == _EMPTY_ {
mset.mu.Unlock()
return nil, fmt.Errorf("multiple non-filtered observables not allowed on workqueue message set")
return nil, fmt.Errorf("multiple non-filtered observables not allowed on workqueue stream")
} else if !mset.partitionUnique(config.FilterSubject) {
// We have a partition but it is not unique amongst the others.
mset.mu.Unlock()
return nil, fmt.Errorf("filtered observable not unique on workqueue message set")
return nil, fmt.Errorf("filtered consumer not unique on workqueue stream")
}
}
if !config.DeliverAll {
mset.mu.Unlock()
return nil, fmt.Errorf("observable must be deliver all on workqueue message set")
return nil, fmt.Errorf("consumer must be deliver all on workqueue stream")
}
}
// Set name, which will be durable name if set, otherwise we create one at random.
o := &Observable{mset: mset,
o := &Consumer{mset: mset,
config: *config,
dsubj: config.Delivery,
active: true,
@@ -272,17 +272,17 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
}
if isDurableObservable(config) {
if isDurableConsumer(config) {
o.name = config.Durable
} else {
o.name = createObservableName()
o.name = createConsumerName()
}
// already under lock, mset.Name() would deadlock
o.msetName = mset.config.Name
o.ackSampleT = JetStreamObservableAckSamplePre + "." + o.msetName + "." + o.name
o.streamName = mset.config.Name
o.ackSampleT = JetStreamConsumerAckSamplePre + "." + o.streamName + "." + o.name
store, err := mset.store.ObservableStore(o.name, config)
store, err := mset.store.ConsumerStore(o.name, config)
if err != nil {
mset.mu.Unlock()
return nil, fmt.Errorf("error creating store for observable: %v", err)
@@ -301,25 +301,25 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
c := mset.client
if c == nil {
mset.mu.Unlock()
return nil, fmt.Errorf("message set not valid")
return nil, fmt.Errorf("stream not valid")
}
s, a := c.srv, c.acc
o.acc = a
// Check if we already have this one registered.
if eo, ok := mset.obs[o.name]; ok {
if eo, ok := mset.consumers[o.name]; ok {
mset.mu.Unlock()
if !o.isDurable() || !o.isPushMode() {
return nil, fmt.Errorf("observable already exists")
return nil, fmt.Errorf("consumer already exists")
}
// If we are here we have already registered this durable. If it is still active that is an error.
if eo.Active() {
return nil, fmt.Errorf("observable already exists and is still active")
return nil, fmt.Errorf("consumer already exists and is still active")
}
// Since we are here this means we have a potentially new durable so we should update here.
// Check that configs are the same.
if !configsEqualSansDelivery(o.config, eo.config) {
return nil, fmt.Errorf("observable replacement durable config not the same")
return nil, fmt.Errorf("consumer replacement durable config not the same")
}
// Once we are here we have a replacement push-based durable.
eo.updateDeliverySubject(o.config.Delivery)
@@ -346,7 +346,7 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
o.reqSub = sub
}
}
mset.obs[o.name] = o
mset.consumers[o.name] = o
mset.mu.Unlock()
// If push mode, register for notifications on interest.
@@ -358,7 +358,7 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
// Check if we are not durable that the delivery subject has interest.
if !o.isDurable() && !o.active {
o.Delete()
return nil, fmt.Errorf("observable requires interest for delivery subject when ephemeral")
return nil, fmt.Errorf("consumer requires interest for delivery subject when ephemeral")
}
}
@@ -378,7 +378,7 @@ func (mset *MsgSet) AddObservable(config *ObservableConfig) (*Observable, error)
// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *Observable) hasDeliveryInterest(localInterest bool) bool {
func (o *Consumer) hasDeliveryInterest(localInterest bool) bool {
o.mu.Lock()
mset := o.mset
if mset == nil {
@@ -424,7 +424,7 @@ func (o *Observable) hasDeliveryInterest(localInterest bool) bool {
}
// This processes and update to the local interest for a delivery subject.
func (o *Observable) updateDeliveryInterest(localInterest bool) {
func (o *Consumer) updateDeliveryInterest(localInterest bool) {
interest := o.hasDeliveryInterest(localInterest)
o.mu.Lock()
@@ -447,20 +447,20 @@ func (o *Observable) updateDeliveryInterest(localInterest bool) {
o.mu.Unlock()
if shouldSignal {
mset.signalObservers()
mset.signalConsumers()
}
}
// Config returns the observable's configuration.
func (o *Observable) Config() ObservableConfig {
// Config returns the consumer's configuration.
func (o *Consumer) Config() ConsumerConfig {
o.mu.Lock()
defer o.mu.Unlock()
return o.config
}
// This is a config change for the delivery subject for a
// push based observable.
func (o *Observable) updateDeliverySubject(newDelivery string) {
// push based consumer.
func (o *Consumer) updateDeliverySubject(newDelivery string) {
// Update the config and the dsubj
o.mu.Lock()
defer o.mu.Unlock()
@@ -483,14 +483,14 @@ func (o *Observable) updateDeliverySubject(newDelivery string) {
}
// Check that configs are equal but allow delivery subjects to be different.
func configsEqualSansDelivery(a, b ObservableConfig) bool {
func configsEqualSansDelivery(a, b ConsumerConfig) bool {
// These were copied in so can set Delivery here.
a.Delivery, b.Delivery = _EMPTY_, _EMPTY_
return a == b
}
// Process a message for the ack reply subject delivered with a message.
func (o *Observable) processAck(_ *subscription, _ *client, subject, reply string, msg []byte) {
func (o *Consumer) processAck(_ *subscription, _ *client, subject, reply string, msg []byte) {
sseq, dseq, dcount := o.ReplyInfo(subject)
switch {
case len(msg) == 0, bytes.Equal(msg, AckAck):
@@ -506,7 +506,7 @@ func (o *Observable) processAck(_ *subscription, _ *client, subject, reply strin
}
// Used to process a working update to delay redelivery.
func (o *Observable) progressUpdate(seq uint64) {
func (o *Consumer) progressUpdate(seq uint64) {
o.mu.Lock()
if o.pending != nil {
if _, ok := o.pending[seq]; ok {
@@ -517,8 +517,8 @@ func (o *Observable) progressUpdate(seq uint64) {
}
// Process a NAK.
func (o *Observable) processNak(sseq, dseq uint64) {
var mset *MsgSet
func (o *Consumer) processNak(sseq, dseq uint64) {
var mset *Stream
o.mu.Lock()
// Check for out of range.
if dseq <= o.adflr || dseq > o.dseq {
@@ -539,24 +539,24 @@ func (o *Observable) processNak(sseq, dseq uint64) {
}
o.mu.Unlock()
if mset != nil {
mset.signalObservers()
mset.signalConsumers()
}
}
// This will restore the state from disk.
func (o *Observable) readStoredState() error {
func (o *Consumer) readStoredState() error {
if o.store == nil {
return nil
}
state, err := o.store.State()
if err == nil && state != nil {
// FIXME(dlc) - re-apply state.
o.dseq = state.Delivered.ObsSeq
o.sseq = state.Delivered.SetSeq
o.adflr = state.AckFloor.ObsSeq
o.asflr = state.AckFloor.SetSeq
o.dseq = state.Delivered.ConsumerSeq
o.sseq = state.Delivered.StreamSeq
o.adflr = state.AckFloor.ConsumerSeq
o.asflr = state.AckFloor.StreamSeq
o.pending = state.Pending
o.rdc = state.Redelivery
o.rdc = state.Redelivered
}
// Setup tracking timer if we have restored pending.
@@ -566,7 +566,7 @@ func (o *Observable) readStoredState() error {
return err
}
func (o *Observable) updateStateLoop() {
func (o *Consumer) updateStateLoop() {
o.mu.Lock()
fch := o.fch
qch := o.qch
@@ -585,17 +585,17 @@ func (o *Observable) updateStateLoop() {
time.Sleep(25 * time.Millisecond)
o.mu.Lock()
if o.store != nil {
state := &ObservableState{
state := &ConsumerState{
Delivered: SequencePair{
ObsSeq: o.dseq,
SetSeq: o.sseq,
ConsumerSeq: o.dseq,
StreamSeq: o.sseq,
},
AckFloor: SequencePair{
ObsSeq: o.adflr,
SetSeq: o.asflr,
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
Pending: o.pending,
Redelivery: o.rdc,
Pending: o.pending,
Redelivered: o.rdc,
}
// FIXME(dlc) - Hold onto any errors.
o.store.Update(state)
@@ -605,22 +605,22 @@ func (o *Observable) updateStateLoop() {
}
}
// Info returns our current observable state.
func (o *Observable) Info() *ObservableInfo {
// Info returns our current consumer state.
func (o *Consumer) Info() *ConsumerInfo {
o.mu.Lock()
defer o.mu.Unlock()
info := &ObservableInfo{
info := &ConsumerInfo{
Name: o.name,
Config: o.config,
State: ObservableState{
State: ConsumerState{
// We track these internally as next to deliver, hence the -1.
Delivered: SequencePair{
ObsSeq: o.dseq - 1,
SetSeq: o.sseq - 1,
ConsumerSeq: o.dseq - 1,
StreamSeq: o.sseq - 1,
},
AckFloor: SequencePair{
ObsSeq: o.adflr,
SetSeq: o.asflr,
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
},
}
@@ -637,7 +637,7 @@ func (o *Observable) Info() *ObservableInfo {
for k, v := range o.rdc {
r[k] = v
}
info.State.Redelivery = r
info.State.Redelivered = r
}
return info
@@ -645,7 +645,7 @@ func (o *Observable) Info() *ObservableInfo {
// Will update the underlying store.
// Lock should be held.
func (o *Observable) updateStore() {
func (o *Consumer) updateStore() {
if o.store == nil {
return
}
@@ -657,36 +657,31 @@ func (o *Observable) updateStore() {
}
// shouldSample lets us know if we are sampling metrics on acks.
func (o *Observable) shouldSample() bool {
if o.sfreq <= 0 {
func (o *Consumer) shouldSample() bool {
switch {
case o.sfreq <= 0:
return false
}
if o.sfreq >= 100 {
case o.sfreq >= 100:
return true
}
// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
// hit for those with sampling enabled and its not the default
if mrand.Int31n(100) <= o.sfreq {
return true
}
return false
return mrand.Int31n(100) <= o.sfreq
}
func (o *Observable) sampleAck(sseq, dseq, dcount uint64) {
func (o *Consumer) sampleAck(sseq, dseq, dcount uint64) {
if !o.shouldSample() {
return
}
e := &ObservableAckSampleEvent{
MsgSet: o.msetName,
Observable: o.name,
ObsSeq: dseq,
MsgSetSeq: sseq,
Delay: int64(time.Now().UnixNano()) - o.pending[sseq],
Deliveries: dcount,
e := &ConsumerAckEvent{
Stream: o.streamName,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Delay: int64(time.Now().UnixNano()) - o.pending[sseq],
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
@@ -701,14 +696,14 @@ func (o *Observable) sampleAck(sseq, dseq, dcount uint64) {
}
// Process an ack for a message.
func (o *Observable) ackMsg(sseq, dseq, dcount uint64) {
func (o *Consumer) ackMsg(sseq, dseq, dcount uint64) {
var sagap uint64
o.mu.Lock()
switch o.config.AckPolicy {
case AckExplicit:
o.sampleAck(sseq, dseq, dcount)
delete(o.pending, sseq)
// Observables sequence numbers can skip during redlivery since
// Consumers sequence numbers can skip during redlivery since
// they always increment. So if we do not have any pending treat
// as all scenario below. Otherwise check that we filled in a gap.
// TODO(dlc) - check this.
@@ -735,8 +730,8 @@ func (o *Observable) ackMsg(sseq, dseq, dcount uint64) {
mset := o.mset
o.mu.Unlock()
// Let the owning message set know if we are interest or workqueue retention based.
if mset != nil && mset.config.Retention != StreamPolicy {
// Let the owning stream know if we are interest or workqueue retention based.
if mset != nil && mset.config.Retention != LimitsPolicy {
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
@@ -749,7 +744,7 @@ func (o *Observable) ackMsg(sseq, dseq, dcount uint64) {
}
// Check if we need an ack for this store seq.
func (o *Observable) needAck(sseq uint64) bool {
func (o *Consumer) needAck(sseq uint64) bool {
var na bool
o.mu.Lock()
switch o.config.AckPolicy {
@@ -778,7 +773,7 @@ func batchSizeFromMsg(msg []byte) int {
// processNextMsgReq will process a request for the next message available. A nil message payload means deliver
// a single message. If the payload is a number parseable with Atoi(), then we will send a batch of messages without
// requiring another request to this endpoint, or an ACK.
func (o *Observable) processNextMsgReq(_ *subscription, _ *client, _, reply string, msg []byte) {
func (o *Consumer) processNextMsgReq(_ *subscription, _ *client, _, reply string, msg []byte) {
// Check payload here to see if they sent in batch size.
batchSize := batchSizeFromMsg(msg)
@@ -804,7 +799,7 @@ func (o *Observable) processNextMsgReq(_ *subscription, _ *client, _, reply stri
// Increase the delivery count for this message.
// ONLY used on redelivery semantics.
// Lock should be held.
func (o *Observable) incDeliveryCount(sseq uint64) uint64 {
func (o *Consumer) incDeliveryCount(sseq uint64) uint64 {
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
@@ -815,9 +810,9 @@ func (o *Observable) incDeliveryCount(sseq uint64) uint64 {
// Get next available message from underlying store.
// Is partition aware and redeliver aware.
// Lock should be held.
func (o *Observable) getNextMsg() (string, []byte, uint64, uint64, error) {
func (o *Consumer) getNextMsg() (string, []byte, uint64, uint64, error) {
if o.mset == nil {
return _EMPTY_, nil, 0, 0, fmt.Errorf("observable not valid")
return _EMPTY_, nil, 0, 0, fmt.Errorf("consumer not valid")
}
for {
seq, dcount := o.sseq, uint64(1)
@@ -853,14 +848,14 @@ func (o *Observable) getNextMsg() (string, []byte, uint64, uint64, error) {
}
// Returns if we should be doing a non-instant replay of stored messages.
func (o *Observable) needReplay() bool {
func (o *Consumer) needReplay() bool {
o.mu.Lock()
doReplay := o.replay
o.mu.Unlock()
return doReplay
}
func (o *Observable) clearReplayState() {
func (o *Consumer) clearReplayState() {
o.mu.Lock()
o.replay = false
o.mu.Unlock()
@@ -868,7 +863,7 @@ func (o *Observable) clearReplayState() {
// Wait for pull requests.
// FIXME(dlc) - for short wait periods is ok but should signal when waiting comes in.
func (o *Observable) waitForPullRequests(wait time.Duration) {
func (o *Consumer) waitForPullRequests(wait time.Duration) {
o.mu.Lock()
qch := o.qch
if qch == nil || !o.isPullMode() || len(o.waiting) > 0 {
@@ -883,7 +878,7 @@ func (o *Observable) waitForPullRequests(wait time.Duration) {
}
// This function is responsible for message replay that is not instant/firehose.
func (o *Observable) processReplay() error {
func (o *Consumer) processReplay() error {
defer o.clearReplayState()
o.mu.Lock()
@@ -893,11 +888,11 @@ func (o *Observable) processReplay() error {
o.mu.Unlock()
if mset == nil {
return fmt.Errorf("observable not valid")
return fmt.Errorf("consumer not valid")
}
// Grab last queued up for us before we start.
lseq := mset.Stats().LastSeq
lseq := mset.State().LastSeq
var lts int64 // last time stamp seen.
// If we are in pull mode, wait up to the waittime to have
@@ -914,7 +909,7 @@ func (o *Observable) processReplay() error {
mset = o.mset
if mset == nil {
o.mu.Unlock()
return fmt.Errorf("observable not valid")
return fmt.Errorf("consumer not valid")
}
// If push mode but we have no interest wait for it to show up.
@@ -937,7 +932,7 @@ func (o *Observable) processReplay() error {
o.mu.Unlock()
select {
case <-qch:
return fmt.Errorf("observable not valid")
return fmt.Errorf("consumer not valid")
case <-time.After(delay):
}
o.mu.Lock()
@@ -974,7 +969,7 @@ func (o *Observable) processReplay() error {
return nil
}
func (o *Observable) loopAndDeliverMsgs(s *Server, a *Account) {
func (o *Consumer) loopAndDeliverMsgs(s *Server, a *Account) {
// On startup check to see if we are in a a reply situtation where replay policy is not instant.
// Process the replay, return on error.
if o.needReplay() && o.processReplay() != nil {
@@ -984,7 +979,7 @@ func (o *Observable) loopAndDeliverMsgs(s *Server, a *Account) {
// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
for {
var (
mset *MsgSet
mset *Stream
seq, dcnt uint64
subj, dsubj string
msg []byte
@@ -992,7 +987,7 @@ func (o *Observable) loopAndDeliverMsgs(s *Server, a *Account) {
)
o.mu.Lock()
// observable is closed when mset is set to nil.
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
@@ -1040,13 +1035,13 @@ func (o *Observable) loopAndDeliverMsgs(s *Server, a *Account) {
}
}
func (o *Observable) ackReply(sseq, dseq, dcount uint64) string {
func (o *Consumer) ackReply(sseq, dseq, dcount uint64) string {
return fmt.Sprintf(o.ackReplyT, dcount, sseq, dseq)
}
// deliverCurrentMsg is the hot path to deliver a message that was just received.
// Will return if the message was delivered or not.
func (o *Observable) deliverCurrentMsg(subj string, msg []byte, seq uint64) bool {
func (o *Consumer) deliverCurrentMsg(subj string, msg []byte, seq uint64) bool {
o.mu.Lock()
if seq != o.sseq {
o.mu.Unlock()
@@ -1094,7 +1089,7 @@ func (o *Observable) deliverCurrentMsg(subj string, msg []byte, seq uint64) bool
// Deliver a msg to the observable.
// Lock should be held and o.mset validated to be non-nil.
func (o *Observable) deliverMsg(dsubj, subj string, msg []byte, seq, dcount uint64) {
func (o *Consumer) deliverMsg(dsubj, subj string, msg []byte, seq, dcount uint64) {
o.mset.sendq <- &jsPubMsg{dsubj, subj, o.ackReply(seq, o.dseq, dcount), msg, o, seq}
if o.config.AckPolicy == AckNone {
o.adflr = o.dseq
@@ -1108,7 +1103,7 @@ func (o *Observable) deliverMsg(dsubj, subj string, msg []byte, seq, dcount uint
// Tracks our outstanding pending acks. Only applicable to AckExplicit mode.
// Lock should be held.
func (o *Observable) trackPending(seq uint64) {
func (o *Consumer) trackPending(seq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]int64)
}
@@ -1118,9 +1113,9 @@ func (o *Observable) trackPending(seq uint64) {
o.pending[seq] = time.Now().UnixNano()
}
// didNotDeliver is called when a delivery for an observable message failed.
// didNotDeliver is called when a delivery for a consumer message failed.
// Depending on our state, we will process the failure.
func (o *Observable) didNotDeliver(seq uint64) {
func (o *Consumer) didNotDeliver(seq uint64) {
o.mu.Lock()
mset := o.mset
if mset == nil {
@@ -1144,14 +1139,14 @@ func (o *Observable) didNotDeliver(seq uint64) {
}
o.mu.Unlock()
if shouldSignal {
mset.signalObservers()
mset.signalConsumers()
}
}
// This checks if we already have this sequence queued for redelivery.
// FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
// Lock should be held.
func (o *Observable) onRedeliverQueue(seq uint64) bool {
func (o *Consumer) onRedeliverQueue(seq uint64) bool {
for _, rseq := range o.rdq {
if rseq == seq {
return true
@@ -1161,7 +1156,7 @@ func (o *Observable) onRedeliverQueue(seq uint64) bool {
}
// Checks the pending messages.
func (o *Observable) checkPending() {
func (o *Consumer) checkPending() {
now := time.Now().UnixNano()
shouldSignal := false
@@ -1198,23 +1193,23 @@ func (o *Observable) checkPending() {
o.mu.Unlock()
if shouldSignal {
mset.signalObservers()
mset.signalConsumers()
}
}
// SeqFromReply will extract a sequence number from a reply subject.
func (o *Observable) SeqFromReply(reply string) uint64 {
func (o *Consumer) SeqFromReply(reply string) uint64 {
_, seq, _ := o.ReplyInfo(reply)
return seq
}
// SetSeqFromReply will extract the message set sequence from the reply subject.
func (o *Observable) SetSeqFromReply(reply string) uint64 {
// StreamSeqFromReply will extract the stream sequence from the reply subject.
func (o *Consumer) StreamSeqFromReply(reply string) uint64 {
seq, _, _ := o.ReplyInfo(reply)
return seq
}
func (o *Observable) ReplyInfo(reply string) (sseq, dseq, dcount uint64) {
func (o *Consumer) ReplyInfo(reply string) (sseq, dseq, dcount uint64) {
n, err := fmt.Sscanf(reply, o.ackReplyT, &dcount, &sseq, &dseq)
if err != nil || n != 3 {
return 0, 0, 0
@@ -1223,7 +1218,7 @@ func (o *Observable) ReplyInfo(reply string) (sseq, dseq, dcount uint64) {
}
// NextSeq returns the next delivered sequence number for this observable.
func (o *Observable) NextSeq() uint64 {
func (o *Consumer) NextSeq() uint64 {
o.mu.Lock()
dseq := o.dseq
o.mu.Unlock()
@@ -1232,8 +1227,8 @@ func (o *Observable) NextSeq() uint64 {
// This will select the store seq to start with based on the
// partition subject.
func (o *Observable) selectSubjectLast() {
stats := o.mset.store.Stats()
func (o *Consumer) selectSubjectLast() {
stats := o.mset.store.State()
// FIXME(dlc) - this is linear and can be optimized by store layer.
for seq := stats.LastSeq; seq >= stats.FirstSeq; seq-- {
subj, _, _, err := o.mset.store.LoadMsg(seq)
@@ -1248,10 +1243,10 @@ func (o *Observable) selectSubjectLast() {
}
// Will select the starting sequence.
func (o *Observable) selectStartingSeqNo() {
stats := o.mset.store.Stats()
func (o *Consumer) selectStartingSeqNo() {
stats := o.mset.store.State()
noTime := time.Time{}
if o.config.MsgSetSeq == 0 {
if o.config.StreamSeq == 0 {
if o.config.DeliverAll {
o.sseq = stats.FirstSeq
} else if o.config.DeliverLast {
@@ -1269,7 +1264,7 @@ func (o *Observable) selectStartingSeqNo() {
o.sseq = stats.LastSeq + 1
}
} else {
o.sseq = o.config.MsgSetSeq
o.sseq = o.config.StreamSeq
}
if stats.FirstSeq == 0 {
@@ -1288,25 +1283,25 @@ func (o *Observable) selectStartingSeqNo() {
}
// Test whether a config represents a durable subscriber.
func isDurableObservable(config *ObservableConfig) bool {
func isDurableConsumer(config *ConsumerConfig) bool {
return config != nil && config.Durable != _EMPTY_
}
func (o *Observable) isDurable() bool {
func (o *Consumer) isDurable() bool {
return o.config.Durable != _EMPTY_
}
// Are we in push mode, delivery subject, etc.
func (o *Observable) isPushMode() bool {
func (o *Consumer) isPushMode() bool {
return o.config.Delivery != _EMPTY_
}
func (o *Observable) isPullMode() bool {
func (o *Consumer) isPullMode() bool {
return o.config.Delivery == _EMPTY_
}
// Name returns the name of this observable.
func (o *Observable) Name() string {
func (o *Consumer) Name() string {
o.mu.Lock()
n := o.name
o.mu.Unlock()
@@ -1314,31 +1309,31 @@ func (o *Observable) Name() string {
}
// For now size of 6 for randomly created names.
const randObservableNameLen = 6
const randConsumerNameLen = 6
func createObservableName() string {
func createConsumerName() string {
var b [64]byte
rand.Read(b[:])
sha := sha256.New()
sha.Write(b[:])
return fmt.Sprintf("%x", sha.Sum(nil))[:randObservableNameLen]
return fmt.Sprintf("%x", sha.Sum(nil))[:randConsumerNameLen]
}
// DeleteObservable will delete the observable from this message set.
func (mset *MsgSet) DeleteObservable(o *Observable) error {
// DeleteConsumer will delete the consumer from this stream.
func (mset *Stream) DeleteConsumer(o *Consumer) error {
return o.Delete()
}
// Active indicates if this observable is still active.
func (o *Observable) Active() bool {
// Active indicates if this consumer is still active.
func (o *Consumer) Active() bool {
o.mu.Lock()
active := o.active && o.mset != nil
o.mu.Unlock()
return active
}
// This is when the underlying message set has been purged.
func (o *Observable) purge(sseq uint64) {
// This is when the underlying stream has been purged.
func (o *Consumer) purge(sseq uint64) {
o.mu.Lock()
o.sseq = sseq
o.asflr = sseq - 1
@@ -1375,17 +1370,17 @@ func stopAndClearTimer(tp **time.Timer) {
*tp = nil
}
// Stop will shutdown the observable for the associated message set.
func (o *Observable) Stop() error {
// Stop will shutdown the consumer for the associated stream.
func (o *Consumer) Stop() error {
return o.stop(false)
}
// Delete will delete the observable for the associated message set.
func (o *Observable) Delete() error {
// Delete will delete the consumer for the associated stream.
func (o *Consumer) Delete() error {
return o.stop(true)
}
func (o *Observable) stop(dflag bool) error {
func (o *Consumer) stop(dflag bool) error {
o.mu.Lock()
mset := o.mset
if mset == nil {
@@ -1419,7 +1414,7 @@ func (o *Observable) stop(dflag bool) error {
mset.sg.Broadcast()
mset.unsubscribe(ackSub)
mset.unsubscribe(reqSub)
delete(mset.obs, o.name)
delete(mset.consumers, o.name)
mset.mu.Unlock()
var err error
@@ -1435,7 +1430,7 @@ func (o *Observable) stop(dflag bool) error {
// Check that we do not form a cycle by delivering to a delivery subject
// that is part of the interest group.
func (mset *MsgSet) deliveryFormsCycle(deliverySubject string) bool {
func (mset *Stream) deliveryFormsCycle(deliverySubject string) bool {
mset.mu.Lock()
defer mset.mu.Unlock()
@@ -1448,21 +1443,21 @@ func (mset *MsgSet) deliveryFormsCycle(deliverySubject string) bool {
}
// This is same as check for delivery cycle.
func (mset *MsgSet) validSubject(partitionSubject string) bool {
func (mset *Stream) validSubject(partitionSubject string) bool {
return mset.deliveryFormsCycle(partitionSubject)
}
// SetInActiveDeleteThreshold sets the delete threshold for how long to wait
// before deleting an inactive ephemeral observable.
func (o *Observable) SetInActiveDeleteThreshold(dthresh time.Duration) error {
func (o *Consumer) SetInActiveDeleteThreshold(dthresh time.Duration) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.isPullMode() {
return fmt.Errorf("observable is not push-based")
return fmt.Errorf("consumer is not push-based")
}
if o.isDurable() {
return fmt.Errorf("observable is not durable")
return fmt.Errorf("consumer is not durable")
}
deleteWasRunning := o.dtmr != nil
stopAndClearTimer(&o.dtmr)
@@ -1475,6 +1470,6 @@ func (o *Observable) SetInActiveDeleteThreshold(dthresh time.Duration) error {
// RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
// Returns empty otherwise.
func (o *Observable) RequestNextMsgSubject() string {
func (o *Consumer) RequestNextMsgSubject() string {
return o.nextMsgSubj
}

View File

@@ -47,11 +47,11 @@ type FileStoreConfig struct {
type fileStore struct {
mu sync.RWMutex
stats MsgSetStats
state StreamState
scb func(int64)
ageChk *time.Timer
syncTmr *time.Timer
cfg MsgSetConfig
cfg StreamConfig
fcfg FileStoreConfig
blks []*msgBlock
lmb *msgBlock
@@ -60,7 +60,7 @@ type fileStore struct {
fch chan struct{}
qch chan struct{}
bad []uint64
obs []*observableFileStore
cfs []*consumerFileStore
closed bool
}
@@ -115,10 +115,10 @@ const (
blkScan = "%d.blk"
// used to scan index file names.
indexScan = "%d.idx"
// This is where we keep state on observers.
obsDir = "obs"
// Index file for observable
obsState = "o.dat"
// This is where we keep state on consumers.
consumerDir = "obs"
// Index file for a consumer.
consumerState = "o.dat"
// This is where we keep state on templates.
tmplsDir = "templates"
// Maximum size of a write buffer we may consider for re-use.
@@ -138,12 +138,12 @@ const (
// coalesceMaximum
coalesceMaximum = 64 * 1024
// Metafiles for message sets and observables.
// Metafiles for streams and consumers.
JetStreamMetaFile = "meta.inf"
JetStreamMetaFileSum = "meta.sum"
)
func newFileStore(fcfg FileStoreConfig, cfg MsgSetConfig) (*fileStore, error) {
func newFileStore(fcfg FileStoreConfig, cfg StreamConfig) (*fileStore, error) {
if cfg.Name == "" {
return nil, fmt.Errorf("name required")
}
@@ -188,7 +188,7 @@ func newFileStore(fcfg FileStoreConfig, cfg MsgSetConfig) (*fileStore, error) {
// Check if this is a new setup.
mdir := path.Join(fcfg.StoreDir, msgDir)
odir := path.Join(fcfg.StoreDir, obsDir)
odir := path.Join(fcfg.StoreDir, consumerDir)
if err := os.MkdirAll(mdir, 0755); err != nil {
return nil, fmt.Errorf("could not create message storage directory - %v", err)
}
@@ -220,7 +220,7 @@ func newFileStore(fcfg FileStoreConfig, cfg MsgSetConfig) (*fileStore, error) {
}
func dynBlkSize(retention RetentionPolicy, maxBytes int64) uint64 {
if retention == StreamPolicy {
if retention == LimitsPolicy {
// TODO(dlc) - Make the blocksize relative to this if set.
return defaultStreamBlockSize
} else {
@@ -252,22 +252,22 @@ func (fs *fileStore) writeMsgSetMeta() error {
return nil
}
func (obs *observableFileStore) writeObservableMeta() error {
meta := path.Join(obs.odir, JetStreamMetaFile)
func (cfs *consumerFileStore) writeConsumerMeta() error {
meta := path.Join(cfs.odir, JetStreamMetaFile)
if _, err := os.Stat(meta); (err != nil && !os.IsNotExist(err)) || err == nil {
return err
}
b, err := json.MarshalIndent(obs.cfg, _EMPTY_, " ")
b, err := json.MarshalIndent(cfs.cfg, _EMPTY_, " ")
if err != nil {
return err
}
if err := ioutil.WriteFile(meta, b, 0644); err != nil {
return err
}
obs.hh.Reset()
obs.hh.Write(b)
checksum := hex.EncodeToString(obs.hh.Sum(nil))
sum := path.Join(obs.odir, JetStreamMetaFileSum)
cfs.hh.Reset()
cfs.hh.Write(b)
checksum := hex.EncodeToString(cfs.hh.Sum(nil))
sum := path.Join(cfs.odir, JetStreamMetaFileSum)
if err := ioutil.WriteFile(sum, []byte(checksum), 0644); err != nil {
return err
}
@@ -358,14 +358,14 @@ func (fs *fileStore) recoverMsgs() error {
var index uint64
if n, err := fmt.Sscanf(fi.Name(), blkScan, &index); err == nil && n == 1 {
if mb := fs.recoverMsgBlock(fi, index); mb != nil {
if fs.stats.FirstSeq == 0 {
fs.stats.FirstSeq = mb.first.seq
if fs.state.FirstSeq == 0 {
fs.state.FirstSeq = mb.first.seq
}
if mb.last.seq > fs.stats.LastSeq {
fs.stats.LastSeq = mb.last.seq
if mb.last.seq > fs.state.LastSeq {
fs.state.LastSeq = mb.last.seq
}
fs.stats.Msgs += mb.msgs
fs.stats.Bytes += mb.bytes
fs.state.Msgs += mb.msgs
fs.state.Bytes += mb.bytes
}
}
}
@@ -399,7 +399,7 @@ func (ms *fileStore) GetSeqFromTime(t time.Time) uint64 {
func (fs *fileStore) StorageBytesUpdate(cb func(int64)) {
fs.mu.Lock()
fs.scb = cb
bsz := fs.stats.Bytes
bsz := fs.state.Bytes
fs.mu.Unlock()
if cb != nil && bsz > 0 {
cb(int64(bsz))
@@ -460,10 +460,10 @@ func (fs *fileStore) enableLastMsgBlockForWriting() error {
// Store stores a message.
func (fs *fileStore) StoreMsg(subj string, msg []byte) (uint64, error) {
fs.mu.Lock()
seq := fs.stats.LastSeq + 1
seq := fs.state.LastSeq + 1
if fs.stats.FirstSeq == 0 {
fs.stats.FirstSeq = seq
if fs.state.FirstSeq == 0 {
fs.state.FirstSeq = seq
}
n, err := fs.writeMsgRecord(seq, subj, msg)
@@ -473,9 +473,9 @@ func (fs *fileStore) StoreMsg(subj string, msg []byte) (uint64, error) {
}
fs.kickFlusher()
fs.stats.Msgs++
fs.stats.Bytes += n
fs.stats.LastSeq = seq
fs.state.Msgs++
fs.state.Bytes += n
fs.state.LastSeq = seq
// Limits checks and enforcement.
// If they do any deletions they will update the
@@ -501,7 +501,7 @@ func (fs *fileStore) StoreMsg(subj string, msg []byte) (uint64, error) {
// Will check the msg limit and drop firstSeq msg if needed.
// Lock should be held.
func (fs *fileStore) enforceMsgLimit() {
if fs.cfg.MaxMsgs <= 0 || fs.stats.Msgs <= uint64(fs.cfg.MaxMsgs) {
if fs.cfg.MaxMsgs <= 0 || fs.state.Msgs <= uint64(fs.cfg.MaxMsgs) {
return
}
fs.deleteFirstMsg()
@@ -510,16 +510,16 @@ func (fs *fileStore) enforceMsgLimit() {
// Will check the bytes limit and drop msgs if needed.
// Lock should be held.
func (fs *fileStore) enforceBytesLimit() {
if fs.cfg.MaxBytes <= 0 || fs.stats.Bytes <= uint64(fs.cfg.MaxBytes) {
if fs.cfg.MaxBytes <= 0 || fs.state.Bytes <= uint64(fs.cfg.MaxBytes) {
return
}
for bs := fs.stats.Bytes; bs > uint64(fs.cfg.MaxBytes); bs = fs.stats.Bytes {
for bs := fs.state.Bytes; bs > uint64(fs.cfg.MaxBytes); bs = fs.state.Bytes {
fs.deleteFirstMsg()
}
}
func (fs *fileStore) deleteFirstMsg() bool {
return fs.removeMsg(fs.stats.FirstSeq, false)
return fs.removeMsg(fs.state.FirstSeq, false)
}
// RemoveMsg will remove the message from this store.
@@ -597,8 +597,8 @@ func (mb *msgBlock) selectNextFirst() {
func (fs *fileStore) deleteMsgFromBlock(mb *msgBlock, seq uint64, sm *fileStoredMsg, secure bool) {
// Update global accounting.
msz := fileStoreMsgSize(sm.subj, sm.msg)
fs.stats.Msgs--
fs.stats.Bytes -= msz
fs.state.Msgs--
fs.state.Bytes -= msz
// Now local updates.
mb.msgs--
@@ -615,8 +615,8 @@ func (fs *fileStore) deleteMsgFromBlock(mb *msgBlock, seq uint64, sm *fileStored
// Optimize for FIFO case.
if seq == mb.first.seq {
mb.selectNextFirst()
if seq == fs.stats.FirstSeq {
fs.stats.FirstSeq = mb.first.seq
if seq == fs.state.FirstSeq {
fs.state.FirstSeq = mb.first.seq
}
if mb.first.seq > mb.last.seq {
fs.removeMsgBlock(mb)
@@ -933,13 +933,13 @@ func (fs *fileStore) syncBlocks() {
mb.ifd.Truncate(mb.liwsz)
}
}
var _obs [256]*observableFileStore
obs := append(_obs[:0], fs.obs...)
var _cfs [256]*consumerFileStore
cfs := append(_cfs[:0], fs.cfs...)
fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks)
fs.mu.Unlock()
// Do observables.
for _, o := range obs {
// Do consumers.
for _, o := range cfs {
o.syncStateFile()
}
}
@@ -948,7 +948,7 @@ func (fs *fileStore) syncBlocks() {
// Return nil if not in the set.
// Read lock should be held.
func (fs *fileStore) selectMsgBlock(seq uint64) *msgBlock {
if seq < fs.stats.FirstSeq || seq > fs.stats.LastSeq {
if seq < fs.state.FirstSeq || seq > fs.state.LastSeq {
return nil
}
for _, mb := range fs.blks {
@@ -1074,11 +1074,11 @@ func (fs *fileStore) msgForSeq(seq uint64) (*fileStoredMsg, error) {
fs.mu.Lock()
// seq == 0 indicates we want first msg.
if seq == 0 {
seq = fs.stats.FirstSeq
seq = fs.state.FirstSeq
}
mb := fs.selectMsgBlock(seq)
if mb == nil {
if seq <= fs.stats.LastSeq {
if seq <= fs.state.LastSeq {
err = ErrStoreMsgNotFound
}
fs.mu.Unlock()
@@ -1101,7 +1101,7 @@ func (fs *fileStore) msgForSeq(seq uint64) (*fileStoredMsg, error) {
sm := fs.readAndCacheMsgs(mb, seq)
if sm != nil {
mb.cgenid++
} else if seq <= fs.stats.LastSeq {
} else if seq <= fs.state.LastSeq {
err = ErrStoreMsgNotFound
}
fs.mu.Unlock()
@@ -1136,12 +1136,12 @@ func (fs *fileStore) LoadMsg(seq uint64) (string, []byte, int64, error) {
return "", nil, 0, err
}
func (fs *fileStore) Stats() MsgSetStats {
func (fs *fileStore) State() StreamState {
fs.mu.RLock()
defer fs.mu.RUnlock()
stats := fs.stats
stats.Observables = len(fs.obs)
return stats
state := fs.state
state.Consumers = len(fs.cfs)
return state
}
func fileStoreMsgSize(subj string, msg []byte) uint64 {
@@ -1345,12 +1345,12 @@ func (fs *fileStore) dmapEntries() int {
func (fs *fileStore) Purge() uint64 {
fs.mu.Lock()
fs.flushPendingWrites()
purged := fs.stats.Msgs
purged := fs.state.Msgs
cb := fs.scb
bytes := int64(fs.stats.Bytes)
fs.stats.FirstSeq = fs.stats.LastSeq + 1
fs.stats.Bytes = 0
fs.stats.Msgs = 0
bytes := int64(fs.state.Bytes)
fs.state.FirstSeq = fs.state.LastSeq + 1
fs.state.Bytes = 0
fs.state.Msgs = 0
blks := fs.blks
lmb := fs.lmb
@@ -1483,25 +1483,25 @@ func (fs *fileStore) Stop() error {
fs.ageChk = nil
}
var _obs [256]*observableFileStore
obs := append(_obs[:0], fs.obs...)
fs.obs = nil
var _cfs [256]*consumerFileStore
cfs := append(_cfs[:0], fs.cfs...)
fs.cfs = nil
fs.mu.Unlock()
for _, o := range obs {
for _, o := range cfs {
o.Stop()
}
return err
}
////////////////////////////////////////////////////////////////////////////////
// Observable state
// Consumers
////////////////////////////////////////////////////////////////////////////////
type observableFileStore struct {
type consumerFileStore struct {
mu sync.Mutex
fs *fileStore
cfg *ObservableConfig
cfg *ConsumerConfig
name string
odir string
ifn string
@@ -1513,23 +1513,23 @@ type observableFileStore struct {
closed bool
}
func (fs *fileStore) ObservableStore(name string, cfg *ObservableConfig) (ObservableStore, error) {
func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error) {
if fs == nil {
return nil, fmt.Errorf("filestore is nil")
}
if cfg == nil || name == "" {
return nil, fmt.Errorf("bad observable config")
return nil, fmt.Errorf("bad consumer config")
}
odir := path.Join(fs.fcfg.StoreDir, obsDir, name)
odir := path.Join(fs.fcfg.StoreDir, consumerDir, name)
if err := os.MkdirAll(odir, 0755); err != nil {
return nil, fmt.Errorf("could not create observable directory - %v", err)
return nil, fmt.Errorf("could not create consumer directory - %v", err)
}
o := &observableFileStore{
o := &consumerFileStore{
fs: fs,
cfg: cfg,
name: name,
odir: odir,
ifn: path.Join(odir, obsState),
ifn: path.Join(odir, consumerState),
fch: make(chan struct{}),
qch: make(chan struct{}),
}
@@ -1540,12 +1540,12 @@ func (fs *fileStore) ObservableStore(name string, cfg *ObservableConfig) (Observ
}
o.hh = hh
if err := o.writeObservableMeta(); err != nil {
if err := o.writeConsumerMeta(); err != nil {
return nil, err
}
fs.mu.Lock()
fs.obs = append(fs.obs, o)
fs.cfs = append(fs.cfs, o)
fs.mu.Unlock()
return o, nil
@@ -1553,16 +1553,16 @@ func (fs *fileStore) ObservableStore(name string, cfg *ObservableConfig) (Observ
const seqsHdrSize = 6*binary.MaxVarintLen64 + hdrLen
func (o *observableFileStore) Update(state *ObservableState) error {
func (o *consumerFileStore) Update(state *ConsumerState) error {
// Sanity checks.
if state.Delivered.ObsSeq < 1 || state.Delivered.SetSeq < 1 {
if state.Delivered.ConsumerSeq < 1 || state.Delivered.StreamSeq < 1 {
return fmt.Errorf("bad delivered sequences")
}
if state.AckFloor.ObsSeq > state.Delivered.ObsSeq {
return fmt.Errorf("bad ack floor for observable")
if state.AckFloor.ConsumerSeq > state.Delivered.ConsumerSeq {
return fmt.Errorf("bad ack floor for consumer")
}
if state.AckFloor.SetSeq > state.Delivered.SetSeq {
return fmt.Errorf("bad ack floor for set")
if state.AckFloor.StreamSeq > state.Delivered.StreamSeq {
return fmt.Errorf("bad ack floor for stream")
}
var hdr [seqsHdrSize]byte
@@ -1572,10 +1572,10 @@ func (o *observableFileStore) Update(state *ObservableState) error {
hdr[1] = version
n := hdrLen
n += binary.PutUvarint(hdr[n:], state.AckFloor.ObsSeq)
n += binary.PutUvarint(hdr[n:], state.AckFloor.SetSeq)
n += binary.PutUvarint(hdr[n:], state.Delivered.ObsSeq-state.AckFloor.ObsSeq)
n += binary.PutUvarint(hdr[n:], state.Delivered.SetSeq-state.AckFloor.SetSeq)
n += binary.PutUvarint(hdr[n:], state.AckFloor.ConsumerSeq)
n += binary.PutUvarint(hdr[n:], state.AckFloor.StreamSeq)
n += binary.PutUvarint(hdr[n:], state.Delivered.ConsumerSeq-state.AckFloor.ConsumerSeq)
n += binary.PutUvarint(hdr[n:], state.Delivered.StreamSeq-state.AckFloor.StreamSeq)
n += binary.PutUvarint(hdr[n:], uint64(len(state.Pending)))
buf := hdr[:n]
@@ -1584,8 +1584,8 @@ func (o *observableFileStore) Update(state *ObservableState) error {
if len(state.Pending) > 0 {
mbuf := make([]byte, len(state.Pending)*(2*binary.MaxVarintLen64)+binary.MaxVarintLen64)
aflr := state.AckFloor.SetSeq
maxd := state.Delivered.SetSeq
aflr := state.AckFloor.StreamSeq
maxd := state.Delivered.StreamSeq
// To save space we select the smallest timestamp.
var mints int64
@@ -1614,16 +1614,16 @@ func (o *observableFileStore) Update(state *ObservableState) error {
}
var lenbuf [binary.MaxVarintLen64]byte
n = binary.PutUvarint(lenbuf[0:], uint64(len(state.Redelivery)))
n = binary.PutUvarint(lenbuf[0:], uint64(len(state.Redelivered)))
buf = append(buf, lenbuf[:n]...)
// We expect these to be small so will not do anything too crazy here to
// keep the size small. Trick could be to offset sequence like above, but
// we would need to know low sequence number for redelivery, can't depend on ackfloor etc.
if len(state.Redelivery) > 0 {
mbuf := make([]byte, len(state.Redelivery)*(2*binary.MaxVarintLen64))
if len(state.Redelivered) > 0 {
mbuf := make([]byte, len(state.Redelivered)*(2*binary.MaxVarintLen64))
var n int
for k, v := range state.Redelivery {
for k, v := range state.Redelivered {
n += binary.PutUvarint(mbuf[n:], k)
n += binary.PutUvarint(mbuf[n:], v)
}
@@ -1643,7 +1643,7 @@ func (o *observableFileStore) Update(state *ObservableState) error {
return err
}
func (o *observableFileStore) syncStateFile() {
func (o *consumerFileStore) syncStateFile() {
// FIXME(dlc) - Hold last error?
o.mu.Lock()
if o.ifd != nil {
@@ -1654,7 +1654,7 @@ func (o *observableFileStore) syncStateFile() {
}
// Lock should be held.
func (o *observableFileStore) ensureStateFileOpen() error {
func (o *consumerFileStore) ensureStateFileOpen() error {
if o.ifd == nil {
ifd, err := os.OpenFile(o.ifn, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
@@ -1674,7 +1674,7 @@ func checkHeader(hdr []byte) error {
// State retrieves the state from the state file.
// This is not expected to be called in high performance code, only on startup.
func (o *observableFileStore) State() (*ObservableState, error) {
func (o *consumerFileStore) State() (*ConsumerState, error) {
o.mu.Lock()
defer o.mu.Unlock()
@@ -1683,7 +1683,7 @@ func (o *observableFileStore) State() (*ObservableState, error) {
return nil, err
}
var state *ObservableState
var state *ConsumerState
if len(buf) == 0 {
return state, nil
@@ -1724,18 +1724,18 @@ func (o *observableFileStore) State() (*ObservableState, error) {
readLen := readSeq
readCount := readSeq
state = &ObservableState{}
state.AckFloor.ObsSeq = readSeq()
state.AckFloor.SetSeq = readSeq()
state.Delivered.ObsSeq = readSeq()
state.Delivered.SetSeq = readSeq()
state = &ConsumerState{}
state.AckFloor.ConsumerSeq = readSeq()
state.AckFloor.StreamSeq = readSeq()
state.Delivered.ConsumerSeq = readSeq()
state.Delivered.StreamSeq = readSeq()
if bi == -1 {
return nil, fmt.Errorf("corrupt state file")
}
// Adjust back.
state.Delivered.ObsSeq += state.AckFloor.ObsSeq
state.Delivered.SetSeq += state.AckFloor.SetSeq
state.Delivered.ConsumerSeq += state.AckFloor.ConsumerSeq
state.Delivered.StreamSeq += state.AckFloor.StreamSeq
numPending := readLen()
@@ -1750,7 +1750,7 @@ func (o *observableFileStore) State() (*ObservableState, error) {
return nil, fmt.Errorf("corrupt state file")
}
// Adjust seq back.
seq += state.AckFloor.SetSeq
seq += state.AckFloor.StreamSeq
// Adjust the timestamp back.
ts = (ts + mints) * int64(time.Second)
// Store in pending.
@@ -1760,23 +1760,23 @@ func (o *observableFileStore) State() (*ObservableState, error) {
numRedelivered := readLen()
// We have redelivery entries here.
// We have redelivered entries here.
if numRedelivered > 0 {
state.Redelivery = make(map[uint64]uint64, numRedelivered)
state.Redelivered = make(map[uint64]uint64, numRedelivered)
for i := 0; i < int(numRedelivered); i++ {
seq := readSeq()
n := readCount()
if seq == 0 || n == 0 {
return nil, fmt.Errorf("corrupt state file")
}
state.Redelivery[seq] = n
state.Redelivered[seq] = n
}
}
return state, nil
}
// Stop the processing of the observable's state.
func (o *observableFileStore) Stop() error {
// Stop the processing of the consumers's state.
func (o *consumerFileStore) Stop() error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
@@ -1790,12 +1790,12 @@ func (o *observableFileStore) Stop() error {
}
fs := o.fs
o.mu.Unlock()
fs.removeObs(o)
fs.removeConsumer(o)
return nil
}
// Delete the observable.
func (o *observableFileStore) Delete() error {
// Delete the consumer.
func (o *consumerFileStore) Delete() error {
// Call stop first. OK if already stopped.
o.Stop()
o.mu.Lock()
@@ -1807,11 +1807,11 @@ func (o *observableFileStore) Delete() error {
return err
}
func (fs *fileStore) removeObs(obs *observableFileStore) {
func (fs *fileStore) removeConsumer(cfs *consumerFileStore) {
fs.mu.Lock()
for i, o := range fs.obs {
if o == obs {
fs.obs = append(fs.obs[:i], fs.obs[i+1:]...)
for i, o := range fs.cfs {
if o == cfs {
fs.cfs = append(fs.cfs[:i], fs.cfs[i+1:]...)
break
}
}

View File

@@ -31,7 +31,7 @@ import (
func TestFileStoreBasics(t *testing.T) {
storeDir, _ := ioutil.TempDir("", JetStreamStoreDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -45,13 +45,13 @@ func TestFileStoreBasics(t *testing.T) {
t.Fatalf("Expected sequence to be %d, got %d", i, seq)
}
}
stats := fs.Stats()
if stats.Msgs != 5 {
t.Fatalf("Expected 5 msgs, got %d", stats.Msgs)
state := fs.State()
if state.Msgs != 5 {
t.Fatalf("Expected 5 msgs, got %d", state.Msgs)
}
expectedSize := 5 * fileStoreMsgSize(subj, msg)
if stats.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, stats.Bytes)
if state.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
}
nsubj, nmsg, _, err := fs.LoadMsg(2)
if err != nil {
@@ -73,10 +73,10 @@ func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
storeDir := filepath.Join("", JetStreamStoreDir)
fcfg := FileStoreConfig{StoreDir: storeDir}
if _, err := newFileStore(fcfg, MsgSetConfig{Storage: MemoryStorage}); err == nil {
if _, err := newFileStore(fcfg, StreamConfig{Storage: MemoryStorage}); err == nil {
t.Fatalf("Expected an error with wrong type")
}
if _, err := newFileStore(fcfg, MsgSetConfig{Storage: FileStorage}); err == nil {
if _, err := newFileStore(fcfg, StreamConfig{Storage: FileStorage}); err == nil {
t.Fatalf("Expected an error with no name")
}
@@ -84,7 +84,7 @@ func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(fcfg, MsgSetConfig{Name: "dlc", Storage: FileStorage})
fs, err := newFileStore(fcfg, StreamConfig{Name: "dlc", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -102,32 +102,32 @@ func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
t.Fatalf("Expected sequence to be %d, got %d", i, seq)
}
}
stats := fs.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
msg22 := []byte(fmt.Sprintf("[%08d] Hello World!", 22))
expectedSize := toStore * fileStoreMsgSize(subj, msg22)
if stats.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, stats.Bytes)
if state.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
}
// Stop will flush to disk.
fs.Stop()
// Restart
fs, err = newFileStore(fcfg, MsgSetConfig{Name: "dlc", Storage: FileStorage})
fs, err = newFileStore(fcfg, StreamConfig{Name: "dlc", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats = fs.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state = fs.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, stats.Bytes)
if state.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
}
// Now write 100 more msgs
@@ -139,9 +139,9 @@ func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
t.Fatalf("Expected sequence to be %d, got %d", i, seq)
}
}
stats = fs.Stats()
if stats.Msgs != toStore*2 {
t.Fatalf("Expected %d msgs, got %d", toStore*2, stats.Msgs)
state = fs.State()
if state.Msgs != toStore*2 {
t.Fatalf("Expected %d msgs, got %d", toStore*2, state.Msgs)
}
// Now cycle again and make sure that last batch was stored.
@@ -149,18 +149,18 @@ func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
fs.Stop()
// Restart
fs, err = newFileStore(fcfg, MsgSetConfig{Name: "dlc", Storage: FileStorage})
fs, err = newFileStore(fcfg, StreamConfig{Name: "dlc", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats = fs.Stats()
if stats.Msgs != toStore*2 {
t.Fatalf("Expected %d msgs, got %d", toStore*2, stats.Msgs)
state = fs.State()
if state.Msgs != toStore*2 {
t.Fatalf("Expected %d msgs, got %d", toStore*2, state.Msgs)
}
if stats.Bytes != expectedSize*2 {
t.Fatalf("Expected %d bytes, got %d", expectedSize*2, stats.Bytes)
if state.Bytes != expectedSize*2 {
t.Fatalf("Expected %d bytes, got %d", expectedSize*2, state.Bytes)
}
}
@@ -169,7 +169,7 @@ func TestFileStoreMsgLimit(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage, MaxMsgs: 10})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage, MaxMsgs: 10})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -179,22 +179,22 @@ func TestFileStoreMsgLimit(t *testing.T) {
for i := 0; i < 10; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, stats.Msgs)
state := fs.State()
if state.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
}
if _, err := fs.StoreMsg(subj, msg); err != nil {
t.Fatalf("Error storing msg: %v", err)
}
stats = fs.Stats()
if stats.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, stats.Msgs)
state = fs.State()
if state.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
}
if stats.LastSeq != 11 {
t.Fatalf("Expected the last sequence to be 11 now, but got %d", stats.LastSeq)
if state.LastSeq != 11 {
t.Fatalf("Expected the last sequence to be 11 now, but got %d", state.LastSeq)
}
if stats.FirstSeq != 2 {
t.Fatalf("Expected the first sequence to be 2 now, but got %d", stats.FirstSeq)
if state.FirstSeq != 2 {
t.Fatalf("Expected the first sequence to be 2 now, but got %d", state.FirstSeq)
}
// Make sure we can not lookup seq 1.
if _, _, _, err := fs.LoadMsg(1); err == nil {
@@ -213,7 +213,7 @@ func TestFileStoreBytesLimit(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage, MaxBytes: int64(maxBytes)})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage, MaxBytes: int64(maxBytes)})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -222,12 +222,12 @@ func TestFileStoreBytesLimit(t *testing.T) {
for i := uint64(0); i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, stats.Bytes)
if state.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
}
// Now send 10 more and check that bytes limit enforced.
@@ -236,18 +236,18 @@ func TestFileStoreBytesLimit(t *testing.T) {
t.Fatalf("Error storing msg: %v", err)
}
}
stats = fs.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state = fs.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, stats.Bytes)
if state.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
}
if stats.FirstSeq != 11 {
t.Fatalf("Expected first sequence to be 11, got %d", stats.FirstSeq)
if state.FirstSeq != 11 {
t.Fatalf("Expected first sequence to be 11, got %d", state.FirstSeq)
}
if stats.LastSeq != toStore+10 {
t.Fatalf("Expected last sequence to be %d, got %d", toStore+10, stats.LastSeq)
if state.LastSeq != toStore+10 {
t.Fatalf("Expected last sequence to be %d, got %d", toStore+10, state.LastSeq)
}
}
@@ -258,7 +258,7 @@ func TestFileStoreAgeLimit(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -270,19 +270,19 @@ func TestFileStoreAgeLimit(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
checkExpired := func(t *testing.T) {
t.Helper()
checkFor(t, time.Second, maxAge, func() error {
stats = fs.Stats()
if stats.Msgs != 0 {
return fmt.Errorf("Expected no msgs, got %d", stats.Msgs)
state = fs.State()
if state.Msgs != 0 {
return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
}
if stats.Bytes != 0 {
return fmt.Errorf("Expected no bytes, got %d", stats.Bytes)
if state.Bytes != 0 {
return fmt.Errorf("Expected no bytes, got %d", state.Bytes)
}
return nil
})
@@ -293,9 +293,9 @@ func TestFileStoreAgeLimit(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats = fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state = fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
checkExpired(t)
}
@@ -305,7 +305,7 @@ func TestFileStoreTimeStamps(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -335,7 +335,7 @@ func TestFileStorePurge(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir, BlockSize: 64 * 1024}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir, BlockSize: 64 * 1024}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -348,12 +348,12 @@ func TestFileStorePurge(t *testing.T) {
for i := uint64(0); i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, stats.Bytes)
if state.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
}
if numBlocks := fs.numMsgBlocks(); numBlocks <= 1 {
@@ -366,28 +366,28 @@ func TestFileStorePurge(t *testing.T) {
t.Fatalf("Expected to have exactly 1 empty msg block, got %d", numBlocks)
}
checkPurgeStats := func() {
checkPurgeState := func() {
t.Helper()
stats = fs.Stats()
if stats.Msgs != 0 {
t.Fatalf("Expected 0 msgs after purge, got %d", stats.Msgs)
state = fs.State()
if state.Msgs != 0 {
t.Fatalf("Expected 0 msgs after purge, got %d", state.Msgs)
}
if stats.Bytes != 0 {
t.Fatalf("Expected 0 bytes after purge, got %d", stats.Bytes)
if state.Bytes != 0 {
t.Fatalf("Expected 0 bytes after purge, got %d", state.Bytes)
}
if stats.LastSeq != toStore {
t.Fatalf("Expected LastSeq to be %d., got %d", toStore, stats.LastSeq)
if state.LastSeq != toStore {
t.Fatalf("Expected LastSeq to be %d., got %d", toStore, state.LastSeq)
}
if stats.FirstSeq != toStore+1 {
t.Fatalf("Expected FirstSeq to be %d., got %d", toStore+1, stats.FirstSeq)
if state.FirstSeq != toStore+1 {
t.Fatalf("Expected FirstSeq to be %d., got %d", toStore+1, state.FirstSeq)
}
}
checkPurgeStats()
checkPurgeState()
// Make sure we recover same state.
fs.Stop()
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir, BlockSize: 64 * 1024}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir, BlockSize: 64 * 1024}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -397,7 +397,7 @@ func TestFileStorePurge(t *testing.T) {
t.Fatalf("Expected to have exactly 1 empty msg block, got %d", numBlocks)
}
checkPurgeStats()
checkPurgeState()
}
func TestFileStoreRemovePartialRecovery(t *testing.T) {
@@ -405,7 +405,7 @@ func TestFileStoreRemovePartialRecovery(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -416,9 +416,9 @@ func TestFileStoreRemovePartialRecovery(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
// Remove half
@@ -426,23 +426,23 @@ func TestFileStoreRemovePartialRecovery(t *testing.T) {
fs.RemoveMsg(uint64(i))
}
stats = fs.Stats()
if stats.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, stats.Msgs)
state = fs.State()
if state.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
}
// Make sure we recover same state.
fs.Stop()
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats2 := fs.Stats()
if stats != stats2 {
t.Fatalf("Expected recovered stats to be the same, got %+v vs %+v\n", stats2, stats)
state2 := fs.State()
if state != state2 {
t.Fatalf("Expected recovered state to be the same, got %+v vs %+v\n", state2, state)
}
}
@@ -451,7 +451,7 @@ func TestFileStoreRemoveOutOfOrderRecovery(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -462,9 +462,9 @@ func TestFileStoreRemoveOutOfOrderRecovery(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
// Remove evens
@@ -474,9 +474,9 @@ func TestFileStoreRemoveOutOfOrderRecovery(t *testing.T) {
}
}
stats = fs.Stats()
if stats.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, stats.Msgs)
state = fs.State()
if state.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
}
if _, _, _, err := fs.LoadMsg(1); err != nil {
@@ -491,15 +491,15 @@ func TestFileStoreRemoveOutOfOrderRecovery(t *testing.T) {
// Make sure we recover same state.
fs.Stop()
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats2 := fs.Stats()
if stats != stats2 {
t.Fatalf("Expected receovered stats to be the same, got %+v vs %+v\n", stats, stats2)
state2 := fs.State()
if state != state2 {
t.Fatalf("Expected receovered states to be the same, got %+v vs %+v\n", state, state2)
}
if _, _, _, err := fs.LoadMsg(1); err != nil {
@@ -519,7 +519,7 @@ func TestFileStoreAgeLimitRecovery(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -531,25 +531,25 @@ func TestFileStoreAgeLimitRecovery(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
fs.Stop()
time.Sleep(2 * maxAge)
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats = fs.Stats()
if stats.Msgs != 0 {
t.Fatalf("Expected no msgs, got %d", stats.Msgs)
state = fs.State()
if state.Msgs != 0 {
t.Fatalf("Expected no msgs, got %d", state.Msgs)
}
if stats.Bytes != 0 {
t.Fatalf("Expected no bytes, got %d", stats.Bytes)
if state.Bytes != 0 {
t.Fatalf("Expected no bytes, got %d", state.Bytes)
}
}
@@ -558,7 +558,7 @@ func TestFileStoreBitRot(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -570,9 +570,9 @@ func TestFileStoreBitRot(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if badSeqs := len(fs.checkMsgs()); badSeqs > 0 {
@@ -604,7 +604,7 @@ func TestFileStoreBitRot(t *testing.T) {
// Make sure we can restore.
fs.Stop()
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -620,7 +620,7 @@ func TestFileStoreEraseMsg(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -675,7 +675,7 @@ func TestFileStoreEraseAndNoIndexRecovery(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -686,9 +686,9 @@ func TestFileStoreEraseAndNoIndexRecovery(t *testing.T) {
for i := 0; i < toStore; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := fs.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
// Erase the even messages.
@@ -697,9 +697,9 @@ func TestFileStoreEraseAndNoIndexRecovery(t *testing.T) {
t.Fatalf("Expected erase msg to return true")
}
}
stats = fs.Stats()
if stats.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, stats.Msgs)
state = fs.State()
if state.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
}
// Stop and remove the index file.
@@ -707,15 +707,15 @@ func TestFileStoreEraseAndNoIndexRecovery(t *testing.T) {
ifn := path.Join(storeDir, msgDir, fmt.Sprintf(indexScan, 1))
os.Remove(ifn)
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err = newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats = fs.Stats()
if stats.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, stats.Msgs)
state = fs.State()
if state.Msgs != uint64(toStore/2) {
t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
}
for i := 2; i <= toStore; i += 2 {
@@ -730,7 +730,7 @@ func TestFileStoreMeta(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
mconfig := MsgSetConfig{Name: "ZZ-22-33", Storage: FileStorage, Subjects: []string{"foo.*"}, Replicas: 22}
mconfig := StreamConfig{Name: "ZZ-22-33", Storage: FileStorage, Subjects: []string{"foo.*"}, Replicas: 22}
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, mconfig)
if err != nil {
@@ -752,12 +752,12 @@ func TestFileStoreMeta(t *testing.T) {
if err != nil {
t.Fatalf("Error reading metafile: %v", err)
}
var mconfig2 MsgSetConfig
var mconfig2 StreamConfig
if err := json.Unmarshal(buf, &mconfig2); err != nil {
t.Fatalf("Error unmarshalling: %v", err)
}
if !reflect.DeepEqual(mconfig, mconfig2) {
t.Fatalf("MsgSet configs not equal, got %+v vs %+v", mconfig2, mconfig)
t.Fatalf("Stream configs not equal, got %+v vs %+v", mconfig2, mconfig)
}
checksum, err := ioutil.ReadFile(metasum)
if err != nil {
@@ -771,49 +771,48 @@ func TestFileStoreMeta(t *testing.T) {
}
// Now create an observable. Same deal for them.
oconfig := ObservableConfig{
oconfig := ConsumerConfig{
Delivery: "d",
DeliverAll: true,
FilterSubject: "foo",
AckPolicy: AckAll,
}
oname := "obs22"
obs, err := fs.ObservableStore(oname, &oconfig)
obs, err := fs.ConsumerStore(oname, &oconfig)
if err != nil {
t.Fatalf("Unexepected error: %v", err)
}
ometafile := path.Join(storeDir, obsDir, oname, JetStreamMetaFile)
ometasum := path.Join(storeDir, obsDir, oname, JetStreamMetaFileSum)
ometafile := path.Join(storeDir, consumerDir, oname, JetStreamMetaFile)
ometasum := path.Join(storeDir, consumerDir, oname, JetStreamMetaFileSum)
// Test to make sure meta file and checksum are present.
if _, err := os.Stat(ometafile); os.IsNotExist(err) {
t.Fatalf("Expected observable metafile %q to exist", ometafile)
t.Fatalf("Expected consumer metafile %q to exist", ometafile)
}
if _, err := os.Stat(ometasum); os.IsNotExist(err) {
t.Fatalf("Expected observable metafile's checksum %q to exist", ometasum)
t.Fatalf("Expected consumer metafile's checksum %q to exist", ometasum)
}
buf, err = ioutil.ReadFile(ometafile)
if err != nil {
t.Fatalf("Error reading observable metafile: %v", err)
t.Fatalf("Error reading consumer metafile: %v", err)
}
var oconfig2 ObservableConfig
var oconfig2 ConsumerConfig
if err := json.Unmarshal(buf, &oconfig2); err != nil {
t.Fatalf("Error unmarshalling: %v", err)
}
if oconfig2 != oconfig {
//if !reflect.DeepEqual(oconfig, oconfig2) {
t.Fatalf("Observable configs not equal, got %+v vs %+v", oconfig2, oconfig)
t.Fatalf("Consumer configs not equal, got %+v vs %+v", oconfig2, oconfig)
}
checksum, err = ioutil.ReadFile(ometasum)
if err != nil {
t.Fatalf("Error reading observable metafile checksum: %v", err)
t.Fatalf("Error reading consumer metafile checksum: %v", err)
}
hh := obs.(*observableFileStore).hh
hh := obs.(*consumerFileStore).hh
hh.Reset()
hh.Write(buf)
mychecksum = hex.EncodeToString(hh.Sum(nil))
@@ -832,7 +831,7 @@ func TestFileStoreCollapseDmap(t *testing.T) {
fs, err := newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: 4 * storedMsgSize},
MsgSetConfig{Name: "zzz", Storage: FileStorage},
StreamConfig{Name: "zzz", Storage: FileStorage},
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -841,9 +840,9 @@ func TestFileStoreCollapseDmap(t *testing.T) {
for i := 0; i < 10; i++ {
fs.StoreMsg(subj, msg)
}
stats := fs.Stats()
if stats.Msgs != 10 {
t.Fatalf("Expected 10 msgs, got %d", stats.Msgs)
state := fs.State()
if state.Msgs != 10 {
t.Fatalf("Expected 10 msgs, got %d", state.Msgs)
}
checkDmapTotal := func(total int) {
@@ -855,9 +854,9 @@ func TestFileStoreCollapseDmap(t *testing.T) {
checkFirstSeq := func(seq uint64) {
t.Helper()
stats := fs.Stats()
if stats.FirstSeq != seq {
t.Fatalf("Expected first seq to be %d, got %d", seq, stats.FirstSeq)
state := fs.State()
if state.FirstSeq != seq {
t.Fatalf("Expected first seq to be %d, got %d", seq, state.FirstSeq)
}
}
@@ -869,9 +868,9 @@ func TestFileStoreCollapseDmap(t *testing.T) {
fs.RemoveMsg(8)
checkFirstSeq(1)
stats = fs.Stats()
if stats.Msgs != 7 {
t.Fatalf("Expected 7 msgs, got %d", stats.Msgs)
state = fs.State()
if state.Msgs != 7 {
t.Fatalf("Expected 7 msgs, got %d", state.Msgs)
}
checkDmapTotal(3)
@@ -904,7 +903,7 @@ func TestFileStoreReadCache(t *testing.T) {
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir, ReadCacheExpire: 50 * time.Millisecond}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir, ReadCacheExpire: 50 * time.Millisecond}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -943,25 +942,25 @@ func TestFileStoreReadCache(t *testing.T) {
}
}
func TestFileStoreObservable(t *testing.T) {
func TestFileStoreConsumer(t *testing.T) {
storeDir, _ := ioutil.TempDir("", JetStreamStoreDir)
os.MkdirAll(storeDir, 0755)
defer os.RemoveAll(storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
o, err := fs.ObservableStore("obs22", &ObservableConfig{})
o, err := fs.ConsumerStore("obs22", &ConsumerConfig{})
if err != nil {
t.Fatalf("Unexepected error: %v", err)
}
if state, err := o.State(); state != nil || err != nil {
t.Fatalf("Unexpected state or error: %v", err)
}
state := &ObservableState{}
state := &ConsumerState{}
if err := o.Update(state); err == nil {
t.Fatalf("Exepected an error and got none")
}
@@ -987,24 +986,24 @@ func TestFileStoreObservable(t *testing.T) {
}
}
state.Delivered.ObsSeq = 1
state.Delivered.SetSeq = 22
state.Delivered.ConsumerSeq = 1
state.Delivered.StreamSeq = 22
updateAndCheck()
state.Delivered.ObsSeq = 100
state.Delivered.SetSeq = 122
state.AckFloor.ObsSeq = 50
state.AckFloor.SetSeq = 123
state.Delivered.ConsumerSeq = 100
state.Delivered.StreamSeq = 122
state.AckFloor.ConsumerSeq = 50
state.AckFloor.StreamSeq = 123
// This should fail, bad state.
shouldFail()
// So should this.
state.AckFloor.ObsSeq = 200
state.AckFloor.SetSeq = 100
state.AckFloor.ConsumerSeq = 200
state.AckFloor.StreamSeq = 100
shouldFail()
// Should succeed
state.AckFloor.ObsSeq = 50
state.AckFloor.SetSeq = 72
state.AckFloor.ConsumerSeq = 50
state.AckFloor.StreamSeq = 72
updateAndCheck()
tn := time.Now().UnixNano()
@@ -1032,7 +1031,7 @@ func TestFileStoreObservable(t *testing.T) {
// Now do redlivery, but first with no pending.
state.Pending = nil
state.Redelivery = map[uint64]uint64{22: 3, 44: 8}
state.Redelivered = map[uint64]uint64{22: 3, 44: 8}
updateAndCheck()
// All together.
@@ -1040,10 +1039,10 @@ func TestFileStoreObservable(t *testing.T) {
updateAndCheck()
// Large one
state.Delivered.ObsSeq = 10000
state.Delivered.SetSeq = 10000
state.AckFloor.ObsSeq = 100
state.AckFloor.SetSeq = 100
state.Delivered.ConsumerSeq = 10000
state.Delivered.StreamSeq = 10000
state.AckFloor.ConsumerSeq = 100
state.AckFloor.StreamSeq = 100
// Generate 8k pending.
state.Pending = make(map[uint64]int64)
for len(state.Pending) < 8192 {
@@ -1055,8 +1054,8 @@ func TestFileStoreObservable(t *testing.T) {
updateAndCheck()
state.Pending = nil
state.AckFloor.ObsSeq = 10000
state.AckFloor.SetSeq = 10000
state.AckFloor.ConsumerSeq = 10000
state.AckFloor.StreamSeq = 10000
updateAndCheck()
}
@@ -1086,7 +1085,7 @@ func TestFileStorePerf(t *testing.T) {
fs, err := newFileStore(
FileStoreConfig{StoreDir: storeDir},
MsgSetConfig{Name: "zzz", Storage: FileStorage},
StreamConfig{Name: "zzz", Storage: FileStorage},
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -1114,7 +1113,7 @@ func TestFileStorePerf(t *testing.T) {
fs, err = newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: 128 * 1024 * 1024},
MsgSetConfig{Name: "zzz", Storage: FileStorage},
StreamConfig{Name: "zzz", Storage: FileStorage},
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -1133,7 +1132,7 @@ func TestFileStorePerf(t *testing.T) {
fs, err = newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: 128 * 1024 * 1024},
MsgSetConfig{Name: "zzz", Storage: FileStorage},
StreamConfig{Name: "zzz", Storage: FileStorage},
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -1160,23 +1159,23 @@ func TestFileStorePerf(t *testing.T) {
fs, err = newFileStore(
FileStoreConfig{StoreDir: storeDir, BlockSize: 128 * 1024 * 1024},
MsgSetConfig{Name: "zzz", Storage: FileStorage},
StreamConfig{Name: "zzz", Storage: FileStorage},
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
stats := fs.Stats()
if stats.Msgs != 0 {
t.Fatalf("Expected no msgs, got %d", stats.Msgs)
state := fs.State()
if state.Msgs != 0 {
t.Fatalf("Expected no msgs, got %d", state.Msgs)
}
if stats.Bytes != 0 {
t.Fatalf("Expected no bytes, got %d", stats.Bytes)
if state.Bytes != 0 {
t.Fatalf("Expected no bytes, got %d", state.Bytes)
}
}
func TestFileStoreObservablesPerf(t *testing.T) {
func TestFileStoreConsumerPerf(t *testing.T) {
// Uncomment to run, holding place for now.
t.SkipNow()
@@ -1185,27 +1184,27 @@ func TestFileStoreObservablesPerf(t *testing.T) {
defer os.RemoveAll(storeDir)
fmt.Printf("StoreDir is %q\n", storeDir)
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, MsgSetConfig{Name: "zzz", Storage: FileStorage})
fs, err := newFileStore(FileStoreConfig{StoreDir: storeDir}, StreamConfig{Name: "zzz", Storage: FileStorage})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer fs.Stop()
// Test Observables.
o, err := fs.ObservableStore("obs22", &ObservableConfig{})
// Test Consumers.
o, err := fs.ConsumerStore("obs22", &ConsumerConfig{})
if err != nil {
t.Fatalf("Unexepected error: %v", err)
}
state := &ObservableState{}
state := &ConsumerState{}
toStore := uint64(1000000)
fmt.Printf("observable of %d msgs for ACK NONE\n", toStore)
fmt.Printf("consumer of %d msgs for ACK NONE\n", toStore)
start := time.Now()
for i := uint64(1); i <= toStore; i++ {
state.Delivered.ObsSeq = i
state.Delivered.SetSeq = i
state.AckFloor.ObsSeq = i
state.AckFloor.SetSeq = i
state.Delivered.ConsumerSeq = i
state.Delivered.StreamSeq = i
state.AckFloor.ConsumerSeq = i
state.AckFloor.StreamSeq = i
if err := o.Update(state); err != nil {
t.Fatalf("Unexepected error updating state: %v", err)
}
@@ -1217,20 +1216,20 @@ func TestFileStoreObservablesPerf(t *testing.T) {
// We will lag behind with pending.
state.Pending = make(map[uint64]int64)
lag := uint64(100)
state.AckFloor.ObsSeq = 0
state.AckFloor.SetSeq = 0
state.AckFloor.ConsumerSeq = 0
state.AckFloor.StreamSeq = 0
fmt.Printf("\nobservable of %d msgs for ACK EXPLICIT with pending lag of %d\n", toStore, lag)
fmt.Printf("\nconsumer of %d msgs for ACK EXPLICIT with pending lag of %d\n", toStore, lag)
start = time.Now()
for i := uint64(1); i <= toStore; i++ {
state.Delivered.ObsSeq = i
state.Delivered.SetSeq = i
state.Delivered.ConsumerSeq = i
state.Delivered.StreamSeq = i
state.Pending[i] = time.Now().UnixNano()
if i > lag {
ackseq := i - lag
state.AckFloor.ObsSeq = ackseq
state.AckFloor.SetSeq = ackseq
state.AckFloor.ConsumerSeq = ackseq
state.AckFloor.StreamSeq = ackseq
delete(state.Pending, ackseq)
}
if err := o.Update(state); err != nil {

View File

@@ -41,17 +41,17 @@ type JetStreamConfig struct {
// TODO(dlc) - need to track and rollup against server limits, etc.
type JetStreamAccountLimits struct {
MaxMemory int64 `json:"max_memory"`
MaxStore int64 `json:"max_storage"`
MaxMsgSets int `json:"max_msg_sets"`
MaxObservables int `json:"max_observables"`
MaxMemory int64 `json:"max_memory"`
MaxStore int64 `json:"max_storage"`
MaxStreams int `json:"max_streams"`
MaxConsumers int `json:"max_consumers"`
}
// JetStreamAccountStats returns current statistics about the account's JetStream usage.
type JetStreamAccountStats struct {
Memory uint64 `json:"memory"`
Store uint64 `json:"storage"`
MsgSets int `json:"msg_sets"`
Streams int `json:"streams"`
Limits JetStreamAccountLimits `json:"limits"`
}
@@ -80,67 +80,67 @@ const (
JetStreamInfo = "$JS.INFO"
jsInfoExport = "$JS.*.INFO"
// JetStreamCreateMsgSet is the endpoint to create new message sets.
// JetStreamCreateStream is the endpoint to create new streams.
// Will return +OK on success and -ERR on failure.
JetStreamCreateMsgSet = "$JS.MSGSET.CREATE"
jsCreateMsgSetExport = "$JS.*.MSGSET.CREATE"
JetStreamCreateStream = "$JS.STREAM.CREATE"
jsCreateStreamExport = "$JS.*.STREAM.CREATE"
// JetStreamMsgSets is the endpoint to list all message sets for this account.
// JetStreamStreams is the endpoint to list all streams for this account.
// Will return json list of string on success and -ERR on failure.
JetStreamMsgSets = "$JS.MSGSETS"
jsMsgSetsExport = "$JS.*.MSGSETS"
JetStreamStreams = "$JS.STREAMS"
jsStreamsExport = "$JS.*.STREAMS"
// JetStreamMsgSetInfo is for obtaining general information about a named message set.
// JetStreamStreamInfo is for obtaining general information about a named stream.
// Will return JSON response.
JetStreamMsgSetInfo = "$JS.MSGSET.INFO"
jsMsgSetInfoExport = "$JS.*.MSGSET.INFO"
JetStreamStreamInfo = "$JS.STREAM.INFO"
jsStreamInfoExport = "$JS.*.STREAM.INFO"
// JetStreamDeleteMsgSet is the endpoint to delete message sets.
// JetStreamDeleteStream is the endpoint to delete streams.
// Will return +OK on success and -ERR on failure.
JetStreamDeleteMsgSet = "$JS.MSGSET.DELETE"
jsDeleteMsgSetExport = "$JS.*.MSGSET.DELETE"
JetStreamDeleteStream = "$JS.STREAM.DELETE"
jsDeleteStreamExport = "$JS.*.STREAM.DELETE"
// JetStreamPurgeMsgSet is the endpoint to purge message sets.
// JetStreamPurgeStream is the endpoint to purge streams.
// Will return +OK on success and -ERR on failure.
JetStreamPurgeMsgSet = "$JS.MSGSET.PURGE"
jsPurgeMsgSetExport = "$JS.*.MSGSET.PURGE"
JetStreamPurgeStream = "$JS.STREAM.PURGE"
jsPurgeStreamExport = "$JS.*.MSGSET.PURGE"
// JetStreamDeleteMsg is the endpoint to delete messages from a message set.
// Will return +OK on success and -ERR on failure.
JetStreamDeleteMsg = "$JS.MSGSET.MSG.DELETE"
jsDeleteMsgExport = "$JS.*.MSGSET.MSG.DELETE"
JetStreamDeleteMsg = "$JS.STREAM.MSG.DELETE"
jsDeleteMsgExport = "$JS.*.STREAM.MSG.DELETE"
// JetStreamCreateObservable is the endpoint to create observers for a message set.
// JetStreamCreateConsumer is the endpoint to create consumers for streams.
// Will return +OK on success and -ERR on failure.
JetStreamCreateObservable = "$JS.OBSERVABLE.CREATE"
jsCreateObservableExport = "$JS.*.OBSERVABLE.CREATE"
JetStreamCreateConsumer = "$JS.CONSUMER.CREATE"
jsCreateConsumerExport = "$JS.*.CONSUMER.CREATE"
// JetStreamObservables is the endpoint to list all observables for the message set.
// JetStreamConsumers is the endpoint to list all consumers for the stream.
// Will return json list of string on success and -ERR on failure.
JetStreamObservables = "$JS.OBSERVABLES"
jsObservablesExport = "$JS.*.OBSERVABLES"
JetStreamConsumers = "$JS.CONSUMERS"
jsConsumersExport = "$JS.*.CONSUMERS"
// JsObservableInfo is for obtaining general information about an observable.
// JetStreamConsumerInfo is for obtaining general information about a consumer.
// Will return JSON response.
JetStreamObservableInfo = "$JS.OBSERVABLE.INFO"
jsObservableInfoExport = "$JS.*.OBSERVABLE.INFO"
JetStreamConsumerInfo = "$JS.CONSUMER.INFO"
jsConsumerInfoExport = "$JS.*.CONSUMER.INFO"
// JetStreamDeleteObservable is the endpoint to delete observables.
// JetStreamDeleteConsumer is the endpoint to delete consumers.
// Will return +OK on success and -ERR on failure.
JetStreamDeleteObservable = "$JS.OBSERVABLE.DELETE"
jsDeleteObservableExport = "$JS.*.OBSERVABLE.DELETE"
JetStreamDeleteConsumer = "$JS.CONSUMER.DELETE"
jsDeleteConsumerExport = "$JS.*.CONSUMER.DELETE"
// JetStreamAckPre is the prefix for the ack stream coming back to an observable.
// JetStreamAckPre is the prefix for the ack stream coming back to an consumer.
JetStreamAckPre = "$JS.A"
// JetStreamRequestNextPre is the prefix for the request next message(s) for an observable in worker/pull mode.
JetStreamRequestNextPre = "$JS.RN"
// JetStreamRequestNextPre is the prefix for the request next message(s) for a consumer in worker/pull mode.
JetStreamRequestNextPre = "$JS.NEXT"
// JetStreamMsgBySeqPre is the prefix for direct requests for a message by message set sequence number.
// JetStreamMsgBySeqPre is the prefix for direct requests for a message by its stream sequence number.
JetStreamMsgBySeqPre = "$JS.BYSEQ"
// JetStreamObservableAckSamplePre is the prefix for sample messages from observables
JetStreamObservableAckSamplePre = "$JS.OBSERVABLE.ACKSAMPLE"
// JetStreamConsumerAckSamplePre is the prefix for sampling metric messages for consumers.
JetStreamConsumerAckSamplePre = "$JS.CONSUMER.ACKSAMPLE"
)
// This is for internal accounting for JetStream for this server.
@@ -157,16 +157,16 @@ type jetStream struct {
var allJsExports = []string{
jsEnabledExport,
jsInfoExport,
jsCreateMsgSetExport,
jsMsgSetsExport,
jsMsgSetInfoExport,
jsDeleteMsgSetExport,
jsPurgeMsgSetExport,
jsCreateStreamExport,
jsStreamsExport,
jsStreamInfoExport,
jsDeleteStreamExport,
jsPurgeStreamExport,
jsDeleteMsgExport,
jsCreateObservableExport,
jsObservablesExport,
jsObservableInfoExport,
jsDeleteObservableExport,
jsCreateConsumerExport,
jsConsumersExport,
jsConsumerInfoExport,
jsDeleteConsumerExport,
}
// This represents a jetstream enabled account.
@@ -184,7 +184,7 @@ type jsAccount struct {
storeReserved int64
storeUsed int64
storeDir string
msgSets map[string]*MsgSet
streams map[string]*Stream
templates map[string]*StreamTemplate
store TemplateStore
}
@@ -249,34 +249,34 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error {
if _, err := s.sysSubscribe(jsInfoExport, s.jsAccountInfoRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsCreateMsgSetExport, s.jsCreateMsgSetRequest); err != nil {
if _, err := s.sysSubscribe(jsCreateStreamExport, s.jsCreateStreamRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsMsgSetsExport, s.jsMsgSetsRequest); err != nil {
if _, err := s.sysSubscribe(jsStreamsExport, s.jsStreamsRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsMsgSetInfoExport, s.jsMsgSetInfoRequest); err != nil {
if _, err := s.sysSubscribe(jsStreamInfoExport, s.jsStreamInfoRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsDeleteMsgSetExport, s.jsMsgSetDeleteRequest); err != nil {
if _, err := s.sysSubscribe(jsDeleteStreamExport, s.jsStreamDeleteRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsPurgeMsgSetExport, s.jsMsgSetPurgeRequest); err != nil {
if _, err := s.sysSubscribe(jsPurgeStreamExport, s.jsStreamPurgeRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsDeleteMsgExport, s.jsMsgDeleteRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsCreateObservableExport, s.jsCreateObservableRequest); err != nil {
if _, err := s.sysSubscribe(jsCreateConsumerExport, s.jsCreateConsumerRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsObservablesExport, s.jsObservablesRequest); err != nil {
if _, err := s.sysSubscribe(jsConsumersExport, s.jsConsumersRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsObservableInfoExport, s.jsObservableInfoRequest); err != nil {
if _, err := s.sysSubscribe(jsConsumerInfoExport, s.jsConsumerInfoRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
if _, err := s.sysSubscribe(jsDeleteObservableExport, s.jsObservableDeleteRequest); err != nil {
if _, err := s.sysSubscribe(jsDeleteConsumerExport, s.jsConsumerDeleteRequest); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
@@ -413,7 +413,7 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
js.mu.Unlock()
return fmt.Errorf("jetstream already enabled for account")
}
jsa := &jsAccount{js: js, account: a, limits: *limits, msgSets: make(map[string]*MsgSet)}
jsa := &jsAccount{js: js, account: a, limits: *limits, streams: make(map[string]*Stream)}
jsa.storeDir = path.Join(js.config.StoreDir, a.Name)
js.accounts[a] = jsa
js.reserveResources(limits)
@@ -513,7 +513,7 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
metafile := path.Join(mdir, JetStreamMetaFile)
metasum := path.Join(mdir, JetStreamMetaFileSum)
if _, err := os.Stat(metafile); os.IsNotExist(err) {
s.Warnf(" Missing MsgSet metafile for %q", metafile)
s.Warnf(" Missing Stream metafile for %q", metafile)
continue
}
buf, err := ioutil.ReadFile(metafile)
@@ -522,51 +522,51 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
continue
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
s.Warnf(" Missing MsgSet checksum for %q", metasum)
s.Warnf(" Missing Stream checksum for %q", metasum)
continue
}
sum, err := ioutil.ReadFile(metasum)
if err != nil {
s.Warnf(" Error reading MsgSet metafile checksum %q: %v", metasum, err)
s.Warnf(" Error reading Stream metafile checksum %q: %v", metasum, err)
continue
}
hh.Write(buf)
checksum := hex.EncodeToString(hh.Sum(nil))
if checksum != string(sum) {
s.Warnf(" MsgSet metafile checksums do not match %q vs %q", sum, checksum)
s.Warnf(" Stream metafile checksums do not match %q vs %q", sum, checksum)
continue
}
var cfg MsgSetConfig
var cfg StreamConfig
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling MsgSet metafile: %v", err)
s.Warnf(" Error unmarshalling Stream metafile: %v", err)
continue
}
if cfg.Template != _EMPTY_ {
if err := jsa.addMsgSetNameToTemplate(cfg.Template, cfg.Name); err != nil {
s.Warnf(" Error adding MsgSet %q to Template %q: %v", cfg.Name, cfg.Template, err)
if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil {
s.Warnf(" Error adding Stream %q to Template %q: %v", cfg.Name, cfg.Template, err)
}
}
mset, err := a.AddMsgSet(&cfg)
mset, err := a.AddStream(&cfg)
if err != nil {
s.Warnf(" Error recreating MsgSet %q: %v", cfg.Name, err)
s.Warnf(" Error recreating Stream %q: %v", cfg.Name, err)
continue
}
stats := mset.Stats()
s.Noticef(" Restored %d messages for MsgSet %q", comma(int64(stats.Msgs)), fi.Name())
stats := mset.State()
s.Noticef(" Restored %d messages for Stream %q", comma(int64(stats.Msgs)), fi.Name())
// Now do Observables.
odir := path.Join(sdir, fi.Name(), obsDir)
// Now do Consumers.
odir := path.Join(sdir, fi.Name(), consumerDir)
ofis, _ := ioutil.ReadDir(odir)
if len(ofis) > 0 {
s.Noticef(" Recovering %d Observables for MsgSet - %q", len(ofis), fi.Name())
s.Noticef(" Recovering %d Consumers for Stream - %q", len(ofis), fi.Name())
}
for _, ofi := range ofis {
metafile := path.Join(odir, ofi.Name(), JetStreamMetaFile)
metasum := path.Join(odir, ofi.Name(), JetStreamMetaFileSum)
if _, err := os.Stat(metafile); os.IsNotExist(err) {
s.Warnf(" Missing Observable Metafile %q", metafile)
s.Warnf(" Missing Consumer Metafile %q", metafile)
continue
}
buf, err := ioutil.ReadFile(metafile)
@@ -575,21 +575,21 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
continue
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
s.Warnf(" Missing Observable checksum for %q", metasum)
s.Warnf(" Missing Consumer checksum for %q", metasum)
continue
}
var cfg ObservableConfig
var cfg ConsumerConfig
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling Observable metafile: %v", err)
s.Warnf(" Error unmarshalling Consumer metafile: %v", err)
continue
}
obs, err := mset.AddObservable(&cfg)
obs, err := mset.AddConsumer(&cfg)
if err != nil {
s.Warnf(" Error adding Observable: %v", err)
s.Warnf(" Error adding Consumer: %v", err)
continue
}
if err := obs.readStoredState(); err != nil {
s.Warnf(" Error restoring Observable state: %v", err)
s.Warnf(" Error restoring Consumer state: %v", err)
}
}
}
@@ -599,8 +599,8 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
return nil
}
// NumMsgSets will return how many message sets we have.
func (a *Account) NumMsgSets() int {
// NumStreams will return how many streams we have.
func (a *Account) NumStreams() int {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
@@ -608,29 +608,29 @@ func (a *Account) NumMsgSets() int {
return 0
}
jsa.mu.Lock()
n := len(jsa.msgSets)
n := len(jsa.streams)
jsa.mu.Unlock()
return n
}
// MsgSets will return all known message sets.
func (a *Account) MsgSets() []*MsgSet {
// Streams will return all known streams.
func (a *Account) Streams() []*Stream {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return nil
}
var msets []*MsgSet
var msets []*Stream
jsa.mu.Lock()
for _, mset := range jsa.msgSets {
for _, mset := range jsa.streams {
msets = append(msets, mset)
}
jsa.mu.Unlock()
return msets
}
func (a *Account) LookupMsgSet(name string) (*MsgSet, error) {
func (a *Account) LookupStream(name string) (*Stream, error) {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
@@ -639,10 +639,10 @@ func (a *Account) LookupMsgSet(name string) (*MsgSet, error) {
return nil, fmt.Errorf("jetstream not enabled")
}
jsa.mu.Lock()
mset, ok := jsa.msgSets[name]
mset, ok := jsa.streams[name]
jsa.mu.Unlock()
if !ok {
return nil, fmt.Errorf("msgset not found")
return nil, fmt.Errorf("stream not found")
}
return mset, nil
}
@@ -713,7 +713,7 @@ func (a *Account) JetStreamUsage() JetStreamAccountStats {
jsa.mu.Lock()
stats.Memory = uint64(jsa.memUsed)
stats.Store = uint64(jsa.storeUsed)
stats.MsgSets = len(jsa.msgSets)
stats.Streams = len(jsa.streams)
stats.Limits = jsa.limits
jsa.mu.Unlock()
}
@@ -768,10 +768,10 @@ func (jsa *jsAccount) flushState() error {
}
// Collect the message sets.
var _msets [64]*MsgSet
var _msets [64]*Stream
msets := _msets[:0]
jsa.mu.Lock()
for _, mset := range jsa.msgSets {
for _, mset := range jsa.streams {
msets = append(msets, mset)
}
jsa.mu.Unlock()
@@ -823,19 +823,19 @@ func (jsa *jsAccount) limitsExceeded(storeType StorageType) bool {
// Check if a new proposed msg set while exceed our account limits.
// Lock should be held.
func (jsa *jsAccount) checkLimits(config *MsgSetConfig) error {
if jsa.limits.MaxMsgSets > 0 && len(jsa.msgSets) >= jsa.limits.MaxMsgSets {
return fmt.Errorf("maximum number of message sets reached")
func (jsa *jsAccount) checkLimits(config *StreamConfig) error {
if jsa.limits.MaxStreams > 0 && len(jsa.streams) >= jsa.limits.MaxStreams {
return fmt.Errorf("maximum number of streams reached")
}
// FIXME(dlc) - Add check here for replicas based on clustering.
if config.Replicas != 1 {
return fmt.Errorf("replicas setting of %d not allowed", config.Replicas)
}
// Check MaxObservables
if config.MaxObservables > 0 && config.MaxObservables > jsa.limits.MaxObservables {
return fmt.Errorf("maximum observables exceeds account limit")
// Check MaxConsumers
if config.MaxConsumers > 0 && config.MaxConsumers > jsa.limits.MaxConsumers {
return fmt.Errorf("maximum consumers exceeds account limit")
} else {
config.MaxObservables = jsa.limits.MaxObservables
config.MaxConsumers = jsa.limits.MaxConsumers
}
// Check storage, memory or disk.
if config.MaxBytes > 0 {
@@ -856,12 +856,12 @@ func (jsa *jsAccount) checkLimits(config *MsgSetConfig) error {
// Delete the JetStream resources.
func (jsa *jsAccount) delete() {
var msgSets []*MsgSet
var streams []*Stream
var ts []string
jsa.mu.Lock()
for _, ms := range jsa.msgSets {
msgSets = append(msgSets, ms)
for _, ms := range jsa.streams {
streams = append(streams, ms)
}
acc := jsa.account
for _, t := range jsa.templates {
@@ -870,7 +870,7 @@ func (jsa *jsAccount) delete() {
jsa.templates = nil
jsa.mu.Unlock()
for _, ms := range msgSets {
for _, ms := range streams {
ms.stop(false)
}
for _, t := range ts {
@@ -968,7 +968,7 @@ func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, subject, rep
}
// Request to create a message set.
func (s *Server) jsCreateMsgSetRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsCreateStreamRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -976,20 +976,20 @@ func (s *Server) jsCreateMsgSetRequest(sub *subscription, c *client, subject, re
s.sendInternalAccountMsg(c.acc, reply, JetStreamNotEnabled)
return
}
var cfg MsgSetConfig
var cfg StreamConfig
if err := json.Unmarshal(msg, &cfg); err != nil {
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
var response = OK
if _, err := c.acc.AddMsgSet(&cfg); err != nil {
if _, err := c.acc.AddStream(&cfg); err != nil {
response = fmt.Sprintf("%s %v", ErrPrefix, err)
}
s.sendInternalAccountMsg(c.acc, reply, response)
}
// Request for the list of all message sets.
func (s *Server) jsMsgSetsRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
// Request for the list of all streams.
func (s *Server) jsStreamsRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -998,7 +998,7 @@ func (s *Server) jsMsgSetsRequest(sub *subscription, c *client, subject, reply s
return
}
var names []string
msets := c.acc.MsgSets()
msets := c.acc.Streams()
for _, mset := range msets {
names = append(names, mset.Name())
}
@@ -1011,7 +1011,7 @@ func (s *Server) jsMsgSetsRequest(sub *subscription, c *client, subject, reply s
// Request for information about a message set.
// This expects a message set name as the msg body.
func (s *Server) jsMsgSetInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1023,13 +1023,13 @@ func (s *Server) jsMsgSetInfoRequest(sub *subscription, c *client, subject, repl
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(string(msg))
mset, err := c.acc.LookupStream(string(msg))
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s '%v'", ErrPrefix, err))
return
}
msi := MsgSetInfo{
Stats: mset.Stats(),
msi := StreamInfo{
State: mset.State(),
Config: mset.Config(),
}
b, err := json.MarshalIndent(msi, "", " ")
@@ -1041,7 +1041,7 @@ func (s *Server) jsMsgSetInfoRequest(sub *subscription, c *client, subject, repl
// Request to delete a message set.
// This expects a message set name as the msg body.
func (s *Server) jsMsgSetDeleteRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1053,7 +1053,7 @@ func (s *Server) jsMsgSetDeleteRequest(sub *subscription, c *client, subject, re
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(string(msg))
mset, err := c.acc.LookupStream(string(msg))
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
@@ -1083,7 +1083,7 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply
name := args[0]
seq, _ := strconv.Atoi(args[1])
mset, err := c.acc.LookupMsgSet(name)
mset, err := c.acc.LookupStream(name)
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
@@ -1097,7 +1097,7 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply
// Request to purge a message set.
// This expects a message set name as the msg body.
func (s *Server) jsMsgSetPurgeRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1109,7 +1109,7 @@ func (s *Server) jsMsgSetPurgeRequest(sub *subscription, c *client, subject, rep
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(string(msg))
mset, err := c.acc.LookupStream(string(msg))
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
@@ -1120,7 +1120,7 @@ func (s *Server) jsMsgSetPurgeRequest(sub *subscription, c *client, subject, rep
}
// Request to create an observable.
func (s *Server) jsCreateObservableRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsCreateConsumerRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1128,26 +1128,29 @@ func (s *Server) jsCreateObservableRequest(sub *subscription, c *client, subject
s.sendInternalAccountMsg(c.acc, reply, JetStreamNotEnabled)
return
}
var req CreateObservableRequest
var req CreateConsumerRequest
if err := json.Unmarshal(msg, &req); err != nil {
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(string(req.MsgSet))
mset, err := c.acc.LookupStream(string(req.Stream))
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
}
var response = OK
if _, err := mset.AddObservable(&req.Config); err != nil {
if o, err := mset.AddConsumer(&req.Config); err != nil {
response = fmt.Sprintf("%s '%v'", ErrPrefix, err)
} else if !o.isDurable() {
// If the consumer is ephemeral add in the name
response = OK + " " + o.Name()
}
s.sendInternalAccountMsg(c.acc, reply, response)
}
// Request for the list of all observables.
// This expects a message set name as the msg body.
func (s *Server) jsObservablesRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsConsumersRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1159,13 +1162,13 @@ func (s *Server) jsObservablesRequest(sub *subscription, c *client, subject, rep
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(string(msg))
mset, err := c.acc.LookupStream(string(msg))
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
}
var onames []string
obs := mset.Observables()
obs := mset.Consumers()
for _, o := range obs {
onames = append(onames, o.Name())
}
@@ -1178,7 +1181,7 @@ func (s *Server) jsObservablesRequest(sub *subscription, c *client, subject, rep
// Request for information about an observable.
// This expects a message set name and observable name as the msg body. e.g. "MSGSET1 OBS1"
func (s *Server) jsObservableInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1195,12 +1198,12 @@ func (s *Server) jsObservableInfoRequest(sub *subscription, c *client, subject,
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(names[0])
mset, err := c.acc.LookupStream(names[0])
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
}
obs := mset.LookupObservable(names[1])
obs := mset.LookupConsumer(names[1])
if obs == nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s observable not found", ErrPrefix))
return
@@ -1213,9 +1216,9 @@ func (s *Server) jsObservableInfoRequest(sub *subscription, c *client, subject,
s.sendInternalAccountMsg(c.acc, reply, b)
}
// Request to delete an Observable.
// Request to delete an Consumer.
// This expects a message set name and observable name as the msg body. e.g. "MSGSET1 OBS1"
func (s *Server) jsObservableDeleteRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
if c == nil || c.acc == nil {
return
}
@@ -1232,12 +1235,12 @@ func (s *Server) jsObservableDeleteRequest(sub *subscription, c *client, subject
s.sendInternalAccountMsg(c.acc, reply, JetStreamBadRequest)
return
}
mset, err := c.acc.LookupMsgSet(names[0])
mset, err := c.acc.LookupStream(names[0])
if err != nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s %v", ErrPrefix, err))
return
}
obs := mset.LookupObservable(names[1])
obs := mset.LookupConsumer(names[1])
if obs == nil {
s.sendInternalAccountMsg(c.acc, reply, fmt.Sprintf("%s observable not found", ErrPrefix))
return
@@ -1298,8 +1301,8 @@ func (a *Account) checkForJetStream() (*Server, *jsAccount, error) {
// is received that matches. Each new message set will use the config as the template config to create them.
type StreamTemplateConfig struct {
Name string `json:"name"`
Config *MsgSetConfig `json:"config"`
MaxMsgSets uint32 `json:"max_msg_sets"`
Config *StreamConfig `json:"config"`
MaxStreams uint32 `json:"max_streams"`
}
// StreamTemplate
@@ -1330,7 +1333,7 @@ func (a *Account) AddStreamTemplate(tc *StreamTemplateConfig) (*StreamTemplate,
// FIXME(dlc) - Hacky
tcopy := tc.deepCopy()
tcopy.Config.Name = "_"
cfg, err := checkMsgSetCfg(tcopy.Config)
cfg, err := checkStreamCfg(tcopy.Config)
if err != nil {
return nil, err
}
@@ -1405,7 +1408,7 @@ func (t *StreamTemplate) processInboundTemplateMsg(_ *subscription, _ *client, s
jsa := t.jsa
jsa.mu.Lock()
// If we already are registered then we can just return here.
if _, ok := jsa.msgSets[subject]; ok {
if _, ok := jsa.streams[subject]; ok {
jsa.mu.Unlock()
return
}
@@ -1417,7 +1420,7 @@ func (t *StreamTemplate) processInboundTemplateMsg(_ *subscription, _ *client, s
c := t.tc
cfg := *t.Config
cfg.Template = t.Name
atLimit := len(t.msgSets) >= int(t.MaxMsgSets)
atLimit := len(t.msgSets) >= int(t.MaxStreams)
if !atLimit {
t.msgSets = append(t.msgSets, subject)
}
@@ -1432,7 +1435,7 @@ func (t *StreamTemplate) processInboundTemplateMsg(_ *subscription, _ *client, s
// Change the config from the template and only use literal subject.
cfg.Subjects = nil
cfg.Name = subject
mset, err := acc.AddMsgSet(&cfg)
mset, err := acc.AddStream(&cfg)
if err != nil {
// FIXME(dlc) - Remove from t.msgSets
c.Warnf("JetStream could not create message set for account %q on subject %q", acc.Name, subject)
@@ -1495,10 +1498,10 @@ func (t *StreamTemplate) Delete() error {
jsa.mu.Unlock()
// Remove message sets associated with this template.
var msgSets []*MsgSet
var msgSets []*Stream
t.mu.Lock()
for _, name := range t.msgSets {
if mset, err := acc.LookupMsgSet(name); err == nil {
if mset, err := acc.LookupStream(name); err == nil {
msgSets = append(msgSets, mset)
}
}
@@ -1545,7 +1548,7 @@ func (a *Account) Templates() []*StreamTemplate {
}
// Will add a message set to a template, this is for recovery.
func (jsa *jsAccount) addMsgSetNameToTemplate(tname, mname string) error {
func (jsa *jsAccount) addStreamNameToTemplate(tname, mname string) error {
if jsa.templates == nil {
return fmt.Errorf("no template found")
}

View File

@@ -1,4 +1,4 @@
// Copyright 2018 The NATS Authors
// Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -829,7 +829,7 @@ func TestJWTAccountExportWithResponseType(t *testing.T) {
if se.tokenReq {
t.Fatalf("Expected the service export to not require tokens")
}
if se.respType != Stream {
if se.respType != Streamed {
t.Fatalf("Expected the service export to respond with a stream")
}

View File

@@ -1,4 +1,4 @@
// Copyright 2019 The NATS Authors
// Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -23,13 +23,13 @@ import (
// TODO(dlc) - This is a fairly simplistic approach but should do for now.
type memStore struct {
mu sync.RWMutex
stats MsgSetStats
msgs map[uint64]*storedMsg
scb func(int64)
ageChk *time.Timer
config MsgSetConfig
obsCount int
mu sync.RWMutex
state StreamState
msgs map[uint64]*storedMsg
scb func(int64)
ageChk *time.Timer
config StreamConfig
consumers int
}
type storedMsg struct {
@@ -39,24 +39,22 @@ type storedMsg struct {
ts int64 // nanoseconds
}
func newMemStore(cfg *MsgSetConfig) (*memStore, error) {
func newMemStore(cfg *StreamConfig) (*memStore, error) {
if cfg == nil {
return nil, fmt.Errorf("config required")
}
if cfg.Storage != MemoryStorage {
return nil, fmt.Errorf("memStore requires memory storage type in config")
}
ms := &memStore{msgs: make(map[uint64]*storedMsg), config: *cfg}
// This only happens once, so ok to call here.
return ms, nil
return &memStore{msgs: make(map[uint64]*storedMsg), config: *cfg}, nil
}
// Store stores a message.
func (ms *memStore) StoreMsg(subj string, msg []byte) (uint64, error) {
ms.mu.Lock()
seq := ms.stats.LastSeq + 1
if ms.stats.FirstSeq == 0 {
ms.stats.FirstSeq = seq
seq := ms.state.LastSeq + 1
if ms.state.FirstSeq == 0 {
ms.state.FirstSeq = seq
}
// Make copies - https://github.com/go101/go101/wiki
@@ -65,12 +63,12 @@ func (ms *memStore) StoreMsg(subj string, msg []byte) (uint64, error) {
msg = append(msg[:0:0], msg...)
}
startBytes := int64(ms.stats.Bytes)
startBytes := int64(ms.state.Bytes)
ms.msgs[seq] = &storedMsg{subj, msg, seq, time.Now().UnixNano()}
ms.stats.Msgs++
ms.stats.Bytes += memStoreMsgSize(subj, msg)
ms.stats.LastSeq = seq
ms.state.Msgs++
ms.state.Bytes += memStoreMsgSize(subj, msg)
ms.state.LastSeq = seq
// Limits checks and enforcement.
ms.enforceMsgLimit()
@@ -81,7 +79,7 @@ func (ms *memStore) StoreMsg(subj string, msg []byte) (uint64, error) {
ms.startAgeChk()
}
cb := ms.scb
stopBytes := int64(ms.stats.Bytes)
stopBytes := int64(ms.state.Bytes)
ms.mu.Unlock()
if cb != nil {
@@ -105,28 +103,28 @@ func (ms *memStore) GetSeqFromTime(t time.Time) uint64 {
ms.mu.RLock()
defer ms.mu.RUnlock()
if len(ms.msgs) == 0 {
return ms.stats.LastSeq + 1
return ms.state.LastSeq + 1
}
if ts <= ms.msgs[ms.stats.FirstSeq].ts {
return ms.stats.FirstSeq
if ts <= ms.msgs[ms.state.FirstSeq].ts {
return ms.state.FirstSeq
}
last := ms.msgs[ms.stats.LastSeq].ts
last := ms.msgs[ms.state.LastSeq].ts
if ts == last {
return ms.stats.LastSeq
return ms.state.LastSeq
}
if ts > last {
return ms.stats.LastSeq + 1
return ms.state.LastSeq + 1
}
index := sort.Search(len(ms.msgs), func(i int) bool {
return ms.msgs[uint64(i)+ms.stats.FirstSeq].ts >= ts
return ms.msgs[uint64(i)+ms.state.FirstSeq].ts >= ts
})
return uint64(index) + ms.stats.FirstSeq
return uint64(index) + ms.state.FirstSeq
}
// Will check the msg limit and drop firstSeq msg if needed.
// Lock should be held.
func (ms *memStore) enforceMsgLimit() {
if ms.config.MaxMsgs <= 0 || ms.stats.Msgs <= uint64(ms.config.MaxMsgs) {
if ms.config.MaxMsgs <= 0 || ms.state.Msgs <= uint64(ms.config.MaxMsgs) {
return
}
ms.deleteFirstMsgOrPanic()
@@ -135,10 +133,10 @@ func (ms *memStore) enforceMsgLimit() {
// Will check the bytes limit and drop msgs if needed.
// Lock should be held.
func (ms *memStore) enforceBytesLimit() {
if ms.config.MaxBytes <= 0 || ms.stats.Bytes <= uint64(ms.config.MaxBytes) {
if ms.config.MaxBytes <= 0 || ms.state.Bytes <= uint64(ms.config.MaxBytes) {
return
}
for bs := ms.stats.Bytes; bs > uint64(ms.config.MaxBytes); bs = ms.stats.Bytes {
for bs := ms.state.Bytes; bs > uint64(ms.config.MaxBytes); bs = ms.state.Bytes {
ms.deleteFirstMsgOrPanic()
}
}
@@ -159,7 +157,7 @@ func (ms *memStore) expireMsgs() {
now := time.Now().UnixNano()
minAge := now - int64(ms.config.MaxAge)
for {
if sm, ok := ms.msgs[ms.stats.FirstSeq]; ok && sm.ts <= minAge {
if sm, ok := ms.msgs[ms.state.FirstSeq]; ok && sm.ts <= minAge {
ms.deleteFirstMsgOrPanic()
} else {
if !ok {
@@ -180,10 +178,10 @@ func (ms *memStore) Purge() uint64 {
ms.mu.Lock()
purged := uint64(len(ms.msgs))
cb := ms.scb
bytes := int64(ms.stats.Bytes)
ms.stats.FirstSeq = ms.stats.LastSeq + 1
ms.stats.Bytes = 0
ms.stats.Msgs = 0
bytes := int64(ms.state.Bytes)
ms.state.FirstSeq = ms.state.LastSeq + 1
ms.state.Bytes = 0
ms.state.Msgs = 0
ms.msgs = make(map[uint64]*storedMsg)
ms.mu.Unlock()
@@ -201,14 +199,14 @@ func (ms *memStore) deleteFirstMsgOrPanic() {
}
func (ms *memStore) deleteFirstMsg() bool {
return ms.removeMsg(ms.stats.FirstSeq, false)
return ms.removeMsg(ms.state.FirstSeq, false)
}
// LoadMsg will lookup the message by sequence number and return it if found.
func (ms *memStore) LoadMsg(seq uint64) (string, []byte, int64, error) {
ms.mu.RLock()
sm, ok := ms.msgs[seq]
last := ms.stats.LastSeq
last := ms.state.LastSeq
ms.mu.RUnlock()
if !ok || sm == nil {
@@ -244,11 +242,11 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool {
sm, ok := ms.msgs[seq]
if ok {
delete(ms.msgs, seq)
ms.stats.Msgs--
ms.state.Msgs--
ss = memStoreMsgSize(sm.subj, sm.msg)
ms.stats.Bytes -= ss
if seq == ms.stats.FirstSeq {
ms.stats.FirstSeq++
ms.state.Bytes -= ss
if seq == ms.state.FirstSeq {
ms.state.FirstSeq++
}
if secure {
rand.Read(sm.msg)
@@ -262,13 +260,12 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool {
return ok
}
func (ms *memStore) Stats() MsgSetStats {
func (ms *memStore) State() StreamState {
ms.mu.RLock()
stats := ms.stats
stats.Observables = ms.obsCount
state := ms.state
state.Consumers = ms.consumers
ms.mu.RUnlock()
return stats
return state
}
func memStoreMsgSize(subj string, msg []byte) uint64 {
@@ -292,48 +289,43 @@ func (ms *memStore) Stop() error {
return nil
}
func (ms *memStore) incObsCount() {
func (ms *memStore) incConsumers() {
ms.mu.Lock()
ms.obsCount++
ms.consumers++
ms.mu.Unlock()
}
func (ms *memStore) decObsCount() {
func (ms *memStore) decConsumers() {
ms.mu.Lock()
if ms.obsCount == 0 {
ms.mu.RUnlock()
return
if ms.consumers > 0 {
ms.consumers--
}
ms.obsCount--
ms.mu.Unlock()
}
type observableMemStore struct {
type consumerMemStore struct {
ms *memStore
}
func (ms *memStore) ObservableStore(_ string, _ *ObservableConfig) (ObservableStore, error) {
ms.incObsCount()
return &observableMemStore{ms}, nil
func (ms *memStore) ConsumerStore(_ string, _ *ConsumerConfig) (ConsumerStore, error) {
ms.incConsumers()
return &consumerMemStore{ms}, nil
}
// No-ops.
func (os *observableMemStore) Update(_ *ObservableState) error {
func (os *consumerMemStore) Update(_ *ConsumerState) error {
return nil
}
func (os *observableMemStore) Stop() error {
os.ms.decObsCount()
func (os *consumerMemStore) Stop() error {
os.ms.decConsumers()
return nil
}
func (os *observableMemStore) Delete() error {
func (os *consumerMemStore) Delete() error {
return os.Stop()
}
func (os *observableMemStore) State() (*ObservableState, error) { return nil, nil }
func (os *consumerMemStore) State() (*ConsumerState, error) { return nil, nil }
// Templates
type templateMemStore struct{}

View File

@@ -1,4 +1,4 @@
// Copyright 2019 The NATS Authors
// Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -21,7 +21,7 @@ import (
)
func TestMemStoreBasics(t *testing.T) {
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}
@@ -31,13 +31,13 @@ func TestMemStoreBasics(t *testing.T) {
} else if seq != 1 {
t.Fatalf("Expected sequence to be 1, got %d", seq)
}
stats := ms.Stats()
if stats.Msgs != 1 {
t.Fatalf("Expected 1 msg, got %d", stats.Msgs)
state := ms.State()
if state.Msgs != 1 {
t.Fatalf("Expected 1 msg, got %d", state.Msgs)
}
expectedSize := memStoreMsgSize(subj, msg)
if stats.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, stats.Bytes)
if state.Bytes != expectedSize {
t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
}
nsubj, nmsg, _, err := ms.LoadMsg(1)
if err != nil {
@@ -52,7 +52,7 @@ func TestMemStoreBasics(t *testing.T) {
}
func TestMemStoreMsgLimit(t *testing.T) {
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage, MaxMsgs: 10})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage, MaxMsgs: 10})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}
@@ -60,22 +60,22 @@ func TestMemStoreMsgLimit(t *testing.T) {
for i := 0; i < 10; i++ {
ms.StoreMsg(subj, msg)
}
stats := ms.Stats()
if stats.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, stats.Msgs)
state := ms.State()
if state.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
}
if _, err := ms.StoreMsg(subj, msg); err != nil {
t.Fatalf("Error storing msg: %v", err)
}
stats = ms.Stats()
if stats.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, stats.Msgs)
state = ms.State()
if state.Msgs != 10 {
t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
}
if stats.LastSeq != 11 {
t.Fatalf("Expected the last sequence to be 11 now, but got %d", stats.LastSeq)
if state.LastSeq != 11 {
t.Fatalf("Expected the last sequence to be 11 now, but got %d", state.LastSeq)
}
if stats.FirstSeq != 2 {
t.Fatalf("Expected the first sequence to be 2 now, but got %d", stats.FirstSeq)
if state.FirstSeq != 2 {
t.Fatalf("Expected the first sequence to be 2 now, but got %d", state.FirstSeq)
}
// Make sure we can not lookup seq 1.
if _, _, _, err := ms.LoadMsg(1); err == nil {
@@ -90,7 +90,7 @@ func TestMemStoreBytesLimit(t *testing.T) {
toStore := uint64(1024)
maxBytes := storedMsgSize * toStore
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage, MaxBytes: int64(maxBytes)})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage, MaxBytes: int64(maxBytes)})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}
@@ -98,12 +98,12 @@ func TestMemStoreBytesLimit(t *testing.T) {
for i := uint64(0); i < toStore; i++ {
ms.StoreMsg(subj, msg)
}
stats := ms.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := ms.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, stats.Bytes)
if state.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
}
// Now send 10 more and check that bytes limit enforced.
@@ -112,24 +112,24 @@ func TestMemStoreBytesLimit(t *testing.T) {
t.Fatalf("Error storing msg: %v", err)
}
}
stats = ms.Stats()
if stats.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state = ms.State()
if state.Msgs != toStore {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
if stats.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, stats.Bytes)
if state.Bytes != storedMsgSize*toStore {
t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
}
if stats.FirstSeq != 11 {
t.Fatalf("Expected first sequence to be 11, got %d", stats.FirstSeq)
if state.FirstSeq != 11 {
t.Fatalf("Expected first sequence to be 11, got %d", state.FirstSeq)
}
if stats.LastSeq != toStore+10 {
t.Fatalf("Expected last sequence to be %d, got %d", toStore+10, stats.LastSeq)
if state.LastSeq != toStore+10 {
t.Fatalf("Expected last sequence to be %d, got %d", toStore+10, state.LastSeq)
}
}
func TestMemStoreAgeLimit(t *testing.T) {
maxAge := 10 * time.Millisecond
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage, MaxAge: maxAge})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage, MaxAge: maxAge})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}
@@ -139,19 +139,19 @@ func TestMemStoreAgeLimit(t *testing.T) {
for i := 0; i < toStore; i++ {
ms.StoreMsg(subj, msg)
}
stats := ms.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state := ms.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
checkExpired := func(t *testing.T) {
t.Helper()
checkFor(t, time.Second, maxAge, func() error {
stats = ms.Stats()
if stats.Msgs != 0 {
return fmt.Errorf("Expected no msgs, got %d", stats.Msgs)
state = ms.State()
if state.Msgs != 0 {
return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
}
if stats.Bytes != 0 {
return fmt.Errorf("Expected no bytes, got %d", stats.Bytes)
if state.Bytes != 0 {
return fmt.Errorf("Expected no bytes, got %d", state.Bytes)
}
return nil
})
@@ -162,15 +162,15 @@ func TestMemStoreAgeLimit(t *testing.T) {
for i := 0; i < toStore; i++ {
ms.StoreMsg(subj, msg)
}
stats = ms.Stats()
if stats.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, stats.Msgs)
state = ms.State()
if state.Msgs != uint64(toStore) {
t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
}
checkExpired(t)
}
func TestMemStoreTimeStamps(t *testing.T) {
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}
@@ -194,7 +194,7 @@ func TestMemStoreTimeStamps(t *testing.T) {
}
func TestMemStoreEraseMsg(t *testing.T) {
ms, err := newMemStore(&MsgSetConfig{Storage: MemoryStorage})
ms, err := newMemStore(&StreamConfig{Storage: MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error creating store: %v", err)
}

View File

@@ -1954,7 +1954,7 @@ func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*expo
case "single", "singleton":
rt = Singleton
case "stream":
rt = Stream
rt = Streamed
case "chunk", "chunked":
rt = Chunked
default:

View File

@@ -1,4 +1,4 @@
// Copyright 2019 The NATS Authors
// Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -35,58 +35,71 @@ var (
// ErrStoreMsgNotFound when message was not found but was expected to be.
ErrStoreMsgNotFound = errors.New("no message found")
// ErrStoreEOF is returned when message seq is greater than the last sequence.
ErrStoreEOF = errors.New("msgset EOF")
ErrStoreEOF = errors.New("stream EOF")
)
type MsgSetStore interface {
type StreamStore interface {
StoreMsg(subj string, msg []byte) (uint64, error)
LoadMsg(seq uint64) (subj string, msg []byte, ts int64, err error)
RemoveMsg(seq uint64) bool
EraseMsg(seq uint64) bool
Purge() uint64
GetSeqFromTime(t time.Time) uint64
State() StreamState
StorageBytesUpdate(func(int64))
Stats() MsgSetStats
Delete() error
Stop() error
ObservableStore(name string, cfg *ObservableConfig) (ObservableStore, error)
ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error)
}
// MsgSetStats are stats about this given message set.
type MsgSetStats struct {
Msgs uint64 `json:"messages"`
Bytes uint64 `json:"bytes"`
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
Observables int `json:"observable_count"`
// RetentionPolicy determines how messages in a set are retained.
type RetentionPolicy int
const (
// LimitsPolicy (default) means that messages are retained until any given limit is reached.
// This could be one of MaxMsgs, MaxBytes, or MaxAge.
LimitsPolicy RetentionPolicy = iota
// InterestPolicy specifies that when all known observables have acknowledged a message it can be removed.
InterestPolicy
// WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
WorkQueuePolicy
)
// StreamState is information about the given stream.
type StreamState struct {
Msgs uint64 `json:"messages"`
Bytes uint64 `json:"bytes"`
FirstSeq uint64 `json:"first_seq"`
LastSeq uint64 `json:"last_seq"`
Consumers int `json:"consumer_count"`
}
// ObservableStore stores state on observables.
type ObservableStore interface {
State() (*ObservableState, error)
Update(*ObservableState) error
// ConsumerStore stores state on consumers for streams.
type ConsumerStore interface {
State() (*ConsumerState, error)
Update(*ConsumerState) error
Stop() error
Delete() error
}
// SequencePair has both the observable and the message set sequence. This point to same message.
// SequencePair has both the consumer and the stream sequence. They point to same message.
type SequencePair struct {
ObsSeq uint64 `json:"observable_sequence"`
SetSeq uint64 `json:"msg_set_sequence"`
ConsumerSeq uint64 `json:"consumer_seq"`
StreamSeq uint64 `json:"stream_seq"`
}
// ObservableState represents a stored state for an observable.
type ObservableState struct {
// Delivered keep track of last delivered sequence numbers for both set and observable.
// ConsumerState represents a stored state for a consumer.
type ConsumerState struct {
// Delivered keeps track of last delivered sequence numbers for both the stream and the consumer.
Delivered SequencePair `json:"delivered"`
// AckFloor keeps track of the ack floors for both set and observable.
// AckFloor keeps track of the ack floors for both the stream and the consumer.
AckFloor SequencePair `json:"ack_floor"`
// These are both in set sequence context.
// These are both in stream sequence context.
// Pending is for all messages pending and the timestamp for the delivered time.
// This will only be present when the AckPolicy is ExplicitAck.
Pending map[uint64]int64 `json:"pending"`
// This is for messages that have been redelivered, so count > 1.
Redelivery map[uint64]uint64 `json:"redelivery"`
Redelivered map[uint64]uint64 `json:"redelivered"`
}
// TemplateStore stores templates.
@@ -100,14 +113,14 @@ func jsonString(s string) string {
}
const (
streamPolicyString = "stream_limits"
interestPolicyString = "interest_based"
workQueuePolicyString = "work_queue"
limitsPolicyString = "limits"
interestPolicyString = "interest"
workQueuePolicyString = "workqueue"
)
func (rp RetentionPolicy) String() string {
switch rp {
case StreamPolicy:
case LimitsPolicy:
return "Limits"
case InterestPolicy:
return "Interest"
@@ -120,8 +133,8 @@ func (rp RetentionPolicy) String() string {
func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
switch rp {
case StreamPolicy:
return json.Marshal(streamPolicyString)
case LimitsPolicy:
return json.Marshal(limitsPolicyString)
case InterestPolicy:
return json.Marshal(interestPolicyString)
case WorkQueuePolicy:
@@ -133,8 +146,8 @@ func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString(streamPolicyString):
*rp = StreamPolicy
case jsonString(limitsPolicyString):
*rp = LimitsPolicy
case jsonString(interestPolicyString):
*rp = InterestPolicy
case jsonString(workQueuePolicyString):

View File

@@ -23,78 +23,65 @@ import (
"time"
)
// MsgSetConfig will determine the name, subjects and retention policy
// for a given message set. If subjects is empty the name will be used.
type MsgSetConfig struct {
Name string `json:"name"`
Subjects []string `json:"subjects,omitempty"`
Retention RetentionPolicy `json:"retention"`
MaxObservables int `json:"max_observables"`
MaxMsgs int64 `json:"max_msgs"`
MaxBytes int64 `json:"max_bytes"`
MaxAge time.Duration `json:"max_age"`
MaxMsgSize int32 `json:"max_msg_size,omitempty"`
Storage StorageType `json:"storage"`
Replicas int `json:"num_replicas"`
NoAck bool `json:"no_ack,omitempty"`
Template string `json:"template_owner,omitempty"`
// StreamConfig will determine the name, subjects and retention policy
// for a given stream. If subjects is empty the name will be used.
type StreamConfig struct {
Name string `json:"name"`
Subjects []string `json:"subjects,omitempty"`
Retention RetentionPolicy `json:"retention"`
MaxConsumers int `json:"max_consumers"`
MaxMsgs int64 `json:"max_msgs"`
MaxBytes int64 `json:"max_bytes"`
MaxAge time.Duration `json:"max_age"`
MaxMsgSize int32 `json:"max_msg_size,omitempty"`
Storage StorageType `json:"storage"`
Replicas int `json:"num_replicas"`
NoAck bool `json:"no_ack,omitempty"`
Template string `json:"template_owner,omitempty"`
}
type MsgSetInfo struct {
Config MsgSetConfig `json:"config"`
Stats MsgSetStats `json:"stats"`
type StreamInfo struct {
Config StreamConfig `json:"config"`
State StreamState `json:"state"`
}
// RetentionPolicy determines how messages in a set are retained.
type RetentionPolicy int
const (
// StreamPolicy (default) means that messages are retained until any possible given limit is reached.
// This could be any one of MaxMsgs, MaxBytes, or MaxAge.
StreamPolicy RetentionPolicy = iota
// InterestPolicy specifies that when all known observables have acknowledged a message it can be removed.
InterestPolicy
// WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
WorkQueuePolicy
)
// MsgSet is a jetstream message set. When we receive a message internally destined
// for a MsgSet we will direct link from the client to this MsgSet structure.
type MsgSet struct {
mu sync.RWMutex
sg *sync.Cond
sgw int
jsa *jsAccount
client *client
sid int
sendq chan *jsPubMsg
store MsgSetStore
obs map[string]*Observable
config MsgSetConfig
// Stream is a jetstream stream of messages. When we receive a message internally destined
// for a Stream we will direct link from the client to this Stream structure.
type Stream struct {
mu sync.RWMutex
sg *sync.Cond
sgw int
jsa *jsAccount
client *client
sid int
sendq chan *jsPubMsg
store StreamStore
consumers map[string]*Consumer
config StreamConfig
}
const (
MsgSetDefaultReplicas = 1
MsgSetMaxReplicas = 8
StreamDefaultReplicas = 1
StreamMaxReplicas = 8
)
// AddMsgSet adds a JetStream message set for the given account.
func (a *Account) AddMsgSet(config *MsgSetConfig) (*MsgSet, error) {
// AddStream adds a stream for the given account.
func (a *Account) AddStream(config *StreamConfig) (*Stream, error) {
s, jsa, err := a.checkForJetStream()
if err != nil {
return nil, err
}
// Sensible defaults.
cfg, err := checkMsgSetCfg(config)
cfg, err := checkStreamCfg(config)
if err != nil {
return nil, err
}
jsa.mu.Lock()
if _, ok := jsa.msgSets[cfg.Name]; ok {
if _, ok := jsa.streams[cfg.Name]; ok {
jsa.mu.Unlock()
return nil, fmt.Errorf("message set name already in use")
return nil, fmt.Errorf("stream name already in use")
}
// Check for limits.
if err := jsa.checkLimits(&cfg); err != nil {
@@ -105,7 +92,7 @@ func (a *Account) AddMsgSet(config *MsgSetConfig) (*MsgSet, error) {
if cfg.Template != _EMPTY_ && jsa.account != nil {
if !jsa.checkTemplateOwnership(cfg.Template, cfg.Name) {
jsa.mu.Unlock()
return nil, fmt.Errorf("message set not owned by template")
return nil, fmt.Errorf("stream not owned by template")
}
}
@@ -115,15 +102,15 @@ func (a *Account) AddMsgSet(config *MsgSetConfig) (*MsgSet, error) {
// Check for overlapping subjects. These are not allowed for now.
if jsa.subjectsOverlap(cfg.Subjects) {
jsa.mu.Unlock()
return nil, fmt.Errorf("subjects overlap with an existing message set")
return nil, fmt.Errorf("subjects overlap with an existing stream")
}
// Setup the internal client.
c := s.createInternalJetStreamClient()
mset := &MsgSet{jsa: jsa, config: cfg, client: c, obs: make(map[string]*Observable)}
mset := &Stream{jsa: jsa, config: cfg, client: c, consumers: make(map[string]*Consumer)}
mset.sg = sync.NewCond(&mset.mu)
jsa.msgSets[cfg.Name] = mset
jsa.streams[cfg.Name] = mset
storeDir := path.Join(jsa.storeDir, streamsDir, cfg.Name)
jsa.mu.Unlock()
@@ -139,7 +126,7 @@ func (a *Account) AddMsgSet(config *MsgSetConfig) (*MsgSet, error) {
mset.setupSendCapabilities()
// Setup subscriptions
if err := mset.subscribeToMsgSet(); err != nil {
if err := mset.subscribeToStream(); err != nil {
mset.delete()
return nil, err
}
@@ -150,7 +137,7 @@ func (a *Account) AddMsgSet(config *MsgSetConfig) (*MsgSet, error) {
// Check to see if these subjects overlap with existing subjects.
// Lock should be held.
func (jsa *jsAccount) subjectsOverlap(subjects []string) bool {
for _, mset := range jsa.msgSets {
for _, mset := range jsa.streams {
for _, subj := range mset.config.Subjects {
for _, tsubj := range subjects {
if SubjectsCollide(tsubj, subj) {
@@ -162,14 +149,14 @@ func (jsa *jsAccount) subjectsOverlap(subjects []string) bool {
return false
}
func checkMsgSetCfg(config *MsgSetConfig) (MsgSetConfig, error) {
func checkStreamCfg(config *StreamConfig) (StreamConfig, error) {
if config == nil {
return MsgSetConfig{}, fmt.Errorf("message set configuration invalid")
return StreamConfig{}, fmt.Errorf("stream configuration invalid")
}
if len(config.Name) == 0 || strings.ContainsAny(config.Name, "*>") {
//if !isValidName(config.Name) {
return MsgSetConfig{}, fmt.Errorf("message set name is required and can not contain '*', '>'")
return StreamConfig{}, fmt.Errorf("stream name is required and can not contain '*', '>'")
}
cfg := *config
@@ -178,8 +165,8 @@ func checkMsgSetCfg(config *MsgSetConfig) (MsgSetConfig, error) {
if cfg.Replicas == 0 {
cfg.Replicas = 1
}
if cfg.Replicas > MsgSetMaxReplicas {
return cfg, fmt.Errorf("maximum replicas is %d", MsgSetMaxReplicas)
if cfg.Replicas > StreamMaxReplicas {
return cfg, fmt.Errorf("maximum replicas is %d", StreamMaxReplicas)
}
if cfg.MaxMsgs == 0 {
cfg.MaxMsgs = -1
@@ -193,15 +180,15 @@ func checkMsgSetCfg(config *MsgSetConfig) (MsgSetConfig, error) {
return cfg, nil
}
// Config returns the message set's configuration.
func (mset *MsgSet) Config() MsgSetConfig {
// Config returns the stream's configuration.
func (mset *Stream) Config() StreamConfig {
mset.mu.Lock()
defer mset.mu.Unlock()
return mset.config
}
// Delete deletes a message set from the owning account.
func (mset *MsgSet) Delete() error {
// Delete deletes a stream from the owning account.
func (mset *Stream) Delete() error {
mset.mu.Lock()
jsa := mset.jsa
mset.mu.Unlock()
@@ -209,23 +196,23 @@ func (mset *MsgSet) Delete() error {
return fmt.Errorf("jetstream not enabled for account")
}
jsa.mu.Lock()
delete(jsa.msgSets, mset.config.Name)
delete(jsa.streams, mset.config.Name)
jsa.mu.Unlock()
return mset.delete()
}
// Purge will remove all messages from the message set and underlying store.
func (mset *MsgSet) Purge() uint64 {
// Purge will remove all messages from the stream and underlying store.
func (mset *Stream) Purge() uint64 {
mset.mu.Lock()
if mset.client == nil {
mset.mu.Unlock()
return 0
}
purged := mset.store.Purge()
stats := mset.store.Stats()
var obs []*Observable
for _, o := range mset.obs {
stats := mset.store.State()
var obs []*Consumer
for _, o := range mset.consumers {
obs = append(obs, o)
}
mset.mu.Unlock()
@@ -235,25 +222,25 @@ func (mset *MsgSet) Purge() uint64 {
return purged
}
// RemoveMsg will remove a message from a message set.
// RemoveMsg will remove a message from a stream.
// FIXME(dlc) - Should pick one and be consistent.
func (mset *MsgSet) RemoveMsg(seq uint64) bool {
func (mset *Stream) RemoveMsg(seq uint64) bool {
return mset.store.RemoveMsg(seq)
}
// DeleteMsg will remove a message from a message set.
func (mset *MsgSet) DeleteMsg(seq uint64) bool {
// DeleteMsg will remove a message from a stream.
func (mset *Stream) DeleteMsg(seq uint64) bool {
return mset.store.RemoveMsg(seq)
}
// EraseMsg will securely remove a message and rewrite the data with random data.
func (mset *MsgSet) EraseMsg(seq uint64) bool {
func (mset *Stream) EraseMsg(seq uint64) bool {
return mset.store.EraseMsg(seq)
}
// Will create internal subscriptions for the msgSet.
// Lock should be held.
func (mset *MsgSet) subscribeToMsgSet() error {
func (mset *Stream) subscribeToStream() error {
for _, subject := range mset.config.Subjects {
if _, err := mset.subscribeInternal(subject, mset.processInboundJetStreamMsg); err != nil {
return err
@@ -269,14 +256,14 @@ func (mset *MsgSet) subscribeToMsgSet() error {
}
// FIXME(dlc) - This only works in single server mode for the moment. Need to fix as we expand to clusters.
func (mset *MsgSet) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
func (mset *Stream) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
return mset.nmsSubscribeInternal(subject, false, cb)
}
func (mset *MsgSet) nmsSubscribeInternal(subject string, internalOnly bool, cb msgHandler) (*subscription, error) {
func (mset *Stream) nmsSubscribeInternal(subject string, internalOnly bool, cb msgHandler) (*subscription, error) {
c := mset.client
if c == nil {
return nil, fmt.Errorf("invalid message set")
return nil, fmt.Errorf("invalid stream")
}
if !c.srv.eventsEnabled() {
return nil, ErrNoSysAccount
@@ -299,14 +286,14 @@ func (mset *MsgSet) nmsSubscribeInternal(subject string, internalOnly bool, cb m
}
// Lock should be held.
func (mset *MsgSet) unsubscribe(sub *subscription) {
func (mset *Stream) unsubscribe(sub *subscription) {
if sub == nil || mset.client == nil {
return
}
mset.client.unsubscribe(mset.client.acc, sub, true, true)
}
func (mset *MsgSet) setupStore(storeDir string) error {
func (mset *Stream) setupStore(storeDir string) error {
mset.mu.Lock()
defer mset.mu.Unlock()
@@ -330,7 +317,7 @@ func (mset *MsgSet) setupStore(storeDir string) error {
}
// processMsgBySeq will return the message at the given sequence, or an -ERR if not found.
func (mset *MsgSet) processMsgBySeq(_ *subscription, _ *client, subject, reply string, msg []byte) {
func (mset *Stream) processMsgBySeq(_ *subscription, _ *client, subject, reply string, msg []byte) {
mset.mu.Lock()
store := mset.store
c := mset.client
@@ -346,7 +333,7 @@ func (mset *MsgSet) processMsgBySeq(_ *subscription, _ *client, subject, reply s
// If no sequence arg assume last sequence we have.
if len(msg) == 0 {
stats := store.Stats()
stats := store.State()
seq = stats.LastSeq
} else {
seq, err = strconv.ParseUint(string(msg), 10, 64)
@@ -375,8 +362,8 @@ func (mset *MsgSet) processMsgBySeq(_ *subscription, _ *client, subject, reply s
mset.sendq <- &jsPubMsg{reply, _EMPTY_, _EMPTY_, response, nil, 0}
}
// processInboundJetStreamMsg handles processing messages bound for a message set.
func (mset *MsgSet) processInboundJetStreamMsg(_ *subscription, _ *client, subject, reply string, msg []byte) {
// processInboundJetStreamMsg handles processing messages bound for a stream.
func (mset *Stream) processInboundJetStreamMsg(_ *subscription, _ *client, subject, reply string, msg []byte) {
mset.mu.Lock()
store := mset.store
c := mset.client
@@ -406,7 +393,7 @@ func (mset *MsgSet) processInboundJetStreamMsg(_ *subscription, _ *client, subje
// Check to see if we are over the account limit.
seq, err = store.StoreMsg(subject, msg)
if err != nil {
c.Errorf("JetStream failed to store a msg on account: %q message set: %q - %v", accName, name, err)
c.Errorf("JetStream failed to store a msg on account: %q stream: %q - %v", accName, name, err)
response = []byte(fmt.Sprintf("-ERR '%s'", err.Error()))
} else if jsa.limitsExceeded(stype) {
c.Warnf("JetStream resource limits exceeded for account: %q", accName)
@@ -424,7 +411,7 @@ func (mset *MsgSet) processInboundJetStreamMsg(_ *subscription, _ *client, subje
if err == nil && seq > 0 {
var needSignal bool
mset.mu.Lock()
for _, o := range mset.obs {
for _, o := range mset.consumers {
if !o.deliverCurrentMsg(subject, msg, seq) {
needSignal = true
}
@@ -432,13 +419,13 @@ func (mset *MsgSet) processInboundJetStreamMsg(_ *subscription, _ *client, subje
mset.mu.Unlock()
if needSignal {
mset.signalObservers()
mset.signalConsumers()
}
}
}
// Will signal all waiting observables.
func (mset *MsgSet) signalObservers() {
// Will signal all waiting consumers.
func (mset *Stream) signalConsumers() {
mset.mu.Lock()
if mset.sgw > 0 {
mset.sg.Broadcast()
@@ -452,11 +439,11 @@ type jsPubMsg struct {
dsubj string
reply string
msg []byte
o *Observable
o *Consumer
seq uint64
}
// StoredMsg is for raw access to messages in a message set.
// StoredMsg is for raw access to messages in a stream.
type StoredMsg struct {
Subject string `json:"subject"`
Sequence uint64 `json:"seq"`
@@ -469,7 +456,7 @@ const msetSendQSize = 1024
// This is similar to system semantics but did not want to overload the single system sendq,
// or require system account when doing simple setup with jetstream.
func (mset *MsgSet) setupSendCapabilities() {
func (mset *Stream) setupSendCapabilities() {
mset.mu.Lock()
defer mset.mu.Unlock()
if mset.sendq != nil {
@@ -479,14 +466,14 @@ func (mset *MsgSet) setupSendCapabilities() {
go mset.internalSendLoop()
}
// Name returns the message set name.
func (mset *MsgSet) Name() string {
// Name returns the stream name.
func (mset *Stream) Name() string {
mset.mu.Lock()
defer mset.mu.Unlock()
return mset.config.Name
}
func (mset *MsgSet) internalSendLoop() {
func (mset *Stream) internalSendLoop() {
mset.mu.Lock()
c := mset.client
if c == nil {
@@ -505,7 +492,7 @@ func (mset *MsgSet) internalSendLoop() {
for {
if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
s.Warnf("Jetstream internal send queue > 75% for account: %q message set: %q", c.acc.Name, name)
s.Warnf("Jetstream internal send queue > 75% for account: %q stream: %q", c.acc.Name, name)
last = time.Now()
}
select {
@@ -533,13 +520,13 @@ func (mset *MsgSet) internalSendLoop() {
}
}
// Internal function to delete a message set.
func (mset *MsgSet) delete() error {
// Internal function to delete a stream.
func (mset *Stream) delete() error {
return mset.stop(true)
}
// Internal function to stop or delete the message set.
func (mset *MsgSet) stop(delete bool) error {
// Internal function to stop or delete the stream.
func (mset *Stream) stop(delete bool) error {
mset.mu.Lock()
if mset.sendq != nil {
mset.sendq <- nil
@@ -550,11 +537,11 @@ func (mset *MsgSet) stop(delete bool) error {
mset.mu.Unlock()
return nil
}
var obs []*Observable
for _, o := range mset.obs {
var obs []*Consumer
for _, o := range mset.consumers {
obs = append(obs, o)
}
mset.obs = nil
mset.consumers = nil
mset.mu.Unlock()
c.closeConnection(ClientClosed)
@@ -584,31 +571,31 @@ func (mset *MsgSet) stop(delete bool) error {
return nil
}
// Observables will return all the current observables for this message set.
func (mset *MsgSet) Observables() []*Observable {
// Consumers will return all the current consumers for this stream.
func (mset *Stream) Consumers() []*Consumer {
mset.mu.Lock()
defer mset.mu.Unlock()
var obs []*Observable
for _, o := range mset.obs {
var obs []*Consumer
for _, o := range mset.consumers {
obs = append(obs, o)
}
return obs
}
// NumObservables reports on number of active observables for this message set.
func (mset *MsgSet) NumObservables() int {
// NumConsumers reports the number of active consumers for this stream.
func (mset *Stream) NumConsumers() int {
mset.mu.Lock()
defer mset.mu.Unlock()
return len(mset.obs)
return len(mset.consumers)
}
// LookupObservable will retrieve an observable by name.
func (mset *MsgSet) LookupObservable(name string) *Observable {
// LookupConsumer will retrieve a consumer by name.
func (mset *Stream) LookupConsumer(name string) *Consumer {
mset.mu.Lock()
defer mset.mu.Unlock()
for _, o := range mset.obs {
for _, o := range mset.consumers {
if o.name == name {
return o
}
@@ -616,21 +603,21 @@ func (mset *MsgSet) LookupObservable(name string) *Observable {
return nil
}
// Stats will return the current stats for this message set.
func (mset *MsgSet) Stats() MsgSetStats {
// State will return the current state for this stream.
func (mset *Stream) State() StreamState {
mset.mu.Lock()
c := mset.client
mset.mu.Unlock()
if c == nil {
return MsgSetStats{}
return StreamState{}
}
// Currently rely on store.
// TODO(dlc) - This will need to change with clusters.
return mset.store.Stats()
return mset.store.State()
}
// waitForMsgs will have the message set wait for the arrival of new messages.
func (mset *MsgSet) waitForMsgs() {
// waitForMsgs will have the stream wait for the arrival of new messages.
func (mset *Stream) waitForMsgs() {
mset.mu.Lock()
if mset.client == nil {
@@ -647,8 +634,8 @@ func (mset *MsgSet) waitForMsgs() {
// Determines if the new proposed partition is unique amongst all observables.
// Lock should be held.
func (mset *MsgSet) partitionUnique(partition string) bool {
for _, o := range mset.obs {
func (mset *Stream) partitionUnique(partition string) bool {
for _, o := range mset.consumers {
if o.config.FilterSubject == _EMPTY_ {
return false
}
@@ -660,16 +647,16 @@ func (mset *MsgSet) partitionUnique(partition string) bool {
}
// ackMsg is called into from an observable when we have a WorkQueue or Interest retention policy.
func (mset *MsgSet) ackMsg(obs *Observable, seq uint64) {
func (mset *Stream) ackMsg(obs *Consumer, seq uint64) {
switch mset.config.Retention {
case StreamPolicy:
case LimitsPolicy:
return
case WorkQueuePolicy:
mset.store.RemoveMsg(seq)
case InterestPolicy:
var needAck bool
mset.mu.Lock()
for _, o := range mset.obs {
for _, o := range mset.consumers {
if o != obs && o.needAck(seq) {
needAck = true
break

View File

@@ -1407,9 +1407,9 @@ func Benchmark_JetStreamPubWithAck(b *testing.B) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: "foo"})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "foo"})
if err != nil {
b.Fatalf("Unexpected error adding message set: %v", err)
b.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
@@ -1425,9 +1425,9 @@ func Benchmark_JetStreamPubWithAck(b *testing.B) {
}
b.StopTimer()
stats := mset.Stats()
if int(stats.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, stats.Msgs)
state := mset.State()
if int(state.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs)
}
}
@@ -1435,9 +1435,9 @@ func Benchmark_JetStreamPubNoAck(b *testing.B) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: "foo"})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "foo"})
if err != nil {
b.Fatalf("Unexpected error adding message set: %v", err)
b.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
@@ -1456,9 +1456,9 @@ func Benchmark_JetStreamPubNoAck(b *testing.B) {
nc.Flush()
b.StopTimer()
stats := mset.Stats()
if int(stats.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, stats.Msgs)
state := mset.State()
if int(state.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs)
}
}
@@ -1466,9 +1466,9 @@ func Benchmark_JetStreamPubAsyncAck(b *testing.B) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: "foo"})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "foo"})
if err != nil {
b.Fatalf("Unexpected error adding message set: %v", err)
b.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
@@ -1505,9 +1505,9 @@ func Benchmark_JetStreamPubAsyncAck(b *testing.B) {
nc.Flush()
b.StopTimer()
stats := mset.Stats()
if int(stats.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, stats.Msgs)
state := mset.State()
if int(state.Msgs) != b.N {
b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs)
}
}
@@ -1520,9 +1520,9 @@ func Benchmark____JetStreamSubNoAck(b *testing.B) {
defer s.Shutdown()
mname := "foo"
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: mname})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname})
if err != nil {
b.Fatalf("Unexpected error adding message set: %v", err)
b.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
@@ -1538,9 +1538,9 @@ func Benchmark____JetStreamSubNoAck(b *testing.B) {
}
nc.Flush()
stats := mset.Stats()
if stats.Msgs != uint64(b.N) {
b.Fatalf("Expected %d messages, got %d", b.N, stats.Msgs)
state := mset.State()
if state.Msgs != uint64(b.N) {
b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs)
}
total := int32(b.N)
@@ -1559,7 +1559,7 @@ func Benchmark____JetStreamSubNoAck(b *testing.B) {
nc.Flush()
b.ResetTimer()
o, err := mset.AddObservable(&server.ObservableConfig{Delivery: deliverTo, Durable: oname, AckPolicy: server.AckNone, DeliverAll: true})
o, err := mset.AddConsumer(&server.ConsumerConfig{Delivery: deliverTo, Durable: oname, AckPolicy: server.AckNone, DeliverAll: true})
if err != nil {
b.Fatalf("Expected no error with registered interest, got %v", err)
}
@@ -1578,9 +1578,9 @@ func benchJetStreamWorkersAndBatch(b *testing.B, numWorkers, batchSize int) {
defer s.Shutdown()
mname := "MSET22"
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: mname})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname})
if err != nil {
b.Fatalf("Unexpected error adding message set: %v", err)
b.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
@@ -1596,14 +1596,14 @@ func benchJetStreamWorkersAndBatch(b *testing.B, numWorkers, batchSize int) {
}
nc.Flush()
stats := mset.Stats()
if stats.Msgs != uint64(b.N) {
b.Fatalf("Expected %d messages, got %d", b.N, stats.Msgs)
state := mset.State()
if state.Msgs != uint64(b.N) {
b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs)
}
// Create basic work queue mode observable.
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddObservable(&server.ObservableConfig{Durable: oname, DeliverAll: true})
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, DeliverAll: true})
if err != nil {
b.Fatalf("Expected no error with registered interest, got %v", err)
}

File diff suppressed because it is too large Load Diff

View File

@@ -541,15 +541,15 @@ func TestJetStreamWorkQueueLoadBalance(t *testing.T) {
defer s.Shutdown()
mname := "MY_MSG_SET"
mset, err := s.GlobalAccount().AddMsgSet(&server.MsgSetConfig{Name: mname, Subjects: []string{"foo", "bar"}})
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Subjects: []string{"foo", "bar"}})
if err != nil {
t.Fatalf("Unexpected error adding message set: %v", err)
}
defer mset.Delete()
// Create basic work queue mode observable.
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddObservable(&server.ObservableConfig{Durable: oname, DeliverAll: true, AckPolicy: server.AckExplicit})
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, DeliverAll: true, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error with durable, got %v", err)
}
@@ -559,7 +559,7 @@ func TestJetStreamWorkQueueLoadBalance(t *testing.T) {
nc := clientConnectToServer(t, s)
defer nc.Close()
// For normal work queue semantics, you send requests to the subject with message set and observable name.
// For normal work queue semantics, you send requests to the subject with stream and consumer name.
reqMsgSubj := o.RequestNextMsgSubject()
numWorkers := 25