mirror of
https://github.com/gogrlx/nats-server.git
synced 2026-04-13 17:58:00 -07:00
----------------------------------------------------------------
Backward-incompatibility note:
Varz used to embed *Info and *Options which are other server objects.
However, Info is a struct that servers used to send protocols to other
servers or clients and its content must contain json tags since we
need to marshal those to be sent over. The problem is that it made
those fields now accessible to users calling Varz() and also visible
to the http /varz output. Some fields in Info were introduced in the
2.0 branch that clashed with json tag in Options, which made cluster{}
for instance disappear in the /varz output - because a Cluster string
in Info has the same json tag, and Cluster in Info is empty in some
cases.
For users that embed NATS and were using Server.Varz() directly,
without the use of the monitoring endpoint, they were then given
access (which was not the intent) to server internals (Info and Options).
Fields that were in Info or Options or directly in Varz that did not
clash with each other could be referenced directly; for instance, this
is how you could access the server ID:
v, _ := s.Varz(nil)
fmt.Println(v.ID)
Another way would be:
fmt.Println(v.Info.ID)
Same goes for fields that were brought from embedding the Options:
fmt.Println(v.MaxConn)
or
fmt.Println(v.Options.MaxConn)
We have decided to explicitly define fields in Varz, which means
that if you previously accessed fields through v.Info or v.Options,
you will have to update your code to use the corresponding field
directly: v.ID or v.MaxConn for instance.
So fields were also duplicated between Info/Options and Varz itself
so depending on which one your application was accessing, you may
have to update your code.
---------------------------------------------------------------
Other issues that have been fixed are races that were introduced
by the fact that the creation of a Varz object (pointing to
some server data) was done under server lock, but marshaling not
being done under that lock caused races.
The fact that object returned to user through Server.Varz() also
had references to server internal objects had to be fixed by
returning deep copy of those internal objects.
Signed-off-by: Ivan Kozlovic <ivan@synadia.com>
1248 lines
35 KiB
Go
1248 lines
35 KiB
Go
// Copyright 2013-2019 The NATS Authors
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package server
|
|
|
|
import (
|
|
"crypto/tls"
|
|
"encoding/json"
|
|
"fmt"
|
|
"net"
|
|
"net/http"
|
|
"runtime"
|
|
"sort"
|
|
"strconv"
|
|
"strings"
|
|
"sync/atomic"
|
|
"time"
|
|
|
|
"github.com/nats-io/nats-server/server/pse"
|
|
)
|
|
|
|
// Snapshot this
|
|
var numCores int
|
|
|
|
func init() {
|
|
numCores = runtime.NumCPU()
|
|
}
|
|
|
|
// Connz represents detailed information on current client connections.
type Connz struct {
	ID       string      `json:"server_id"`       // Server ID (copied from the server's Info).
	Now      time.Time   `json:"now"`             // Time at which this snapshot was taken.
	NumConns int         `json:"num_connections"` // Number of entries in Conns (after pagination).
	Total    int         `json:"total"`           // Total number of matching connections on the server.
	Offset   int         `json:"offset"`          // Pagination offset that was applied.
	Limit    int         `json:"limit"`           // Pagination limit that was applied.
	Conns    []*ConnInfo `json:"connections"`     // Per-connection details.
}
|
|
|
|
// ConnzOptions are the options passed to Connz()
type ConnzOptions struct {
	// Sort indicates how the results will be sorted. Check SortOpt for possible values.
	// Only the sort by connection ID (ByCid) is ascending, all others are descending.
	Sort SortOpt `json:"sort"`

	// Username indicates if user names should be included in the results.
	Username bool `json:"auth"`

	// Subscriptions indicates if subscriptions should be included in the results.
	Subscriptions bool `json:"subscriptions"`

	// Offset is used for pagination. Connz() only returns connections starting at this
	// offset from the global results. Negative values are treated as 0.
	Offset int `json:"offset"`

	// Limit is the maximum number of connections that should be returned by Connz().
	// Zero or negative values fall back to DefaultConnListSize.
	Limit int `json:"limit"`

	// Filter for this explicit client connection. When non-zero, at most
	// one connection is returned.
	CID uint64 `json:"cid"`

	// Filter by connection state. Defaults to ConnOpen (the zero value).
	State ConnState `json:"state"`
}
|
|
|
|
// ConnState is for filtering states of connections. We will only have two, open and closed.
type ConnState int

// Possible ConnState values, used by ConnzOptions.State to select which
// connections Connz() reports on.
const (
	// ConnOpen filters on open clients.
	ConnOpen = ConnState(iota)
	// ConnClosed filters on closed clients.
	ConnClosed
	// ConnAll returns all clients.
	ConnAll
)
|
|
|
|
// ConnInfo has detailed information on a per connection basis.
type ConnInfo struct {
	Cid            uint64     `json:"cid"`                          // Internal client ID.
	IP             string     `json:"ip"`                           // Remote host of the connection.
	Port           int        `json:"port"`                         // Remote port of the connection.
	Start          time.Time  `json:"start"`                        // When the connection was established.
	LastActivity   time.Time  `json:"last_activity"`                // Last time the connection was active.
	Stop           *time.Time `json:"stop,omitempty"`               // When the connection was closed (closed conns only).
	Reason         string     `json:"reason,omitempty"`             // Why the connection was closed (closed conns only).
	RTT            string     `json:"rtt,omitempty"`                // Last measured round-trip time, as a duration string.
	Uptime         string     `json:"uptime"`                       // Human-readable time since Start.
	Idle           string     `json:"idle"`                         // Human-readable time since LastActivity.
	Pending        int        `json:"pending_bytes"`                // Bytes pending in the outbound buffer.
	InMsgs         int64      `json:"in_msgs"`                      // Messages received from this client.
	OutMsgs        int64      `json:"out_msgs"`                     // Messages sent to this client.
	InBytes        int64      `json:"in_bytes"`                     // Bytes received from this client.
	OutBytes       int64      `json:"out_bytes"`                    // Bytes sent to this client.
	NumSubs        uint32     `json:"subscriptions"`                // Number of active subscriptions.
	Name           string     `json:"name,omitempty"`               // Client-provided connection name.
	Lang           string     `json:"lang,omitempty"`               // Client library language.
	Version        string     `json:"version,omitempty"`            // Client library version.
	TLSVersion     string     `json:"tls_version,omitempty"`        // Negotiated TLS version, when TLS is used.
	TLSCipher      string     `json:"tls_cipher_suite,omitempty"`   // Negotiated TLS cipher suite, when TLS is used.
	AuthorizedUser string     `json:"authorized_user,omitempty"`    // Username, only filled when auth was requested.
	Subs           []string   `json:"subscriptions_list,omitempty"` // Subject list, only filled when subs were requested.
}
|
|
|
|
// DefaultConnListSize is the default size of the connection list.
const DefaultConnListSize = 1024

// DefaultSubListSize is the default size of the subscriptions list.
const DefaultSubListSize = 1024

// defaultStackBufSize is the initial buffer size used when dumping
// goroutine stacks in HandleStacksz; it is doubled until the dump fits.
const defaultStackBufSize = 10000
|
|
|
|
// Connz returns a Connz struct containing inormation about connections.
|
|
func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) {
|
|
var (
|
|
sortOpt = ByCid
|
|
auth bool
|
|
subs bool
|
|
offset int
|
|
limit = DefaultConnListSize
|
|
cid = uint64(0)
|
|
state = ConnOpen
|
|
)
|
|
|
|
if opts != nil {
|
|
// If no sort option given or sort is by uptime, then sort by cid
|
|
if opts.Sort == "" {
|
|
sortOpt = ByCid
|
|
} else {
|
|
sortOpt = opts.Sort
|
|
if !sortOpt.IsValid() {
|
|
return nil, fmt.Errorf("invalid sorting option: %s", sortOpt)
|
|
}
|
|
}
|
|
auth = opts.Username
|
|
subs = opts.Subscriptions
|
|
offset = opts.Offset
|
|
if offset < 0 {
|
|
offset = 0
|
|
}
|
|
limit = opts.Limit
|
|
if limit <= 0 {
|
|
limit = DefaultConnListSize
|
|
}
|
|
// state
|
|
state = opts.State
|
|
|
|
// ByStop only makes sense on closed connections
|
|
if sortOpt == ByStop && state != ConnClosed {
|
|
return nil, fmt.Errorf("sort by stop only valid on closed connections")
|
|
}
|
|
// ByReason is the same.
|
|
if sortOpt == ByReason && state != ConnClosed {
|
|
return nil, fmt.Errorf("sort by reason only valid on closed connections")
|
|
}
|
|
|
|
// If searching by CID
|
|
if opts.CID > 0 {
|
|
cid = opts.CID
|
|
limit = 1
|
|
}
|
|
}
|
|
|
|
c := &Connz{
|
|
Offset: offset,
|
|
Limit: limit,
|
|
Now: time.Now(),
|
|
}
|
|
|
|
// Open clients
|
|
var openClients []*client
|
|
// Hold for closed clients if requested.
|
|
var closedClients []*closedClient
|
|
|
|
// Walk the open client list with server lock held.
|
|
s.mu.Lock()
|
|
|
|
// copy the server id for monitoring
|
|
c.ID = s.info.ID
|
|
|
|
// Number of total clients. The resulting ConnInfo array
|
|
// may be smaller if pagination is used.
|
|
switch state {
|
|
case ConnOpen:
|
|
c.Total = len(s.clients)
|
|
case ConnClosed:
|
|
c.Total = s.closed.len()
|
|
closedClients = s.closed.closedClients()
|
|
c.Total = len(closedClients)
|
|
case ConnAll:
|
|
closedClients = s.closed.closedClients()
|
|
c.Total = len(s.clients) + len(closedClients)
|
|
}
|
|
|
|
totalClients := c.Total
|
|
if cid > 0 { // Meaning we only want 1.
|
|
totalClients = 1
|
|
}
|
|
if state == ConnOpen || state == ConnAll {
|
|
openClients = make([]*client, 0, totalClients)
|
|
}
|
|
|
|
// Data structures for results.
|
|
var conns []ConnInfo // Limits allocs for actual ConnInfos.
|
|
var pconns ConnInfos
|
|
|
|
switch state {
|
|
case ConnOpen:
|
|
conns = make([]ConnInfo, totalClients)
|
|
pconns = make(ConnInfos, totalClients)
|
|
case ConnClosed:
|
|
pconns = make(ConnInfos, totalClients)
|
|
case ConnAll:
|
|
conns = make([]ConnInfo, cap(openClients))
|
|
pconns = make(ConnInfos, totalClients)
|
|
}
|
|
|
|
// Search by individual CID.
|
|
if cid > 0 {
|
|
if state == ConnClosed || state == ConnAll {
|
|
copyClosed := closedClients
|
|
closedClients = nil
|
|
for _, cc := range copyClosed {
|
|
if cc.Cid == cid {
|
|
closedClients = []*closedClient{cc}
|
|
break
|
|
}
|
|
}
|
|
} else if state == ConnOpen || state == ConnAll {
|
|
client := s.clients[cid]
|
|
if client != nil {
|
|
openClients = append(openClients, client)
|
|
}
|
|
}
|
|
} else {
|
|
// Gather all open clients.
|
|
if state == ConnOpen || state == ConnAll {
|
|
for _, client := range s.clients {
|
|
openClients = append(openClients, client)
|
|
}
|
|
}
|
|
}
|
|
s.mu.Unlock()
|
|
|
|
// Just return with empty array if nothing here.
|
|
if len(openClients) == 0 && len(closedClients) == 0 {
|
|
c.Conns = ConnInfos{}
|
|
return c, nil
|
|
}
|
|
|
|
// Now whip through and generate ConnInfo entries
|
|
|
|
// Open Clients
|
|
i := 0
|
|
for _, client := range openClients {
|
|
client.mu.Lock()
|
|
ci := &conns[i]
|
|
ci.fill(client, client.nc, c.Now)
|
|
// Fill in subscription data if requested.
|
|
if subs && len(client.subs) > 0 {
|
|
ci.Subs = make([]string, 0, len(client.subs))
|
|
for _, sub := range client.subs {
|
|
ci.Subs = append(ci.Subs, string(sub.subject))
|
|
}
|
|
}
|
|
// Fill in user if auth requested.
|
|
if auth {
|
|
ci.AuthorizedUser = client.opts.Username
|
|
}
|
|
client.mu.Unlock()
|
|
pconns[i] = ci
|
|
i++
|
|
}
|
|
// Closed Clients
|
|
var needCopy bool
|
|
if subs || auth {
|
|
needCopy = true
|
|
}
|
|
for _, cc := range closedClients {
|
|
// Copy if needed for any changes to the ConnInfo
|
|
if needCopy {
|
|
cx := *cc
|
|
cc = &cx
|
|
}
|
|
// Fill in subscription data if requested.
|
|
if subs && len(cc.subs) > 0 {
|
|
cc.Subs = cc.subs
|
|
}
|
|
// Fill in user if auth requested.
|
|
if auth {
|
|
cc.AuthorizedUser = cc.user
|
|
}
|
|
pconns[i] = &cc.ConnInfo
|
|
i++
|
|
}
|
|
|
|
switch sortOpt {
|
|
case ByCid, ByStart:
|
|
sort.Sort(byCid{pconns})
|
|
case BySubs:
|
|
sort.Sort(sort.Reverse(bySubs{pconns}))
|
|
case ByPending:
|
|
sort.Sort(sort.Reverse(byPending{pconns}))
|
|
case ByOutMsgs:
|
|
sort.Sort(sort.Reverse(byOutMsgs{pconns}))
|
|
case ByInMsgs:
|
|
sort.Sort(sort.Reverse(byInMsgs{pconns}))
|
|
case ByOutBytes:
|
|
sort.Sort(sort.Reverse(byOutBytes{pconns}))
|
|
case ByInBytes:
|
|
sort.Sort(sort.Reverse(byInBytes{pconns}))
|
|
case ByLast:
|
|
sort.Sort(sort.Reverse(byLast{pconns}))
|
|
case ByIdle:
|
|
sort.Sort(sort.Reverse(byIdle{pconns}))
|
|
case ByUptime:
|
|
sort.Sort(byUptime{pconns, time.Now()})
|
|
case ByStop:
|
|
sort.Sort(sort.Reverse(byStop{pconns}))
|
|
case ByReason:
|
|
sort.Sort(byReason{pconns})
|
|
}
|
|
|
|
minoff := c.Offset
|
|
maxoff := c.Offset + c.Limit
|
|
|
|
maxIndex := totalClients
|
|
|
|
// Make sure these are sane.
|
|
if minoff > maxIndex {
|
|
minoff = maxIndex
|
|
}
|
|
if maxoff > maxIndex {
|
|
maxoff = maxIndex
|
|
}
|
|
|
|
// Now pare down to the requested size.
|
|
// TODO(dlc) - for very large number of connections we
|
|
// could save the whole list in a hash, send hash on first
|
|
// request and allow users to use has for subsequent pages.
|
|
// Low TTL, say < 1sec.
|
|
c.Conns = pconns[minoff:maxoff]
|
|
c.NumConns = len(c.Conns)
|
|
|
|
return c, nil
|
|
}
|
|
|
|
// Fills in the ConnInfo from the client.
// client should be locked.
func (ci *ConnInfo) fill(client *client, nc net.Conn, now time.Time) {
	ci.Cid = client.cid
	ci.Start = client.start
	ci.LastActivity = client.last
	ci.Uptime = myUptime(now.Sub(client.start))
	ci.Idle = myUptime(now.Sub(client.last))
	ci.RTT = client.getRTT()
	ci.OutMsgs = client.outMsgs
	ci.OutBytes = client.outBytes
	ci.NumSubs = uint32(len(client.subs))
	ci.Pending = int(client.out.pb)
	ci.Name = client.opts.Name
	ci.Lang = client.opts.Lang
	ci.Version = client.opts.Version
	// inMsgs and inBytes are updated outside of the client's lock, so
	// we need to use atomic here.
	ci.InMsgs = atomic.LoadInt64(&client.inMsgs)
	ci.InBytes = atomic.LoadInt64(&client.inBytes)

	// If the connection is gone, too bad, we won't set TLSVersion and TLSCipher.
	// Exclude clients that are still doing handshake so we don't block in
	// ConnectionState().
	// NOTE: nc is assumed to be a *tls.Conn whenever handshakeComplete is
	// set; the type assertion below would panic otherwise.
	if client.flags.isSet(handshakeComplete) && nc != nil {
		conn := nc.(*tls.Conn)
		cs := conn.ConnectionState()
		ci.TLSVersion = tlsVersion(cs.Version)
		ci.TLSCipher = tlsCipher(cs.CipherSuite)
	}

	// Only report the remote address once the client's host/port have
	// been resolved (port stays 0 until then).
	if client.port != 0 {
		ci.Port = int(client.port)
		ci.IP = client.host
	}
}
|
|
|
|
// Assume lock is held
|
|
func (c *client) getRTT() string {
|
|
if c.rtt == 0 {
|
|
// If a real client, go ahead and send ping now to get a value
|
|
// for RTT. For tests and telnet, or if client is closing, etc skip.
|
|
if !c.flags.isSet(clearConnection) && c.flags.isSet(connectReceived) && c.opts.Lang != "" {
|
|
c.sendPing()
|
|
}
|
|
return ""
|
|
}
|
|
var rtt time.Duration
|
|
if c.rtt > time.Microsecond && c.rtt < time.Millisecond {
|
|
rtt = c.rtt.Truncate(time.Microsecond)
|
|
} else {
|
|
rtt = c.rtt.Truncate(time.Millisecond)
|
|
}
|
|
return rtt.String()
|
|
}
|
|
|
|
func decodeBool(w http.ResponseWriter, r *http.Request, param string) (bool, error) {
|
|
str := r.URL.Query().Get(param)
|
|
if str == "" {
|
|
return false, nil
|
|
}
|
|
val, err := strconv.ParseBool(str)
|
|
if err != nil {
|
|
w.WriteHeader(http.StatusBadRequest)
|
|
w.Write([]byte(fmt.Sprintf("Error decoding boolean for '%s': %v", param, err)))
|
|
return false, err
|
|
}
|
|
return val, nil
|
|
}
|
|
|
|
func decodeUint64(w http.ResponseWriter, r *http.Request, param string) (uint64, error) {
|
|
str := r.URL.Query().Get(param)
|
|
if str == "" {
|
|
return 0, nil
|
|
}
|
|
val, err := strconv.ParseUint(str, 10, 64)
|
|
if err != nil {
|
|
w.WriteHeader(http.StatusBadRequest)
|
|
w.Write([]byte(fmt.Sprintf("Error decoding uint64 for '%s': %v", param, err)))
|
|
return 0, err
|
|
}
|
|
return val, nil
|
|
}
|
|
|
|
func decodeInt(w http.ResponseWriter, r *http.Request, param string) (int, error) {
|
|
str := r.URL.Query().Get(param)
|
|
if str == "" {
|
|
return 0, nil
|
|
}
|
|
val, err := strconv.Atoi(str)
|
|
if err != nil {
|
|
w.WriteHeader(http.StatusBadRequest)
|
|
w.Write([]byte(fmt.Sprintf("Error decoding int for '%s': %v", param, err)))
|
|
return 0, err
|
|
}
|
|
return val, nil
|
|
}
|
|
|
|
func decodeState(w http.ResponseWriter, r *http.Request) (ConnState, error) {
|
|
str := r.URL.Query().Get("state")
|
|
if str == "" {
|
|
return ConnOpen, nil
|
|
}
|
|
switch strings.ToLower(str) {
|
|
case "open":
|
|
return ConnOpen, nil
|
|
case "closed":
|
|
return ConnClosed, nil
|
|
case "any", "all":
|
|
return ConnAll, nil
|
|
}
|
|
// We do not understand intended state here.
|
|
w.WriteHeader(http.StatusBadRequest)
|
|
err := fmt.Errorf("Error decoding state for %s", str)
|
|
w.Write([]byte(err.Error()))
|
|
return 0, err
|
|
}
|
|
|
|
// HandleConnz process HTTP requests for connection information.
// It decodes the sort/auth/subs/offset/limit/cid/state query parameters,
// delegates to Connz() and writes the JSON result. Any decode error has
// already written a 400 response, so the handler just returns.
func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) {
	sortOpt := SortOpt(r.URL.Query().Get("sort"))
	auth, err := decodeBool(w, r, "auth")
	if err != nil {
		return
	}
	subs, err := decodeBool(w, r, "subs")
	if err != nil {
		return
	}
	offset, err := decodeInt(w, r, "offset")
	if err != nil {
		return
	}
	limit, err := decodeInt(w, r, "limit")
	if err != nil {
		return
	}
	cid, err := decodeUint64(w, r, "cid")
	if err != nil {
		return
	}
	state, err := decodeState(w, r)
	if err != nil {
		return
	}

	connzOpts := &ConnzOptions{
		Sort:          sortOpt,
		Username:      auth,
		Subscriptions: subs,
		Offset:        offset,
		Limit:         limit,
		CID:           cid,
		State:         state,
	}

	// Bump the endpoint hit counter under the server lock.
	s.mu.Lock()
	s.httpReqStats[ConnzPath]++
	s.mu.Unlock()

	c, err := s.Connz(connzOpts)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	b, err := json.MarshalIndent(c, "", "  ")
	if err != nil {
		// Marshal errors are logged but we still respond (with a nil body).
		s.Errorf("Error marshaling response to /connz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
|
|
|
// Routez represents detailed information on current client connections.
type Routez struct {
	ID        string             `json:"server_id"`        // Server ID.
	Now       time.Time          `json:"now"`              // Time at which the snapshot was taken.
	Import    *SubjectPermission `json:"import,omitempty"` // Cluster-wide import permissions, if configured.
	Export    *SubjectPermission `json:"export,omitempty"` // Cluster-wide export permissions, if configured.
	NumRoutes int                `json:"num_routes"`       // Number of active routes.
	Routes    []*RouteInfo       `json:"routes"`           // Per-route details.
}
|
|
|
|
// RoutezOptions are options passed to Routez
type RoutezOptions struct {
	// Subscriptions indicates that Routez will return a route's subscriptions
	Subscriptions bool `json:"subscriptions"`
}
|
|
|
|
// RouteInfo has detailed information on a per connection basis.
type RouteInfo struct {
	Rid          uint64             `json:"rid"`                          // Internal route connection ID.
	RemoteID     string             `json:"remote_id"`                    // Server ID of the remote end.
	DidSolicit   bool               `json:"did_solicit"`                  // True if this server initiated the route.
	IsConfigured bool               `json:"is_configured"`                // True if the route is explicitly configured.
	IP           string             `json:"ip"`                           // Remote IP address.
	Port         int                `json:"port"`                         // Remote port.
	Import       *SubjectPermission `json:"import,omitempty"`             // Per-route import permissions, if any.
	Export       *SubjectPermission `json:"export,omitempty"`             // Per-route export permissions, if any.
	Pending      int                `json:"pending_size"`                 // Pending outbound bytes.
	InMsgs       int64              `json:"in_msgs"`                      // Messages received over the route.
	OutMsgs      int64              `json:"out_msgs"`                     // Messages sent over the route.
	InBytes      int64              `json:"in_bytes"`                     // Bytes received over the route.
	OutBytes     int64              `json:"out_bytes"`                    // Bytes sent over the route.
	NumSubs      uint32             `json:"subscriptions"`                // Number of subscriptions over the route.
	Subs         []string           `json:"subscriptions_list,omitempty"` // Subject list, only when requested.
}
|
|
|
|
// Routez returns a Routez struct containing information about routes.
// routezOpts may be nil; subscriptions are only included when
// routezOpts.Subscriptions is set.
func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) {
	rs := &Routez{Routes: []*RouteInfo{}}
	rs.Now = time.Now()

	subs := routezOpts != nil && routezOpts.Subscriptions

	s.mu.Lock()
	rs.NumRoutes = len(s.routes)

	// copy the server id for monitoring
	rs.ID = s.info.ID

	// Check for defined permissions for all connected routes.
	// NOTE(review): these are the server's own permission pointers, not
	// deep copies — callers of Routez() get references to internal
	// objects; verify this is acceptable.
	if perms := s.getOpts().Cluster.Permissions; perms != nil {
		rs.Import = perms.Import
		rs.Export = perms.Export
	}

	// Walk the list, taking each route's lock while copying its stats.
	for _, r := range s.routes {
		r.mu.Lock()
		ri := &RouteInfo{
			Rid:          r.cid,
			RemoteID:     r.route.remoteID,
			DidSolicit:   r.route.didSolicit,
			IsConfigured: r.route.routeType == Explicit,
			// inMsgs/inBytes are updated outside the route's lock,
			// hence the atomic loads.
			InMsgs:   atomic.LoadInt64(&r.inMsgs),
			OutMsgs:  r.outMsgs,
			InBytes:  atomic.LoadInt64(&r.inBytes),
			OutBytes: r.outBytes,
			NumSubs:  uint32(len(r.subs)),
			Import:   r.opts.Import,
			Export:   r.opts.Export,
		}

		if subs && len(r.subs) > 0 {
			ri.Subs = make([]string, 0, len(r.subs))
			for _, sub := range r.subs {
				ri.Subs = append(ri.Subs, string(sub.subject))
			}
		}
		// Remote address is only available for TCP/TLS connections.
		switch conn := r.nc.(type) {
		case *net.TCPConn, *tls.Conn:
			addr := conn.RemoteAddr().(*net.TCPAddr)
			ri.Port = addr.Port
			ri.IP = addr.IP.String()
		}
		r.mu.Unlock()
		rs.Routes = append(rs.Routes, ri)
	}
	s.mu.Unlock()
	return rs, nil
}
|
|
|
|
// HandleRoutez process HTTP requests for route information.
// Only the "subs" query parameter is supported; decode errors have
// already written a 400 response, so the handler just returns.
func (s *Server) HandleRoutez(w http.ResponseWriter, r *http.Request) {
	subs, err := decodeBool(w, r, "subs")
	if err != nil {
		return
	}
	var opts *RoutezOptions
	if subs {
		opts = &RoutezOptions{Subscriptions: true}
	}

	// Bump the endpoint hit counter under the server lock.
	s.mu.Lock()
	s.httpReqStats[RoutezPath]++
	s.mu.Unlock()

	// As of now, no error is ever returned.
	rs, _ := s.Routez(opts)
	b, err := json.MarshalIndent(rs, "", "  ")
	if err != nil {
		// Marshal errors are logged but we still respond (with a nil body).
		s.Errorf("Error marshaling response to /routez request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
|
|
|
// Subsz represents detail information on current connections.
type Subsz struct {
	*SublistStats             // Embedded sublist statistics (match rates, counts, ...).
	Total         int         `json:"total"`                        // Number of entries in Subs (after pagination).
	Offset        int         `json:"offset"`                       // Pagination offset that was applied.
	Limit         int         `json:"limit"`                        // Pagination limit that was applied.
	Subs          []SubDetail `json:"subscriptions_list,omitempty"` // Per-subscription details, only when requested.
}
|
|
|
|
// SubszOptions are the options passed to Subsz.
type SubszOptions struct {
	// Offset is used for pagination. Subsz() only returns connections starting at this
	// offset from the global results. Negative values are treated as 0.
	Offset int `json:"offset"`

	// Limit is the maximum number of subscriptions that should be returned by Subsz().
	// Zero or negative values fall back to DefaultSubListSize.
	Limit int `json:"limit"`

	// Subscriptions indicates if subscriptions should be included in the results.
	Subscriptions bool `json:"subscriptions"`

	// Test the list against this subject. Needs to be literal since it signifies a publish subject.
	// We will only return subscriptions that would match if a message was sent to this subject.
	Test string `json:"test,omitempty"`
}
|
|
|
|
// SubDetail is for verbose information for subscriptions.
type SubDetail struct {
	Subject string `json:"subject"`          // Subscribed subject.
	Queue   string `json:"qgroup,omitempty"` // Queue group name, if a queue subscription.
	Sid     string `json:"sid"`              // Client-assigned subscription ID.
	Msgs    int64  `json:"msgs"`             // Messages delivered on this subscription.
	Max     int64  `json:"max,omitempty"`    // Auto-unsubscribe limit, if set.
	Cid     uint64 `json:"cid"`              // Owning client's connection ID.
}
|
|
|
|
// Subsz returns a Subsz struct containing subjects statistics
// opts may be nil. When opts.Subscriptions is set, a paginated list of
// per-subscription details is included; opts.Test further filters that
// list to subscriptions matching the given literal publish subject.
func (s *Server) Subsz(opts *SubszOptions) (*Subsz, error) {
	var (
		subdetail bool
		test      bool
		offset    int
		limit     = DefaultSubListSize
		testSub   = ""
	)

	if opts != nil {
		subdetail = opts.Subscriptions
		offset = opts.Offset
		if offset < 0 {
			offset = 0
		}
		limit = opts.Limit
		if limit <= 0 {
			limit = DefaultSubListSize
		}
		if opts.Test != "" {
			testSub = opts.Test
			test = true
			if !IsValidLiteralSubject(testSub) {
				return nil, fmt.Errorf("invalid test subject, must be valid publish subject: %s", testSub)
			}
		}
	}

	// FIXME(dlc) - Make account aware.
	sz := &Subsz{s.gacc.sl.Stats(), 0, offset, limit, nil}

	if subdetail {
		// Now add in subscription's details.
		// Stack buffer avoids a heap allocation for the common case of
		// fewer than 4096 subscriptions.
		var raw [4096]*subscription
		subs := raw[:0]

		s.gacc.sl.localSubs(&subs)
		details := make([]SubDetail, len(subs))
		// i counts the entries actually kept; filtered-out subs leave
		// the tail of details unused.
		i := 0
		// TODO(dlc) - may be inefficient and could just do normal match when total subs is large and filtering.
		for _, sub := range subs {
			// Check for filter
			if test && !matchLiteral(testSub, string(sub.subject)) {
				continue
			}
			if sub.client == nil {
				continue
			}
			sub.client.mu.Lock()
			details[i] = SubDetail{
				Subject: string(sub.subject),
				Queue:   string(sub.queue),
				Sid:     string(sub.sid),
				Msgs:    sub.nm,
				Max:     sub.max,
				Cid:     sub.client.cid,
			}
			sub.client.mu.Unlock()
			i++
		}
		minoff := sz.Offset
		maxoff := sz.Offset + sz.Limit

		maxIndex := i

		// Make sure these are sane.
		if minoff > maxIndex {
			minoff = maxIndex
		}
		if maxoff > maxIndex {
			maxoff = maxIndex
		}
		sz.Subs = details[minoff:maxoff]
		// NOTE(review): Total reports the size of the returned page, not
		// the total number of matching subscriptions — confirm this is
		// the intended contract.
		sz.Total = len(sz.Subs)
	}

	return sz, nil
}
|
|
|
|
// HandleSubsz processes HTTP requests for subjects stats.
// Supports the "subs", "offset", "limit" and "test" query parameters;
// decode errors have already written a 400 response, so the handler
// just returns.
func (s *Server) HandleSubsz(w http.ResponseWriter, r *http.Request) {
	// Bump the endpoint hit counter under the server lock.
	s.mu.Lock()
	s.httpReqStats[SubszPath]++
	s.mu.Unlock()

	subs, err := decodeBool(w, r, "subs")
	if err != nil {
		return
	}
	offset, err := decodeInt(w, r, "offset")
	if err != nil {
		return
	}
	limit, err := decodeInt(w, r, "limit")
	if err != nil {
		return
	}
	testSub := r.URL.Query().Get("test")

	subszOpts := &SubszOptions{
		Subscriptions: subs,
		Offset:        offset,
		Limit:         limit,
		Test:          testSub,
	}

	st, err := s.Subsz(subszOpts)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}

	var b []byte

	// Without a subscriptions list, respond with just the stats to keep
	// the output compact.
	if len(st.Subs) == 0 {
		b, err = json.MarshalIndent(st.SublistStats, "", "  ")
	} else {
		b, err = json.MarshalIndent(st, "", "  ")
	}
	if err != nil {
		// Marshal errors are logged but we still respond (with a nil body).
		s.Errorf("Error marshaling response to /subscriptionsz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
|
|
|
// HandleStacksz processes HTTP requests for getting stacks
|
|
func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) {
|
|
// Do not get any lock here that would prevent getting the stacks
|
|
// if we were to have a deadlock somewhere.
|
|
var defaultBuf [defaultStackBufSize]byte
|
|
size := defaultStackBufSize
|
|
buf := defaultBuf[:size]
|
|
n := 0
|
|
for {
|
|
n = runtime.Stack(buf, true)
|
|
if n < size {
|
|
break
|
|
}
|
|
size *= 2
|
|
buf = make([]byte, size)
|
|
}
|
|
// Handle response
|
|
ResponseHandler(w, r, buf[:n])
|
|
}
|
|
|
|
// Varz will output server information on the monitoring port at /varz.
// Fields are explicitly defined here (rather than embedding Info and
// Options) so the monitoring output is decoupled from server internals.
type Varz struct {
	// Identity and build information.
	ID        string `json:"server_id"`
	Version   string `json:"version"`
	Proto     int    `json:"proto"`
	GitCommit string `json:"git_commit,omitempty"`
	GoVersion string `json:"go"`
	// Listen/connect information.
	Host              string   `json:"host"`
	Port              int      `json:"port"`
	AuthRequired      bool     `json:"auth_required,omitempty"`
	TLSRequired       bool     `json:"tls_required,omitempty"`
	TLSVerify         bool     `json:"tls_verify,omitempty"`
	IP                string   `json:"ip,omitempty"`
	ClientConnectURLs []string `json:"connect_urls,omitempty"`
	// Configured limits.
	MaxConn        int           `json:"max_connections"`
	MaxSubs        int           `json:"max_subscriptions,omitempty"`
	PingInterval   time.Duration `json:"ping_interval"`
	MaxPingsOut    int           `json:"ping_max"`
	HTTPHost       string        `json:"http_host"`
	HTTPPort       int           `json:"http_port"`
	HTTPSPort      int           `json:"https_port"`
	AuthTimeout    float64       `json:"auth_timeout"`
	MaxControlLine int32         `json:"max_control_line"`
	MaxPayload     int           `json:"max_payload"`
	MaxPending     int64         `json:"max_pending"`
	// Cluster, gateway and leafnode configuration snapshots.
	Cluster       ClusterOptsVarz  `json:"cluster,omitempty"`
	Gateway       GatewayOptsVarz  `json:"gateway,omitempty"`
	LeafNode      LeafNodeOptsVarz `json:"leaf,omitempty"`
	TLSTimeout    float64          `json:"tls_timeout"`
	WriteDeadline time.Duration    `json:"write_deadline"`
	// Runtime statistics.
	Start            time.Time         `json:"start"`
	Now              time.Time         `json:"now"`
	Uptime           string            `json:"uptime"`
	Mem              int64             `json:"mem"`
	Cores            int               `json:"cores"`
	CPU              float64           `json:"cpu"`
	Connections      int               `json:"connections"`
	TotalConnections uint64            `json:"total_connections"`
	Routes           int               `json:"routes"`
	Remotes          int               `json:"remotes"`
	InMsgs           int64             `json:"in_msgs"`
	OutMsgs          int64             `json:"out_msgs"`
	InBytes          int64             `json:"in_bytes"`
	OutBytes         int64             `json:"out_bytes"`
	SlowConsumers    int64             `json:"slow_consumers"`
	Subscriptions    uint32            `json:"subscriptions"`
	HTTPReqStats     map[string]uint64 `json:"http_req_stats"`
	ConfigLoadTime   time.Time         `json:"config_load_time"`
}
|
|
|
|
// ClusterOptsVarz contains monitoring cluster information
type ClusterOptsVarz struct {
	Host        string  `json:"addr,omitempty"`         // Cluster listen host.
	Port        int     `json:"cluster_port,omitempty"` // Cluster listen port.
	AuthTimeout float64 `json:"auth_timeout,omitempty"` // Route authentication timeout in seconds.
}
|
|
|
|
// GatewayOptsVarz contains monitoring gateway information
type GatewayOptsVarz struct {
	Name           string                  `json:"name,omitempty"`            // This gateway's name.
	Host           string                  `json:"host,omitempty"`            // Gateway listen host.
	Port           int                     `json:"port,omitempty"`            // Gateway listen port.
	AuthTimeout    float64                 `json:"auth_timeout,omitempty"`    // Gateway authentication timeout in seconds.
	TLSTimeout     float64                 `json:"tls_timeout,omitempty"`     // Gateway TLS handshake timeout in seconds.
	Advertise      string                  `json:"advertise,omitempty"`       // Advertised gateway address.
	ConnectRetries int                     `json:"connect_retries,omitempty"` // Number of connect retries.
	Gateways       []RemoteGatewayOptsVarz `json:"gateways,omitempty"`        // Configured remote gateways.
	RejectUnknown  bool                    `json:"reject_unknown,omitempty"`  // Reject unknown cluster connections.
}
|
|
|
|
// RemoteGatewayOptsVarz contains monitoring remote gateway information
type RemoteGatewayOptsVarz struct {
	Name       string   `json:"name"`                  // Remote gateway name.
	TLSTimeout float64  `json:"tls_timeout,omitempty"` // TLS handshake timeout in seconds.
	URLs       []string `json:"urls,omitempty"`        // Remote gateway host:port endpoints.
}
|
|
|
|
// LeafNodeOptsVarz contains monitoring leaf node information
type LeafNodeOptsVarz struct {
	Host        string               `json:"host,omitempty"`         // Leafnode listen host.
	Port        int                  `json:"port,omitempty"`         // Leafnode listen port.
	AuthTimeout float64              `json:"auth_timeout,omitempty"` // Leafnode authentication timeout in seconds.
	TLSTimeout  float64              `json:"tls_timeout,omitempty"`  // Leafnode TLS handshake timeout in seconds.
	Remotes     []RemoteLeafOptsVarz `json:"remotes,omitempty"`      // Configured remote leafnode connections.
}
|
|
|
|
// RemoteLeafOptsVarz contains monitoring remote leaf node information
type RemoteLeafOptsVarz struct {
	LocalAccount string  `json:"local_account,omitempty"` // Local account the remote binds to.
	URL          string  `json:"url,omitempty"`           // Remote endpoint (host:port).
	TLSTimeout   float64 `json:"tls_timeout,omitempty"`   // TLS handshake timeout in seconds.
}
|
|
|
|
// VarzOptions are the options passed to Varz().
// Currently, there are no options defined; the struct exists so options
// can be added later without changing the Varz() signature.
type VarzOptions struct{}
|
|
|
|
func myUptime(d time.Duration) string {
|
|
// Just use total seconds for uptime, and display days / years
|
|
tsecs := d / time.Second
|
|
tmins := tsecs / 60
|
|
thrs := tmins / 60
|
|
tdays := thrs / 24
|
|
tyrs := tdays / 365
|
|
|
|
if tyrs > 0 {
|
|
return fmt.Sprintf("%dy%dd%dh%dm%ds", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)
|
|
}
|
|
if tdays > 0 {
|
|
return fmt.Sprintf("%dd%dh%dm%ds", tdays, thrs%24, tmins%60, tsecs%60)
|
|
}
|
|
if thrs > 0 {
|
|
return fmt.Sprintf("%dh%dm%ds", thrs, tmins%60, tsecs%60)
|
|
}
|
|
if tmins > 0 {
|
|
return fmt.Sprintf("%dm%ds", tmins, tsecs%60)
|
|
}
|
|
return fmt.Sprintf("%ds", tsecs)
|
|
}
|
|
|
|
// HandleRoot will show basic info and links to others handlers.
// It serves a static HTML landing page with links to the monitoring
// endpoints, and 404s any path other than "/".
func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) {
	// This feels dumb to me, but is required: https://code.google.com/p/go/issues/detail?id=4799
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	// Bump the endpoint hit counter under the server lock.
	s.mu.Lock()
	s.httpReqStats[RootPath]++
	s.mu.Unlock()
	fmt.Fprintf(w, `<html lang="en">
   <head>
    <link rel="shortcut icon" href="http://nats.io/img/favicon.ico">
    <style type="text/css">
      body { font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif; font-size: 22; }
      a { margin-left: 32px; }
    </style>
  </head>
  <body>
    <img src="http://nats.io/img/logo.png" alt="NATS">
    <br/>
	<a href=/varz>varz</a><br/>
	<a href=/connz>connz</a><br/>
	<a href=/routez>routez</a><br/>
	<a href=/subsz>subsz</a><br/>
    <br/>
    <a href=http://nats.io/documentation/server/monitoring/>help</a>
  </body>
</html>`)
}
|
|
|
|
// Varz returns a Varz struct containing the server information.
|
|
func (s *Server) Varz(varzOpts *VarzOptions) (*Varz, error) {
|
|
var rss, vss int64
|
|
var pcpu float64
|
|
|
|
// We want to do that outside of the lock.
|
|
pse.ProcUsage(&pcpu, &rss, &vss)
|
|
|
|
s.mu.Lock()
|
|
// We need to create a new instance of Varz (with no reference
|
|
// whatsoever to anything stored in the server) since the user
|
|
// has access to the returned value.
|
|
v := s.createVarz(pcpu, rss)
|
|
s.mu.Unlock()
|
|
|
|
return v, nil
|
|
}
|
|
|
|
// Returns a Varz instance.
// The returned value does not alias mutable server state: option
// sub-structs are copied field by field and remote lists are rebuilt
// into fresh slices, so it is safe to hand to the user.
// Server lock is held on entry.
func (s *Server) createVarz(pcpu float64, rss int64) *Varz {
	// Snapshot of the server Info (value copy) and current options.
	info := s.info
	opts := s.getOpts()
	c := &opts.Cluster
	gw := &opts.Gateway
	ln := &opts.LeafNode
	// Fields set here are the static ones: they are established at
	// startup and need to be filled in only once, when the Varz object
	// is created.
	varz := &Varz{
		ID:        info.ID,
		Version:   info.Version,
		Proto:     info.Proto,
		GitCommit: info.GitCommit,
		GoVersion: info.GoVersion,
		Host:      info.Host,
		Port:      info.Port,
		IP:        info.IP,
		HTTPHost:  opts.HTTPHost,
		HTTPPort:  opts.HTTPPort,
		HTTPSPort: opts.HTTPSPort,
		Cluster: ClusterOptsVarz{
			Host:        c.Host,
			Port:        c.Port,
			AuthTimeout: c.AuthTimeout,
		},
		Gateway: GatewayOptsVarz{
			Name:           gw.Name,
			Host:           gw.Host,
			Port:           gw.Port,
			AuthTimeout:    gw.AuthTimeout,
			TLSTimeout:     gw.TLSTimeout,
			Advertise:      gw.Advertise,
			ConnectRetries: gw.ConnectRetries,
			// Initialized to an empty (non-nil) slice so it marshals
			// as [] rather than null; replaced below if remotes exist.
			Gateways:      []RemoteGatewayOptsVarz{},
			RejectUnknown: gw.RejectUnknown,
		},
		LeafNode: LeafNodeOptsVarz{
			Host:        ln.Host,
			Port:        ln.Port,
			AuthTimeout: ln.AuthTimeout,
			TLSTimeout:  ln.TLSTimeout,
			// Same as Gateways above: empty, replaced below if needed.
			Remotes: []RemoteLeafOptsVarz{},
		},
		Start:   s.start,
		MaxSubs: opts.MaxSubs,
	}
	// Rebuild the remote gateway list into fresh structs/slices so the
	// returned Varz shares nothing with the options.
	if l := len(gw.Gateways); l > 0 {
		rgwa := make([]RemoteGatewayOptsVarz, l)
		for i, r := range gw.Gateways {
			rgwa[i] = RemoteGatewayOptsVarz{
				Name:       r.Name,
				TLSTimeout: r.TLSTimeout,
			}
			if nu := len(r.URLs); nu > 0 {
				rgwa[i].URLs = make([]string, nu)
				for j, u := range r.URLs {
					// Only the host portion of each URL is exposed.
					rgwa[i].URLs[j] = u.Host
				}
			}
		}
		varz.Gateway.Gateways = rgwa
	}
	// Same treatment for the leafnode remotes.
	if l := len(ln.Remotes); l > 0 {
		rlna := make([]RemoteLeafOptsVarz, l)
		for i, r := range ln.Remotes {
			rlna[i] = RemoteLeafOptsVarz{
				LocalAccount: r.LocalAccount,
				// NOTE(review): assumes r.URL is non-nil for every
				// configured remote — confirm against config parsing.
				URL:        r.URL.Host,
				TLSTimeout: r.TLSTimeout,
			}
		}
		varz.LeafNode.Remotes = rlna
	}
	// Finish setting it up with fields that can be updated during
	// configuration reload and runtime.
	s.updateVarzConfigReloadableFields(varz)
	s.updateVarzRuntimeFields(varz, pcpu, rss)
	return varz
}
|
|
|
|
// Invoked during configuration reload once options have possibly be changed
|
|
// and config load time has been set. If s.varz has not been initialized yet
|
|
// (because no pooling of /varz has been made), this function does nothing.
|
|
// Server lock is held on entry.
|
|
func (s *Server) updateVarzConfigReloadableFields(v *Varz) {
|
|
if v == nil {
|
|
return
|
|
}
|
|
opts := s.getOpts()
|
|
info := &s.info
|
|
v.AuthRequired = info.AuthRequired
|
|
v.TLSRequired = info.TLSRequired
|
|
v.TLSVerify = info.TLSVerify
|
|
v.MaxConn = opts.MaxConn
|
|
v.PingInterval = opts.PingInterval
|
|
v.MaxPingsOut = opts.MaxPingsOut
|
|
v.AuthTimeout = opts.AuthTimeout
|
|
v.MaxControlLine = opts.MaxControlLine
|
|
v.MaxPayload = int(opts.MaxPayload)
|
|
v.MaxPending = opts.MaxPending
|
|
v.TLSTimeout = opts.TLSTimeout
|
|
v.WriteDeadline = opts.WriteDeadline
|
|
v.ConfigLoadTime = s.configTime
|
|
}
|
|
|
|
// Updates the runtime Varz fields, that is, fields that change during
|
|
// runtime and that should be updated any time Varz() or polling of /varz
|
|
// is done.
|
|
// Server lock is held on entry.
|
|
func (s *Server) updateVarzRuntimeFields(v *Varz, pcpu float64, rss int64) {
|
|
v.Now = time.Now()
|
|
v.Uptime = myUptime(time.Since(s.start))
|
|
v.Mem = rss
|
|
v.CPU = pcpu
|
|
v.Cores = numCores
|
|
if l := len(s.info.ClientConnectURLs); l > 0 {
|
|
v.ClientConnectURLs = make([]string, l)
|
|
copy(v.ClientConnectURLs, s.info.ClientConnectURLs)
|
|
}
|
|
v.Connections = len(s.clients)
|
|
v.TotalConnections = s.totalClients
|
|
v.Routes = len(s.routes)
|
|
v.Remotes = len(s.remotes)
|
|
v.InMsgs = atomic.LoadInt64(&s.inMsgs)
|
|
v.InBytes = atomic.LoadInt64(&s.inBytes)
|
|
v.OutMsgs = atomic.LoadInt64(&s.outMsgs)
|
|
v.OutBytes = atomic.LoadInt64(&s.outBytes)
|
|
v.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
|
|
// FIXME(dlc) - make this multi-account aware.
|
|
v.Subscriptions = s.gacc.sl.Count()
|
|
v.HTTPReqStats = make(map[string]uint64, len(s.httpReqStats))
|
|
for key, val := range s.httpReqStats {
|
|
v.HTTPReqStats[key] = val
|
|
}
|
|
}
|
|
|
|
// HandleVarz will process HTTP requests for server information.
func (s *Server) HandleVarz(w http.ResponseWriter, r *http.Request) {
	var rss, vss int64
	var pcpu float64

	// We want to do that outside of the lock.
	pse.ProcUsage(&pcpu, &rss, &vss)

	// In response to http requests, we want to minimize mem copies
	// so we use an object stored in the server. Creating/collecting
	// server metrics is done under server lock, but we don't want
	// to marshal under that lock. Still, we need to prevent concurrent
	// http requests to /varz to update s.varz while marshal is
	// happening, so we need a new lock that serialize those http
	// requests and include marshaling.
	s.varzMu.Lock()

	// Use server lock to create/update the server's varz object.
	s.mu.Lock()
	s.httpReqStats[VarzPath]++
	if s.varz == nil {
		// First poll: build the full object, including static fields.
		s.varz = s.createVarz(pcpu, rss)
	} else {
		// Subsequent polls: refresh only the fields that change at
		// runtime; static/reloadable fields are kept as-is.
		s.updateVarzRuntimeFields(s.varz, pcpu, rss)
	}
	s.mu.Unlock()

	// Do the marshaling outside of server lock, but under varzMu lock.
	b, err := json.MarshalIndent(s.varz, "", "  ")
	s.varzMu.Unlock()

	if err != nil {
		// NOTE(review): on marshal failure b is nil and we still fall
		// through to ResponseHandler, sending an empty body with a 200
		// status — confirm this best-effort behavior is intended.
		s.Errorf("Error marshaling response to /varz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
|
|
|
// ResponseHandler handles responses for monitoring routes.
// When the request carries a non-empty "callback" query parameter the
// payload is wrapped for JSONP consumption, otherwise it is written
// out as plain JSON.
func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) {
	if cb := r.URL.Query().Get("callback"); cb != "" {
		// JSONP: wrap the payload in the requested callback function.
		w.Header().Set("Content-Type", "application/javascript")
		fmt.Fprintf(w, "%s(%s)", cb, data)
		return
	}
	// Plain JSON response.
	w.Header().Set("Content-Type", "application/json")
	w.Write(data)
}
|
|
|
|
func (reason ClosedState) String() string {
|
|
switch reason {
|
|
case ClientClosed:
|
|
return "Client Closed"
|
|
case AuthenticationTimeout:
|
|
return "Authentication Timeout"
|
|
case AuthenticationViolation:
|
|
return "Authentication Failure"
|
|
case TLSHandshakeError:
|
|
return "TLS Handshake Failure"
|
|
case SlowConsumerPendingBytes:
|
|
return "Slow Consumer (Pending Bytes)"
|
|
case SlowConsumerWriteDeadline:
|
|
return "Slow Consumer (Write Deadline)"
|
|
case WriteError:
|
|
return "Write Error"
|
|
case ReadError:
|
|
return "Read Error"
|
|
case ParseError:
|
|
return "Parse Error"
|
|
case StaleConnection:
|
|
return "Stale Connection"
|
|
case ProtocolViolation:
|
|
return "Protocol Violation"
|
|
case BadClientProtocolVersion:
|
|
return "Bad Client Protocol Version"
|
|
case WrongPort:
|
|
return "Incorrect Port"
|
|
case MaxConnectionsExceeded:
|
|
return "Maximum Connections Exceeded"
|
|
case MaxAccountConnectionsExceeded:
|
|
return "Maximum Account Connections Exceeded"
|
|
case MaxPayloadExceeded:
|
|
return "Maximum Message Payload Exceeded"
|
|
case MaxControlLineExceeded:
|
|
return "Maximum Control Line Exceeded"
|
|
case MaxSubscriptionsExceeded:
|
|
return "Maximum Subscriptions Exceeded"
|
|
case DuplicateRoute:
|
|
return "Duplicate Route"
|
|
case RouteRemoved:
|
|
return "Route Removed"
|
|
case ServerShutdown:
|
|
return "Server Shutdown"
|
|
case AuthenticationExpired:
|
|
return "Authentication Expired"
|
|
case WrongGateway:
|
|
return "Wrong Gateway"
|
|
case MissingAccount:
|
|
return "Missing Account"
|
|
}
|
|
return "Unknown State"
|
|
}
|