Changes for max log files option (active plus backups); remove redundant lexical sort of backups; adjust test

This commit is contained in:
Todd Beets
2023-09-15 22:08:09 -07:00
parent 46147cf0ea
commit 349e718d39
4 changed files with 48 additions and 52 deletions

View File

@@ -19,7 +19,6 @@ import (
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
@@ -131,14 +130,14 @@ type fileLogger struct {
out int64 out int64
canRotate int32 canRotate int32
sync.Mutex sync.Mutex
l *Logger l *Logger
f writerAndCloser f writerAndCloser
limit int64 limit int64
olimit int64 olimit int64
pid string pid string
time bool time bool
closed bool closed bool
archiveLimit int maxNumFiles int
} }
func newFileLogger(filename, pidPrefix string, time bool) (*fileLogger, error) { func newFileLogger(filename, pidPrefix string, time bool) (*fileLogger, error) {
@@ -173,9 +172,9 @@ func (l *fileLogger) setLimit(limit int64) {
} }
} }
func (l *fileLogger) setArchiveLimit(limit int) { func (l *fileLogger) setMaxNumFiles(max int) {
l.Lock() l.Lock()
l.archiveLimit = limit l.maxNumFiles = max
l.Unlock() l.Unlock()
} }
@@ -200,9 +199,7 @@ func (l *fileLogger) logDirect(label, format string, v ...interface{}) int {
return len(entry) return len(entry)
} }
func (l *fileLogger) archivePurge(fname string) { func (l *fileLogger) logPurge(fname string) {
// Evaluate number of saved backups for purge
// l readlock held
var backups []string var backups []string
lDir := filepath.Dir(fname) lDir := filepath.Dir(fname)
lBase := filepath.Base(fname) lBase := filepath.Base(fname)
@@ -215,27 +212,24 @@ func (l *fileLogger) archivePurge(fname string) {
if entry.IsDir() || entry.Name() == lBase || !strings.HasPrefix(entry.Name(), lBase) { if entry.IsDir() || entry.Name() == lBase || !strings.HasPrefix(entry.Name(), lBase) {
continue continue
} }
stamp, found := strings.CutPrefix(entry.Name(), fmt.Sprintf("%s%s", lBase, ".")) if stamp, found := strings.CutPrefix(entry.Name(), fmt.Sprintf("%s%s", lBase, ".")); found {
if found {
_, err := time.Parse("2006:01:02:15:04:05.999999999", strings.Replace(stamp, ".", ":", 5)) _, err := time.Parse("2006:01:02:15:04:05.999999999", strings.Replace(stamp, ".", ":", 5))
if err == nil { if err == nil {
backups = append(backups, entry.Name()) backups = append(backups, entry.Name())
} }
} }
} }
archives := len(backups) currBackups := len(backups)
if archives > l.archiveLimit { maxBackups := l.maxNumFiles - 1
// Oldest to latest if currBackups > maxBackups {
sort.Slice(backups, func(i, j int) bool { // backups sorted oldest to latest based on timestamped lexical filename (ReadDir)
return backups[i] < backups[j] for i := 0; i < currBackups-maxBackups; i++ {
}) if err := os.Remove(filepath.Join(lDir, backups[i])); err != nil {
for i := 0; i < archives-l.archiveLimit; i++ {
if err := os.Remove(fmt.Sprintf("%s%s%s", lDir, string(os.PathSeparator), backups[i])); err != nil {
l.logDirect(l.l.errorLabel, "Unable to remove backup log file %q (%v), will attempt next rotation", backups[i], err) l.logDirect(l.l.errorLabel, "Unable to remove backup log file %q (%v), will attempt next rotation", backups[i], err)
// Bail fast, we'll try again next rotation // Bail fast, we'll try again next rotation
return return
} }
l.logDirect(l.l.infoLabel, "Removed archived log file %q", backups[i]) l.logDirect(l.l.infoLabel, "Purged log file %q", backups[i])
} }
} }
} }
@@ -275,8 +269,8 @@ func (l *fileLogger) Write(b []byte) (int, error) {
n := l.logDirect(l.l.infoLabel, "Rotated log, backup saved as %q", bak) n := l.logDirect(l.l.infoLabel, "Rotated log, backup saved as %q", bak)
l.out = int64(n) l.out = int64(n)
l.limit = l.olimit l.limit = l.olimit
if l.archiveLimit > 0 { if l.maxNumFiles > 0 {
l.archivePurge(fname) l.logPurge(fname)
} }
} }
} }
@@ -310,16 +304,16 @@ func (l *Logger) SetSizeLimit(limit int64) error {
return nil return nil
} }
// SetArchiveLimit sets the number of archived log files that will be retained // SetMaxNumFiles sets the maximum number of log files that will be retained (the active log file plus rotated backups)
func (l *Logger) SetArchiveLimit(limit int) error { func (l *Logger) SetMaxNumFiles(max int) error {
l.Lock() l.Lock()
if l.fl == nil { if l.fl == nil {
l.Unlock() l.Unlock()
return fmt.Errorf("can set log archive limit only for file logger") return fmt.Errorf("can set log max number of files only for file logger")
} }
fl := l.fl fl := l.fl
l.Unlock() l.Unlock()
fl.setArchiveLimit(limit) fl.setMaxNumFiles(max)
return nil return nil
} }

View File

@@ -72,14 +72,14 @@ func (s *Server) ConfigureLogger() {
l.SetSizeLimit(opts.LogSizeLimit) l.SetSizeLimit(opts.LogSizeLimit)
} }
} }
if opts.LogMaxArchives > 0 { if opts.LogMaxFiles > 0 {
if l, ok := log.(*srvlog.Logger); ok { if l, ok := log.(*srvlog.Logger); ok {
al := int(opts.LogMaxArchives) al := int(opts.LogMaxFiles)
if int64(al) != opts.LogMaxArchives { if int64(al) != opts.LogMaxFiles {
// set to default (no max) on overflow // set to default (no max) on overflow
al = 0 al = 0
} }
l.SetArchiveLimit(al) l.SetMaxNumFiles(al)
} }
} }
} else if opts.RemoteSyslog != "" { } else if opts.RemoteSyslog != "" {

View File

@@ -311,7 +311,7 @@ type Options struct {
PortsFileDir string `json:"-"` PortsFileDir string `json:"-"`
LogFile string `json:"-"` LogFile string `json:"-"`
LogSizeLimit int64 `json:"-"` LogSizeLimit int64 `json:"-"`
LogMaxArchives int64 `json:"-"` LogMaxFiles int64 `json:"-"`
Syslog bool `json:"-"` Syslog bool `json:"-"`
RemoteSyslog string `json:"-"` RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"` Routes []*url.URL `json:"-"`
@@ -1000,8 +1000,8 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error
o.LogFile = v.(string) o.LogFile = v.(string)
case "logfile_size_limit", "log_size_limit": case "logfile_size_limit", "log_size_limit":
o.LogSizeLimit = v.(int64) o.LogSizeLimit = v.(int64)
case "logfile_max_archives", "log_max_archives": case "logfile_max_num", "log_max_num":
o.LogMaxArchives = v.(int64) o.LogMaxFiles = v.(int64)
case "syslog": case "syslog":
o.Syslog = v.(bool) o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog) trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)

View File

@@ -39,7 +39,7 @@ func TestLogMaxArchives(t *testing.T) {
totEntriesExpected int totEntriesExpected int
}{ }{
{ {
"Default implicit, no max archives, expect 0 purged archives", "Default implicit, no max logs, expect 0 purged logs",
` `
port: -1 port: -1
log_file: %s log_file: %s
@@ -48,52 +48,52 @@ func TestLogMaxArchives(t *testing.T) {
9, 9,
}, },
{ {
"Default explicit, no max archives, expect 0 purged archives", "Default explicit, no max logs, expect 0 purged logs",
` `
port: -1 port: -1
log_file: %s log_file: %s
logfile_size_limit: 100 logfile_size_limit: 100
logfile_max_archives: 0 logfile_max_num: 0
`, `,
9, 9,
}, },
{ {
"Default explicit - negative val, no max archives, expect 0 purged archives", "Default explicit - negative val, no max logs, expect 0 purged logs",
` `
port: -1 port: -1
log_file: %s log_file: %s
logfile_size_limit: 100 logfile_size_limit: 100
logfile_max_archives: -42 logfile_max_num: -42
`, `,
9, 9,
}, },
{ {
"1-archive limit, expect 7 purged archives", "1-max num, expect 8 purged logs",
` `
port: -1 port: -1
log_file: %s log_file: %s
logfile_size_limit: 100 logfile_size_limit: 100
logfile_max_archives: 1 logfile_max_num: 1
`, `,
2, 1,
}, },
{ {
"5-archive limit, expect 4 purged archives", "5-max num, expect 4 purged logs; use opt alias",
` `
port: -1 port: -1
log_file: %s log_file: %s
logfile_size_limit: 100 log_size_limit: 100
logfile_max_archives: 5 log_max_num: 5
`, `,
6, 5,
}, },
{ {
"100-archive limit, expect 0 purged archives", "100-max num, expect 0 purged logs",
` `
port: -1 port: -1
log_file: %s log_file: %s
logfile_size_limit: 100 logfile_size_limit: 100
logfile_max_archives: 100 logfile_max_num: 100
`, `,
9, 9,
}, },
@@ -117,6 +117,8 @@ func TestLogMaxArchives(t *testing.T) {
t.Fatalf("No NATS Server object returned") t.Fatalf("No NATS Server object returned")
} }
s.Shutdown() s.Shutdown()
// Windows filesystem can be a little pokey on the flush, so wait a bit after shutdown...
time.Sleep(500 * time.Millisecond)
entries, err := os.ReadDir(d) entries, err := os.ReadDir(d)
if err != nil { if err != nil {
t.Fatalf("Error reading dir: %v", err) t.Fatalf("Error reading dir: %v", err)