diff --git a/linux_test.go b/linux_test.go
index 1c61399..2bd1684 100644
--- a/linux_test.go
+++ b/linux_test.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"syscall"
 	"testing"
+	"time"
 )
 
 func TestMaintainMode(t *testing.T) {
@@ -46,9 +47,9 @@ func TestMaintainMode(t *testing.T) {
 }
 
 func TestMaintainOwner(t *testing.T) {
-	fakeC := fakeChown{}
-	os_Chown = fakeC.Set
-	os_Stat = fakeStat
+	fakeFS := newFakeFS()
+	os_Chown = fakeFS.Chown
+	os_Stat = fakeFS.Stat
 	defer func() {
 		os_Chown = os.Chown
 		os_Stat = os.Stat
 	}()
@@ -59,7 +60,45 @@ func TestMaintainOwner(t *testing.T) {
 	filename := logFile(dir)
 
+	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
+	isNil(err, t)
+	f.Close()
+
+	l := &Logger{
+		Filename:   filename,
+		MaxBackups: 1,
+		MaxSize:    100, // megabytes
+	}
+	defer l.Close()
+	b := []byte("boo!")
+	n, err := l.Write(b)
+	isNil(err, t)
+	equals(len(b), n, t)
+
+	newFakeTime()
+
+	err = l.Rotate()
+	isNil(err, t)
+
+	equals(555, fakeFS.files[filename].uid, t)
+	equals(666, fakeFS.files[filename].gid, t)
+}
+
+func TestCompressMaintainMode(t *testing.T) {
+	currentTime = fakeTime
+
+	dir := makeTempDir("TestCompressMaintainMode", t)
+	defer os.RemoveAll(dir)
+
+	filename := logFile(dir)
+
+	mode := os.FileMode(0600)
+	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
+	isNil(err, t)
+	f.Close()
+
 	l := &Logger{
+		Compress:   true,
 		Filename:   filename,
 		MaxBackups: 1,
 		MaxSize:    100, // megabytes
 	}
@@ -75,27 +114,89 @@ func TestMaintainOwner(t *testing.T) {
 	err = l.Rotate()
 	isNil(err, t)
 
-	equals(555, fakeC.uid, t)
-	equals(666, fakeC.gid, t)
+	// we need to wait a little bit since the files get compressed on a different
+	// goroutine.
+	<-time.After(10 * time.Millisecond)
+
+	// a compressed version of the log file should now exist with the correct
+	// mode.
+	filename2 := backupFile(dir)
+	info, err := os.Stat(filename)
+	isNil(err, t)
+	info2, err := os.Stat(filename2 + compressSuffix)
+	isNil(err, t)
+	equals(mode, info.Mode(), t)
+	equals(mode, info2.Mode(), t)
+}
+
+func TestCompressMaintainOwner(t *testing.T) {
+	fakeFS := newFakeFS()
+	os_Chown = fakeFS.Chown
+	os_Stat = fakeFS.Stat
+	defer func() {
+		os_Chown = os.Chown
+		os_Stat = os.Stat
+	}()
+	currentTime = fakeTime
+	dir := makeTempDir("TestCompressMaintainOwner", t)
+	defer os.RemoveAll(dir)
+
+	filename := logFile(dir)
+
+	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
+	isNil(err, t)
+	f.Close()
+
+	l := &Logger{
+		Compress:   true,
+		Filename:   filename,
+		MaxBackups: 1,
+		MaxSize:    100, // megabytes
+	}
+	defer l.Close()
+	b := []byte("boo!")
+	n, err := l.Write(b)
+	isNil(err, t)
+	equals(len(b), n, t)
+
+	newFakeTime()
+
+	err = l.Rotate()
+	isNil(err, t)
+
+	// we need to wait a little bit since the files get compressed on a different
+	// goroutine.
+	<-time.After(10 * time.Millisecond)
+
+	// a compressed version of the log file should now exist with the correct
+	// owner.
+	filename2 := backupFile(dir)
+	equals(555, fakeFS.files[filename2+compressSuffix].uid, t)
+	equals(666, fakeFS.files[filename2+compressSuffix].gid, t)
+}
+
+type fakeFile struct {
+	uid int
+	gid int
+}
+
+type fakeFS struct {
+	files map[string]fakeFile
 }
 
-type fakeChown struct {
-	name string
-	uid  int
-	gid  int
+func newFakeFS() *fakeFS {
+	return &fakeFS{files: make(map[string]fakeFile)}
 }
 
-func (f *fakeChown) Set(name string, uid, gid int) error {
-	f.name = name
-	f.uid = uid
-	f.gid = gid
+func (fs *fakeFS) Chown(name string, uid, gid int) error {
+	fs.files[name] = fakeFile{uid: uid, gid: gid}
 	return nil
 }
 
-func fakeStat(name string) (os.FileInfo, error) {
+func (fs *fakeFS) Stat(name string) (os.FileInfo, error) {
 	info, err := os.Stat(name)
 	if err != nil {
-		return info, err
+		return nil, err
 	}
 	stat := info.Sys().(*syscall.Stat_t)
 	stat.Uid = 555
diff --git a/lumberjack.go b/lumberjack.go
index b4bb54f..ca19da4 100644
--- a/lumberjack.go
+++ b/lumberjack.go
@@ -22,6 +22,8 @@
 package lumberjack
 
 import (
+	"compress/gzip"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -35,6 +37,7 @@ import (
 
 const (
 	backupTimeFormat = "2006-01-02T15-04-05.000"
+	compressSuffix   = ".gz"
 	defaultMaxSize   = 100
 )
 
@@ -100,9 +103,16 @@ type Logger struct {
 	// time.
 	LocalTime bool `json:"localtime" yaml:"localtime"`
 
+	// Compress determines if the rotated log files should be compressed
+	// using gzip.
+	Compress bool `json:"compress" yaml:"compress"`
+
 	size int64
 	file *os.File
 	mu   sync.Mutex
+
+	millCh    chan bool
+	startMill sync.Once
 }
 
 var (
@@ -171,8 +181,8 @@ func (l *Logger) close() error {
 // Rotate causes Logger to close the existing log file and immediately create a
 // new one. This is a helper function for applications that want to initiate
 // rotations outside of the normal rotation rules, such as in response to
-// SIGHUP. After rotating, this initiates a cleanup of old log files according
-// to the normal rules.
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
 func (l *Logger) Rotate() error {
 	l.mu.Lock()
 	defer l.mu.Unlock()
@@ -181,16 +191,16 @@
 // rotate closes the current file, moves it aside with a timestamp in the name,
 // (if it exists), opens a new file with the original filename, and then runs
-// cleanup.
+// post-rotation processing and removal.
 func (l *Logger) rotate() error {
 	if err := l.close(); err != nil {
 		return err
 	}
-
 	if err := l.openNew(); err != nil {
 		return err
 	}
-	return l.cleanup()
+	l.mill()
+	return nil
 }
 
 // openNew opens a new log file for writing, moving any old log file out of the
@@ -252,6 +262,8 @@ func backupName(name string, local bool) string {
 // would not put it over MaxSize. If there is no such file or the write would
 // put it over the MaxSize, a new file is created.
 func (l *Logger) openExistingOrNew(writeLen int) error {
+	l.mill()
+
 	filename := l.filename()
 	info, err := os_Stat(filename)
 	if os.IsNotExist(err) {
@@ -285,10 +297,12 @@ func (l *Logger) filename() string {
 	return filepath.Join(os.TempDir(), name)
 }
 
-// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
 // none of them are older than MaxAge.
-func (l *Logger) cleanup() error {
-	if l.MaxBackups == 0 && l.MaxAge == 0 {
+func (l *Logger) millRunOnce() error {
+	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
 		return nil
 	}
 
@@ -297,38 +311,87 @@
 		return err
 	}
 
-	var deletes []logInfo
+	var compress, remove []logInfo
 
 	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
-		deletes = files[l.MaxBackups:]
-		files = files[:l.MaxBackups]
+		preserved := make(map[string]bool)
+		var remaining []logInfo
+		for _, f := range files {
+			// Only count the uncompressed log file or the
+			// compressed log file, not both.
+			fn := f.Name()
+			if strings.HasSuffix(fn, compressSuffix) {
+				fn = fn[:len(fn)-len(compressSuffix)]
+			}
+			preserved[fn] = true
+
+			if len(preserved) > l.MaxBackups {
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
+			}
+		}
+		files = remaining
 	}
 	if l.MaxAge > 0 {
 		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
 		cutoff := currentTime().Add(-1 * diff)
 
+		var remaining []logInfo
 		for _, f := range files {
 			if f.timestamp.Before(cutoff) {
-				deletes = append(deletes, f)
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
 			}
 		}
+		files = remaining
 	}
 
-	if len(deletes) == 0 {
-		return nil
+	if l.Compress {
+		for _, f := range files {
+			if !strings.HasSuffix(f.Name(), compressSuffix) {
+				compress = append(compress, f)
+			}
+		}
 	}
 
-	go deleteAll(l.dir(), deletes)
+	for _, f := range remove {
+		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+		if err == nil && errRemove != nil {
+			err = errRemove
+		}
+	}
+	for _, f := range compress {
+		fn := filepath.Join(l.dir(), f.Name())
+		errCompress := compressLogFile(fn, fn+compressSuffix)
+		if err == nil && errCompress != nil {
+			err = errCompress
+		}
+	}
 
-	return nil
+	return err
 }
 
-func deleteAll(dir string, files []logInfo) {
-	// remove files on a separate goroutine
-	for _, f := range files {
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+	for _ = range l.millCh {
 		// what am I going to do, log this?
-		_ = os.Remove(filepath.Join(dir, f.Name()))
+		_ = l.millRunOnce()
+	}
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
+func (l *Logger) mill() {
+	l.startMill.Do(func() {
+		l.millCh = make(chan bool, 1)
+		go l.millRun()
+	})
+	select {
+	case l.millCh <- true:
+	default:
 	}
 }
@@ -347,13 +410,13 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) {
 		if f.IsDir() {
 			continue
 		}
-		name := l.timeFromName(f.Name(), prefix, ext)
-		if name == "" {
+		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
 			continue
 		}
-		t, err := time.Parse(backupTimeFormat, name)
-		if err == nil {
+		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
 			logFiles = append(logFiles, logInfo{t, f})
+			continue
 		}
 		// error parsing means that the suffix at the end was not generated
 		// by lumberjack, and therefore it's not a backup file.
@@ -367,17 +430,15 @@
 // timeFromName extracts the formatted time from the filename by stripping off
 // the filename's prefix and extension. This prevents someone's filename from
 // confusing time.parse.
-func (l *Logger) timeFromName(filename, prefix, ext string) string {
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
 	if !strings.HasPrefix(filename, prefix) {
-		return ""
+		return time.Time{}, errors.New("mismatched prefix")
 	}
-	filename = filename[len(prefix):]
-
 	if !strings.HasSuffix(filename, ext) {
-		return ""
+		return time.Time{}, errors.New("mismatched extension")
 	}
-	filename = filename[:len(filename)-len(ext)]
-	return filename
+	ts := filename[len(prefix) : len(filename)-len(ext)]
+	return time.Parse(backupTimeFormat, ts)
 }
 
 // max returns the maximum size in bytes of log files before rolling.
@@ -402,6 +463,61 @@ func (l *Logger) prefixAndExt() (prefix, ext string) {
 	return prefix, ext
 }
 
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+	f, err := os.Open(src)
+	if err != nil {
+		return fmt.Errorf("failed to open log file: %v", err)
+	}
+	defer f.Close()
+
+	fi, err := os_Stat(src)
+	if err != nil {
+		return fmt.Errorf("failed to stat log file: %v", err)
+	}
+
+	if err := chown(dst, fi); err != nil {
+		return fmt.Errorf("failed to chown compressed log file: %v", err)
+	}
+
+	// If this file already exists, we presume it was created by
+	// a previous attempt to compress the log file.
+	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
+	if err != nil {
+		return fmt.Errorf("failed to open compressed log file: %v", err)
+	}
+	defer gzf.Close()
+
+	gz := gzip.NewWriter(gzf)
+
+	defer func() {
+		if err != nil {
+			os.Remove(dst)
+			err = fmt.Errorf("failed to compress log file: %v", err)
+		}
+	}()
+
+	if _, err := io.Copy(gz, f); err != nil {
+		return err
+	}
+	if err := gz.Close(); err != nil {
+		return err
+	}
+	if err := gzf.Close(); err != nil {
+		return err
+	}
+
+	if err := f.Close(); err != nil {
+		return err
+	}
+	if err := os.Remove(src); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // logInfo is a convenience struct to return the filename and its embedded
 // timestamp.
 type logInfo struct {
diff --git a/lumberjack_test.go b/lumberjack_test.go
index 5e8422a..58e165f 100644
--- a/lumberjack_test.go
+++ b/lumberjack_test.go
@@ -1,6 +1,8 @@
 package lumberjack
 
 import (
+	"bytes"
+	"compress/gzip"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -41,7 +43,7 @@ func TestNewFile(t *testing.T) {
 	n, err := l.Write(b)
 	isNil(err, t)
 	equals(len(b), n, t)
-	existsWithLen(logFile(dir), n, t)
+	existsWithContent(logFile(dir), b, t)
 	fileCount(dir, 1, t)
 }
 
@@ -54,7 +56,7 @@ func TestOpenExisting(t *testing.T) {
 	data := []byte("foo!")
 	err := ioutil.WriteFile(filename, data, 0644)
 	isNil(err, t)
-	existsWithLen(filename, len(data), t)
+	existsWithContent(filename, data, t)
 
 	l := &Logger{
 		Filename: filename,
@@ -66,7 +68,7 @@ func TestOpenExisting(t *testing.T) {
 	equals(len(b), n, t)
 
 	// make sure the file got appended
-	existsWithLen(filename, len(data)+n, t)
+	existsWithContent(filename, append(data, b...), t)
 
 	// make sure no other files were created
 	fileCount(dir, 1, t)
@@ -106,7 +108,7 @@ func TestMakeLogDir(t *testing.T) {
 	n, err := l.Write(b)
 	isNil(err, t)
 	equals(len(b), n, t)
-	existsWithLen(logFile(dir), n, t)
+	existsWithContent(logFile(dir), b, t)
 	fileCount(dir, 1, t)
 }
 
@@ -122,7 +124,7 @@ func TestDefaultFilename(t *testing.T) {
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b, t)
 }
 
 func TestAutoRotate(t *testing.T) {
@@ -143,7 +145,7 @@ func TestAutoRotate(t *testing.T) {
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b, t)
 	fileCount(dir, 1, t)
 
 	newFakeTime()
@@ -155,10 +157,10 @@ func TestAutoRotate(t *testing.T) {
 
 	// the old logfile should be moved aside and the main logfile should have
 	// only the last write in it.
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b2, t)
 
 	// the backup file will use the current fake time and have the old contents.
-	existsWithLen(backupFile(dir), len(b), t)
+	existsWithContent(backupFile(dir), b, t)
 
 	fileCount(dir, 2, t)
 }
@@ -188,8 +190,8 @@ func TestFirstWriteRotate(t *testing.T) {
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
-	existsWithLen(backupFile(dir), len(start), t)
+	existsWithContent(filename, b, t)
+	existsWithContent(backupFile(dir), start, t)
 
 	fileCount(dir, 2, t)
 }
@@ -212,7 +214,7 @@ func TestMaxBackups(t *testing.T) {
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b, t)
 	fileCount(dir, 1, t)
 
 	newFakeTime()
@@ -225,25 +227,26 @@ func TestMaxBackups(t *testing.T) {
 
 	// this will use the new fake time
 	secondFilename := backupFile(dir)
-	existsWithLen(secondFilename, len(b), t)
+	existsWithContent(secondFilename, b, t)
 
-	// make sure the old file still exists with the same size.
-	existsWithLen(filename, n, t)
+	// make sure the old file still exists with the same content.
+	existsWithContent(filename, b2, t)
 
 	fileCount(dir, 2, t)
 
 	newFakeTime()
 
 	// this will make us rotate again
-	n, err = l.Write(b2)
+	b3 := []byte("baaaaaar!")
+	n, err = l.Write(b3)
 	isNil(err, t)
-	equals(len(b2), n, t)
+	equals(len(b3), n, t)
 
 	// this will use the new fake time
 	thirdFilename := backupFile(dir)
-	existsWithLen(thirdFilename, len(b2), t)
+	existsWithContent(thirdFilename, b2, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b3, t)
 
 	// we need to wait a little bit since the files get deleted on a different
 	// goroutine.
@@ -253,7 +256,7 @@ func TestMaxBackups(t *testing.T) {
 	fileCount(dir, 2, t)
 
 	// second file name should still exist
-	existsWithLen(thirdFilename, len(b2), t)
+	existsWithContent(thirdFilename, b2, t)
 
 	// should have deleted the first backup
 	notExist(secondFilename, t)
@@ -276,14 +279,24 @@ func TestMaxBackups(t *testing.T) {
 
 	newFakeTime()
 
+	// this will use the new fake time
+	fourthFilename := backupFile(dir)
+
+	// Create a log file that is/was being compressed - this should
+	// not be counted since both the compressed and the uncompressed
+	// log files still exist.
+	compLogFile := fourthFilename + compressSuffix
+	err = ioutil.WriteFile(compLogFile, []byte("compress"), 0644)
+	isNil(err, t)
+
 	// this will make us rotate again
-	n, err = l.Write(b2)
+	b4 := []byte("baaaaaaz!")
+	n, err = l.Write(b4)
 	isNil(err, t)
-	equals(len(b2), n, t)
+	equals(len(b4), n, t)
 
-	// this will use the new fake time
-	fourthFilename := backupFile(dir)
-	existsWithLen(fourthFilename, len(b2), t)
+	existsWithContent(fourthFilename, b3, t)
+	existsWithContent(fourthFilename+compressSuffix, []byte("compress"), t)
 
 	// we need to wait a little bit since the files get deleted on a different
 	// goroutine.
@@ -291,12 +304,12 @@
 
 	// We should have four things in the directory now - the 2 log files, the
 	// not log file, and the directory
-	fileCount(dir, 4, t)
+	fileCount(dir, 5, t)
 
 	// third file name should still exist
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b4, t)
 
-	existsWithLen(fourthFilename, len(b2), t)
+	existsWithContent(fourthFilename, b3, t)
 
 	// should have deleted the first filename
 	notExist(thirdFilename, t)
@@ -328,7 +341,7 @@ func TestCleanupExistingBackups(t *testing.T) {
 	newFakeTime()
 
 	backup = backupFile(dir)
-	err = ioutil.WriteFile(backup, data, 0644)
+	err = ioutil.WriteFile(backup+compressSuffix, data, 0644)
 	isNil(err, t)
 
 	newFakeTime()
@@ -383,7 +396,7 @@ func TestMaxAge(t *testing.T) {
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b, t)
 	fileCount(dir, 1, t)
 
 	// two days later
@@ -393,7 +406,7 @@ func TestMaxAge(t *testing.T) {
 	n, err = l.Write(b2)
 	isNil(err, t)
 	equals(len(b2), n, t)
-	existsWithLen(backupFile(dir), len(b), t)
+	existsWithContent(backupFile(dir), b, t)
 
 	// we need to wait a little bit since the files get deleted on a different
 	// goroutine.
@@ -403,19 +416,19 @@
 	// created.
 	fileCount(dir, 2, t)
 
-	existsWithLen(filename, len(b2), t)
+	existsWithContent(filename, b2, t)
 
 	// we should have deleted the old file due to being too old
-	existsWithLen(backupFile(dir), len(b), t)
+	existsWithContent(backupFile(dir), b, t)
 
 	// two days later
 	newFakeTime()
 
-	b3 := []byte("foooooo!")
-	n, err = l.Write(b2)
+	b3 := []byte("baaaaar!")
+	n, err = l.Write(b3)
 	isNil(err, t)
 	equals(len(b3), n, t)
-	existsWithLen(backupFile(dir), len(b2), t)
+	existsWithContent(backupFile(dir), b2, t)
 
 	// we need to wait a little bit since the files get deleted on a different
 	// goroutine.
@@ -425,11 +438,10 @@
 	// backup. The earlier backup is past the cutoff and should be gone.
 	fileCount(dir, 2, t)
 
-	existsWithLen(filename, len(b3), t)
+	existsWithContent(filename, b3, t)
 
 	// we should have deleted the old file due to being too old
-	existsWithLen(backupFile(dir), len(b2), t)
-
+	existsWithContent(backupFile(dir), b2, t)
 }
 
 func TestOldLogFiles(t *testing.T) {
@@ -475,17 +487,23 @@
 func TestTimeFromName(t *testing.T) {
 	l := &Logger{Filename: "/var/log/myfoo/foo.log"}
 	prefix, ext := l.prefixAndExt()
-	val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext)
-	equals("2014-05-04T14-44-33.555", val, t)
-
-	val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext)
-	equals("", val, t)
-	val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext)
-	equals("", val, t)
+	tests := []struct {
+		filename string
+		want     time.Time
+		wantErr  bool
+	}{
+		{"foo-2014-05-04T14-44-33.555.log", time.Date(2014, 5, 4, 14, 44, 33, 555000000, time.UTC), false},
+		{"foo-2014-05-04T14-44-33.555", time.Time{}, true},
+		{"2014-05-04T14-44-33.555.log", time.Time{}, true},
+		{"foo.log", time.Time{}, true},
+	}
 
-	val = l.timeFromName("foo.log", prefix, ext)
-	equals("", val, t)
+	for _, test := range tests {
+		got, err := l.timeFromName(test.filename, prefix, ext)
+		equals(got, test.want, t)
+		equals(err != nil, test.wantErr, t)
+	}
 }
 
 func TestLocalTime(t *testing.T) {
@@ -511,8 +529,8 @@
 	isNil(err, t)
 	equals(len(b2), n2, t)
 
-	existsWithLen(logFile(dir), n2, t)
-	existsWithLen(backupFileLocal(dir), n, t)
+	existsWithContent(logFile(dir), b2, t)
+	existsWithContent(backupFileLocal(dir), b, t)
 }
 
 func TestRotate(t *testing.T) {
@@ -533,7 +551,7 @@
 	isNil(err, t)
 	equals(len(b), n, t)
 
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b, t)
 	fileCount(dir, 1, t)
 
 	newFakeTime()
@@ -546,8 +564,8 @@
 	<-time.After(10 * time.Millisecond)
 
 	filename2 := backupFile(dir)
-	existsWithLen(filename2, n, t)
-	existsWithLen(filename, 0, t)
+	existsWithContent(filename2, b, t)
+	existsWithContent(filename, []byte{}, t)
 	fileCount(dir, 2, t)
 
 	newFakeTime()
@@ -559,8 +577,8 @@
 	<-time.After(10 * time.Millisecond)
 
 	filename3 := backupFile(dir)
-	existsWithLen(filename3, 0, t)
-	existsWithLen(filename, 0, t)
+	existsWithContent(filename3, []byte{}, t)
+	existsWithContent(filename, []byte{}, t)
 	fileCount(dir, 2, t)
 
 	b2 := []byte("foooooo!")
@@ -569,7 +587,105 @@
 	equals(len(b2), n, t)
 
 	// this will use the new fake time
-	existsWithLen(filename, n, t)
+	existsWithContent(filename, b2, t)
+}
+
+func TestCompressOnRotate(t *testing.T) {
+	currentTime = fakeTime
+	megabyte = 1
+
+	dir := makeTempDir("TestCompressOnRotate", t)
+	defer os.RemoveAll(dir)
+
+	filename := logFile(dir)
+	l := &Logger{
+		Compress: true,
+		Filename: filename,
+		MaxSize:  10,
+	}
+	defer l.Close()
+	b := []byte("boo!")
+	n, err := l.Write(b)
+	isNil(err, t)
+	equals(len(b), n, t)
+
+	existsWithContent(filename, b, t)
+	fileCount(dir, 1, t)
+
+	newFakeTime()
+
+	err = l.Rotate()
+	isNil(err, t)
+
+	// the old logfile should be moved aside and the main logfile should have
+	// nothing in it.
+	existsWithContent(filename, []byte{}, t)
+
+	// we need to wait a little bit since the files get compressed on a different
+	// goroutine.
+	<-time.After(10 * time.Millisecond)
+
+	// a compressed version of the log file should now exist and the original
+	// should have been removed.
+	bc := new(bytes.Buffer)
+	gz := gzip.NewWriter(bc)
+	_, err = gz.Write(b)
+	isNil(err, t)
+	err = gz.Close()
+	isNil(err, t)
+	existsWithContent(backupFile(dir)+compressSuffix, bc.Bytes(), t)
+	notExist(backupFile(dir), t)
+
+	fileCount(dir, 2, t)
+}
+
+func TestCompressOnResume(t *testing.T) {
+	currentTime = fakeTime
+	megabyte = 1
+
+	dir := makeTempDir("TestCompressOnResume", t)
+	defer os.RemoveAll(dir)
+
+	filename := logFile(dir)
+	l := &Logger{
+		Compress: true,
+		Filename: filename,
+		MaxSize:  10,
+	}
+	defer l.Close()
+
+	// Create a backup file and empty "compressed" file.
+	filename2 := backupFile(dir)
+	b := []byte("foo!")
+	err := ioutil.WriteFile(filename2, b, 0644)
+	isNil(err, t)
+	err = ioutil.WriteFile(filename2+compressSuffix, []byte{}, 0644)
+	isNil(err, t)
+
+	newFakeTime()
+
+	b2 := []byte("boo!")
+	n, err := l.Write(b2)
+	isNil(err, t)
+	equals(len(b2), n, t)
+	existsWithContent(filename, b2, t)
+
+	// we need to wait a little bit since the files get compressed on a different
+	// goroutine.
+	<-time.After(10 * time.Millisecond)
+
+	// The write should have started the compression - a compressed version of
+	// the log file should now exist and the original should have been removed.
+	bc := new(bytes.Buffer)
+	gz := gzip.NewWriter(bc)
+	_, err = gz.Write(b)
+	isNil(err, t)
+	err = gz.Close()
+	isNil(err, t)
+	existsWithContent(filename2+compressSuffix, bc.Bytes(), t)
+	notExist(filename2, t)
+
+	fileCount(dir, 2, t)
 }
 
 func TestJson(t *testing.T) {
@@ -579,7 +695,8 @@
 	"maxsize": 5,
 	"maxage": 10,
 	"maxbackups": 3,
-	"localtime": true
+	"localtime": true,
+	"compress": true
 }`[1:])
 
 	l := Logger{}
@@ -590,6 +707,7 @@
 	equals(10, l.MaxAge, t)
 	equals(3, l.MaxBackups, t)
 	equals(true, l.LocalTime, t)
+	equals(true, l.Compress, t)
 }
 
 func TestYaml(t *testing.T) {
@@ -598,7 +716,8 @@ filename: foo
 maxsize: 5
 maxage: 10
 maxbackups: 3
-localtime: true`[1:])
+localtime: true
+compress: true`[1:])
 
 	l := Logger{}
 	err := yaml.Unmarshal(data, &l)
@@ -608,6 +727,7 @@ localtime: true`[1:])
 	equals(10, l.MaxAge, t)
 	equals(3, l.MaxBackups, t)
 	equals(true, l.LocalTime, t)
+	equals(true, l.Compress, t)
 }
 
 func TestToml(t *testing.T) {
@@ -616,7 +736,8 @@ filename = "foo"
 maxsize = 5
 maxage = 10
 maxbackups = 3
-localtime = true`[1:]
+localtime = true
+compress = true`[1:]
 
 	l := Logger{}
 	md, err := toml.Decode(data, &l)
@@ -626,6 +747,7 @@ localtime = true`[1:]
 	equals(10, l.MaxAge, t)
 	equals(3, l.MaxBackups, t)
 	equals(true, l.LocalTime, t)
+	equals(true, l.Compress, t)
 	equals(0, len(md.Undecoded()), t)
 }
@@ -639,11 +761,15 @@ func makeTempDir(name string, t testing.TB) string {
 	return dir
 }
 
-// existsWithLen checks that the given file exists and has the correct length.
-func existsWithLen(path string, length int, t testing.TB) {
+// existsWithContent checks that the given file exists and has the correct content.
+func existsWithContent(path string, content []byte, t testing.TB) {
 	info, err := os.Stat(path)
 	isNilUp(err, t, 1)
-	equalsUp(int64(length), info.Size(), t, 1)
+	equalsUp(int64(len(content)), info.Size(), t, 1)
+
+	b, err := ioutil.ReadFile(path)
+	isNilUp(err, t, 1)
+	equalsUp(content, b, t, 1)
 }
 
 // logFile returns the log file name in the given directory for the current fake
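
For reference, a minimal usage sketch of the option this patch adds. The `Compress` field and its siblings come straight from the `Logger` struct in the diff above; the `gopkg.in/natefinch/lumberjack.v2` import path and the surrounding program are assumptions for illustration only, not part of the change.

```go
package main

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2" // assumed import path for this package
)

func main() {
	// Logger is an io.Writer, so it can back the standard library logger.
	// With Compress set, rotated backups are gzipped by the mill goroutine
	// introduced in this patch; old backups are still pruned by MaxBackups
	// and MaxAge.
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/var/log/myapp/app.log",
		MaxSize:    100, // megabytes
		MaxBackups: 3,
		MaxAge:     28, // days
		Compress:   true,
	})
	log.Println("hello")
}
```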