diff --git a/README.md b/README.md
index 194b338..9a4248a 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,30 @@ import (
 logger := lager.NewLogger("my-app")
 ```
 
+### Lager and [`log/slog`](https://pkg.go.dev/log/slog)
+Lager was written long before Go 1.21 introduced structured logging in the standard library.
+Wrapper functions are provided for interoperability between Lager and `slog`;
+they are only available when building with Go 1.21 or later.
+
+Lager can be used as an [`slog.Handler`](https://pkg.go.dev/log/slog#Handler) using the `NewHandler()` function:
+
+```go
+func codeThatAcceptsSlog(l *slog.Logger) { ... }
+
+lagerLogger := lager.NewLogger("my-lager-logger")
+
+codeThatAcceptsSlog(slog.New(lager.NewHandler(lagerLogger)))
+```
+
+An `slog.Logger` can be used as a Lager `Sink` using the `NewSlogSink()` function:
+```go
+var l *slog.Logger = codeThatReturnsSlog()
+
+lagerLogger := lager.NewLogger("my-lager-logger")
+
+lagerLogger.RegisterSink(lager.NewSlogSink(l))
+```
+
 ### Sinks
 
 Lager can write logs to a variety of destinations. You can specify the destinations
diff --git a/handler.go b/handler.go
new file mode 100644
index 0000000..2cdaf7c
--- /dev/null
+++ b/handler.go
@@ -0,0 +1,162 @@
+//go:build go1.21
+
+package lager
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+)
+
+// NewHandler wraps the logger as a slog.Handler.
+// The supplied Logger must be a lager.logger
+// type created by lager.NewLogger(); otherwise
+// it panics.
+//
+// Note the following log level conversions:
+//
+//	slog.LevelDebug -> lager.DEBUG
+//	slog.LevelWarn  -> lager.ERROR
+//	slog.LevelError -> lager.ERROR
+//	default         -> lager.INFO
+func NewHandler(l Logger) slog.Handler {
+	switch ll := l.(type) {
+	case *logger:
+		return &handler{logger: ll}
+	default:
+		panic("lager.Logger must be an instance of lager.logger")
+	}
+}
+
+// Type decorator is used to decorate the attributes with groups and more attributes
+type decorator func(map[string]any) map[string]any
+
+// Type handler is a slog.Handler that wraps a lager logger.
+// It uses the logger concrete type rather than the Logger interface
+// because it uses methods not available on the interface.
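+// The decorators field records the transformations added by WithAttrs and
+// WithGroup so they can be replayed onto each record's attributes in Handle.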
+type handler struct {
+	logger     *logger
+	decorators []decorator
+}
+
+// Enabled always returns true
+func (h *handler) Enabled(_ context.Context, _ slog.Level) bool {
+	return true
+}
+
+// Handle converts a slog.Record into a lager.LogFormat and passes it to every Sink
+func (h *handler) Handle(_ context.Context, r slog.Record) error {
+	log := LogFormat{
+		time:      r.Time,
+		Timestamp: formatTimestamp(r.Time),
+		Source:    h.logger.component,
+		Message:   fmt.Sprintf("%s.%s", h.logger.task, r.Message),
+		LogLevel:  toLogLevel(r.Level),
+		Data:      h.logger.baseData(h.decorate(attrFromRecord(r))),
+	}
+
+	for _, sink := range h.logger.sinks {
+		sink.Log(log)
+	}
+
+	return nil
+}
+
+// WithAttrs returns a new slog.Handler which always adds the specified attributes
+func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	return &handler{
+		logger:     h.logger,
+		decorators: append(h.decorators, attrDecorator(attrs)),
+	}
+}
+
+// WithGroup returns a new slog.Handler which always logs attributes in the specified group
+func (h *handler) WithGroup(name string) slog.Handler {
+	return &handler{
+		logger:     h.logger,
+		decorators: append(h.decorators, groupDecorator(name)),
+	}
+}
+
+// decorate will decorate a body using the decorators that have been defined
+func (h *handler) decorate(body map[string]any) map[string]any {
+	for i := len(h.decorators) - 1; i >= 0; i-- { // reverse iteration
+		body = h.decorators[i](body)
+	}
+	return body
+}
+
+// attrDecorator returns a decorator for the specified attributes
+func attrDecorator(attrs []slog.Attr) decorator {
+	return func(body map[string]any) map[string]any {
+		if body == nil {
+			body = make(map[string]any)
+		}
+		processAttrs(attrs, body)
+		return body
+	}
+}
+
+// groupDecorator returns a decorator for the specified group name
+func groupDecorator(group string) decorator {
+	return func(body map[string]any) map[string]any {
+		switch len(body) {
+		case 0:
+			return nil
+		default:
+			return map[string]any{group: body}
+		}
+	}
+}
+
+// attrFromRecord extracts and processes the attributes from a record
+func attrFromRecord(r slog.Record) map[string]any {
+	if r.NumAttrs() == 0 {
+		return nil
+	}
+
+	body := make(map[string]any, r.NumAttrs())
+	r.Attrs(func(attr slog.Attr) bool {
+		processAttr(attr, body)
+		return true
+	})
+
+	return body
+}
+
+// processAttrs calls processAttr() for each attribute
+func processAttrs(attrs []slog.Attr, target map[string]any) {
+	for _, attr := range attrs {
+		processAttr(attr, target)
+	}
+}
+
+// processAttr adds the attribute to the target with appropriate transformations
+func processAttr(attr slog.Attr, target map[string]any) {
+	rv := attr.Value.Resolve()
+
+	switch {
+	case rv.Kind() == slog.KindGroup && attr.Key != "":
+		nt := make(map[string]any)
+		processAttrs(attr.Value.Group(), nt)
+		target[attr.Key] = nt
+	case rv.Kind() == slog.KindGroup && attr.Key == "":
+		processAttrs(attr.Value.Group(), target)
+	case attr.Key == "":
+		// skip
+	default:
+		target[attr.Key] = rv.Any()
+	}
+}
+
+// toLogLevel converts from slog levels to lager levels
+func toLogLevel(l slog.Level) LogLevel {
+	switch l {
+	case slog.LevelDebug:
+		return DEBUG
+	case slog.LevelError, slog.LevelWarn:
+		return ERROR
+	default:
+		return INFO
+	}
+}
diff --git a/handler_test.go b/handler_test.go
new file mode 100644
index 0000000..8d087bc
--- /dev/null
+++ b/handler_test.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+
+package lager_test
+
+import (
+	"code.cloudfoundry.org/lager/v3"
+	"code.cloudfoundry.org/lager/v3/lagertest"
+	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + "log/slog" + "strconv" + "strings" + "testing/slogtest" + "time" +) + +var _ = Describe("NewHandler", func() { + var ( + s *lagertest.TestSink + l lager.Logger + h slog.Handler + ) + + BeforeEach(func() { + s = lagertest.NewTestSink() + l = lager.NewLogger("test") + l.RegisterSink(s) + + h = lager.NewHandler(l) + }) + + It("logs a message", func() { + slog.New(h).Info("foo", "bar", "baz") + logs := s.Logs() + Expect(logs).To(ConsistOf(MatchFields(IgnoreExtras, Fields{ + "Source": Equal("test"), + "Message": Equal("test.foo"), + "Data": SatisfyAll( + HaveLen(1), + HaveKeyWithValue("bar", "baz"), + ), + "LogLevel": Equal(lager.INFO), + }))) + }) + + It("logs a debug message", func() { + slog.New(h).Debug("foo", "bar", 3, slog.Int("baz", 42)) + logs := s.Logs() + Expect(logs).To(ConsistOf(MatchFields(IgnoreExtras, Fields{ + "Source": Equal("test"), + "Message": Equal("test.foo"), + "Data": SatisfyAll( + HaveLen(2), + HaveKeyWithValue("bar", float64(3)), + HaveKeyWithValue("baz", float64(42)), + ), + "LogLevel": Equal(lager.DEBUG), + }))) + }) + + It("behaves like a slog.NewHandler", func() { + results := func() (result []map[string]any) { + for _, l := range s.Logs() { + d := l.Data + + t := parseTimestamp(l.Timestamp) + if !t.IsZero() { + d["time"] = t + } + + d["level"] = l.LogLevel + d["msg"] = strings.TrimPrefix(l.Message, "test.") + result = append(result, d) + } + return result + } + + Expect(slogtest.TestHandler(h, results)).To(Succeed()) + }) +}) + +// parseTimestamp turns a lager timestamp back into a time.Time +// with a special case for the formatting of time.Time{} because +// there is a test that check time.Time{} is ignored as a time value +func parseTimestamp(input string) time.Time { + GinkgoHelper() + + // This is what time.Time{} gets formatted as + if input == "-6795364578.871345520" { + return time.Time{} + } + + f64, err := strconv.ParseFloat(input, 64) + Expect(err).NotTo(HaveOccurred()) + + secs := int64(f64) + nanos := int64((f64 - float64(secs)) * 1e9) + return time.Unix(secs, nanos) +} diff --git a/slog_sink.go b/slog_sink.go new file mode 100644 index 0000000..095e16a --- /dev/null +++ b/slog_sink.go @@ -0,0 +1,63 @@ +//go:build go1.21 + +package lager + +import ( + "context" + "log/slog" +) + +// Type slogSink wraps an slog.Logger as a Sink +type slogSink struct { + logger *slog.Logger +} + +// NewSlogSink wraps a slog.Logger as a lager Sink +// This allows code using slog to integrate with code that uses lager +// Note the following log level conversions: +// +// lager.DEBUG -> slog.LevelDebug +// lager.ERROR -> slog.LevelError +// lager.FATAL -> slog.LevelError +// default -> slog.LevelInfo +func NewSlogSink(l *slog.Logger) Sink { + return &slogSink{logger: l} +} + +// Log exists to implement the lager.Sink interface. +func (l *slogSink) Log(f LogFormat) { + // For lager.Error() and lager.Fatal() the error (and stacktrace) are already in f.Data + r := slog.NewRecord(f.time, toSlogLevel(f.LogLevel), f.Message, 0) + r.AddAttrs(toAttr(f.Data)...) 
+
+	// By calling the handler directly we can pass through the original timestamp,
+	// whereas calling a method on the logger would generate a new timestamp
+	l.logger.Handler().Handle(context.Background(), r)
+}
+
+// toAttr converts a lager.Data into []slog.Attr
+func toAttr(d Data) []slog.Attr {
+	l := len(d)
+	if l == 0 {
+		return nil
+	}
+
+	attr := make([]slog.Attr, 0, l)
+	for k, v := range d {
+		attr = append(attr, slog.Any(k, v))
+	}
+
+	return attr
+}
+
+// toSlogLevel converts lager log levels to slog levels
+func toSlogLevel(l LogLevel) slog.Level {
+	switch l {
+	case DEBUG:
+		return slog.LevelDebug
+	case ERROR, FATAL:
+		return slog.LevelError
+	default:
+		return slog.LevelInfo
+	}
+}
diff --git a/slog_sink_test.go b/slog_sink_test.go
new file mode 100644
index 0000000..ba3f3ed
--- /dev/null
+++ b/slog_sink_test.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+
+package lager_test
+
+import (
+	"bytes"
+	"code.cloudfoundry.org/lager/v3"
+	"encoding/json"
+	"fmt"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gstruct"
+	"log/slog"
+)
+
+var _ = Describe("NewSlogSink", func() {
+	var (
+		buf    bytes.Buffer
+		logger lager.Logger
+	)
+
+	matchTimestamp := MatchRegexp(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{5,9}Z$`)
+
+	parsedLogMessage := func() (receiver map[string]any) {
+		Expect(json.Unmarshal(buf.Bytes(), &receiver)).To(Succeed())
+		return
+	}
+
+	BeforeEach(func() {
+		buf = bytes.Buffer{}
+		logger = lager.NewLogger("fake-component")
+		logger.RegisterSink(lager.NewSlogSink(slog.New(slog.NewJSONHandler(&buf, nil))))
+	})
+
+	It("logs Info()", func() {
+		logger.Info("fake-info", lager.Data{"foo": "bar"})
+
+		Expect(parsedLogMessage()).To(MatchAllKeys(Keys{
+			"time":  matchTimestamp,
+			"level": Equal("INFO"),
+			"msg":   Equal("fake-component.fake-info"),
+			"foo":   Equal("bar"),
+		}))
+	})
+
+	It("logs Debug()", func() {
+		logger.Debug("fake-debug", lager.Data{"foo": "bar"})
+
+		Expect(parsedLogMessage()).To(MatchAllKeys(Keys{
+			"time":  matchTimestamp,
+			"level": Equal("DEBUG"),
+			"msg":   Equal("fake-component.fake-debug"),
+			"foo":   Equal("bar"),
+		}))
+	})
+
+	It("logs Error()", func() {
+		logger.Error("fake-error", fmt.Errorf("boom"), lager.Data{"foo": "bar"})
+
+		Expect(parsedLogMessage()).To(MatchAllKeys(Keys{
+			"time":  matchTimestamp,
+			"error": Equal("boom"),
+			"level": Equal("ERROR"),
+			"msg":   Equal("fake-component.fake-error"),
+			"foo":   Equal("bar"),
+		}))
+	})
+
+	It("logs Fatal()", func() {
+		Expect(func() {
+			logger.Fatal("fake-fatal", fmt.Errorf("boom"), lager.Data{"foo": "bar"})
+		}).To(Panic())
+
+		Expect(parsedLogMessage()).To(MatchAllKeys(Keys{
+			"time":  matchTimestamp,
+			"error": Equal("boom"),
+			"level": Equal("ERROR"),
+			"msg":   Equal("fake-component.fake-fatal"),
+			"foo":   Equal("bar"),
+			"trace": ContainSubstring(`code.cloudfoundry.org/lager/v3.(*logger).Fatal`),
+		}))
+	})
+})