Commit 481296c7 authored by Nick Thomas's avatar Nick Thomas

Merge branch 'use-labkit-raven' into 'master'

Use the LabKit logging infrastructure

See merge request gitlab-org/gitlab-workhorse!402
parents 2aeae5a5 c2aa2d57
@@ -5,7 +5,7 @@ import (
 "net/http"
 "os"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 )
 func main() {
...
@@ -14,7 +14,7 @@ import (
 "time"
 "github.com/gorilla/websocket"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/api"
 )
@@ -172,10 +172,16 @@ func startWebsocketServer(subprotocols ...string) (chan connWithReq, *httptest.S
 connCh := make(chan connWithReq, 1)
 server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-log.Println("WEBSOCKET", r.Method, r.URL, r.Header)
+logEntry := log.WithFields(log.Fields{
+"method": r.Method,
+"url": r.URL,
+"headers": r.Header,
+})
+logEntry.Info("WEBSOCKET")
 conn, err := upgrader.Upgrade(w, r, nil)
 if err != nil {
-log.Println("WEBSOCKET", r.Method, r.URL, "Upgrade failed", err)
+logEntry.WithError(err).Error("WEBSOCKET Upgrade failed")
 return
 }
 connCh <- connWithReq{conn, r}
...
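Note: the pattern above (build a log entry once, then emit structured events from it) is the labkit replacement for the old positional log.Println calls. A minimal standalone sketch, using only calls that appear in this MR (field values are illustrative):

package main

import (
	"errors"

	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	// Build an entry once and reuse it, as the websocket test above now does.
	logEntry := log.WithFields(log.Fields{
		"method": "GET",
		"url":    "/-/channel", // illustrative value
	})
	logEntry.Info("WEBSOCKET")

	// Errors become a structured field instead of a positional Println argument.
	logEntry.WithError(errors.New("upgrade failed")).Error("WEBSOCKET Upgrade failed")
}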
@@ -8,7 +8,8 @@ import (
 "io"
 "os"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/zipartifacts"
 )
@@ -37,7 +38,7 @@ func main() {
 os.Exit(1)
 }
-scrubbedArchivePath := helper.ScrubURLParams(archivePath)
+scrubbedArchivePath := mask.URL(archivePath)
 fileName, err := zipartifacts.DecodeFileEntry(encodedFileName)
 if err != nil {
...
@@ -4,7 +4,7 @@ import (
 "os"
 "testing"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/testhelper"
 )
...
@@ -12,8 +12,9 @@ import (
 "strings"
 "syscall"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/senddata"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/zipartifacts"
 )
@@ -31,7 +32,7 @@ func (e *entry) Inject(w http.ResponseWriter, r *http.Request, sendData string) {
 return
 }
-log.WithFields(r.Context(), log.Fields{
+log.WithContextFields(r.Context(), log.Fields{
 "entry": params.Entry,
 "archive": params.Archive,
 "path": r.URL.Path,
...
@@ -7,9 +7,10 @@ import (
 "github.com/gorilla/websocket"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/api"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 )
 var (
@@ -45,7 +46,7 @@ func ProxyChannel(w http.ResponseWriter, r *http.Request, settings *api.ChannelS
 server, err := connectToServer(settings, r)
 if err != nil {
 helper.Fail500(w, r, err)
-log.WithError(r.Context(), err).Print("Channel: connecting to server failed")
+log.ContextLogger(r.Context()).WithError(err).Print("Channel: connecting to server failed")
 return
 }
 defer server.UnderlyingConn().Close()
@@ -53,7 +54,7 @@ func ProxyChannel(w http.ResponseWriter, r *http.Request, settings *api.ChannelS
 client, err := upgradeClient(w, r)
 if err != nil {
-log.WithError(r.Context(), err).Print("Channel: upgrading client to websocket failed")
+log.ContextLogger(r.Context()).WithError(err).Print("Channel: upgrading client to websocket failed")
 return
 }
@@ -64,7 +65,7 @@ func ProxyChannel(w http.ResponseWriter, r *http.Request, settings *api.ChannelS
 defer client.UnderlyingConn().Close()
 clientAddr := getClientAddr(r) // We can't know the port with confidence
-logEntry := log.WithFields(r.Context(), log.Fields{
+logEntry := log.WithContextFields(r.Context(), log.Fields{
 "clientAddr": clientAddr,
 "serverAddr": serverAddr,
 })
...
package helper
import (
"io/ioutil"
"testing"
"time"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
func TestAccessLogFormatter_Format(t *testing.T) {
discardLogger := log.New()
discardLogger.Out = ioutil.Discard
tests := []struct {
name string
entry *log.Entry
want string
}{
{
"blank",
discardLogger.WithField("blank", ""),
"- - - [2018/01/07:00:00:00 +0000] \" \" 0 0 \"\" \"\" 0.000\n",
},
{
"full",
discardLogger.WithFields(log.Fields{
"host": "gitlab.com",
"remoteAddr": "127.0.0.1",
"method": "GET",
"uri": "/",
"proto": "HTTP/1.1",
"status": 200,
"written": 100,
"referer": "http://localhost",
"userAgent": "Mozilla/1.0",
"duration": 5.0,
}),
"gitlab.com 127.0.0.1 - - [2018/01/07:00:00:00 +0000] \"GET / HTTP/1.1\" 200 100 \"http://localhost\" \"Mozilla/1.0\" 5.000\n",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
f := &accessLogFormatter{clock: &StubClock{time.Unix(1515283200, 0).UTC()}}
got, err := f.Format(tt.entry)
if err != nil {
t.Errorf("AccessLogFormatter.Format() error = %v", err)
return
}
assert.Equal(t, tt.want, string(got))
})
}
}
@@ -10,13 +10,12 @@ import (
 "net/url"
 "os"
 "os/exec"
-"regexp"
 "strings"
 "syscall"
 "github.com/sebest/xff"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 )
 const NginxResponseBufferHeader = "X-Accel-Buffering"
@@ -44,12 +43,12 @@ func RequestEntityTooLarge(w http.ResponseWriter, r *http.Request, err error) {
 func printError(r *http.Request, err error) {
 if r != nil {
-log.WithFields(r.Context(), log.Fields{
+log.WithContextFields(r.Context(), log.Fields{
 "method": r.Method,
-"uri": ScrubURLParams(r.RequestURI),
+"uri": mask.URL(r.RequestURI),
 }).WithError(err).Error("error")
 } else {
-log.NoContext().WithError(err).Error("unknown error")
+log.WithError(err).Error("unknown error")
 }
 }
@@ -92,7 +91,7 @@ func OpenFile(path string) (file *os.File, fi os.FileInfo, err error) {
 func URLMustParse(s string) *url.URL {
 u, err := url.Parse(s)
 if err != nil {
-log.NoContext().WithField("url", s).WithError(err).Fatal("urlMustParse")
+log.WithError(err).WithField("url", s).Fatal("urlMustParse")
 }
 return u
 }
@@ -202,84 +201,3 @@ func CloneRequestWithNewBody(r *http.Request, body []byte) *http.Request {
 newReq.ContentLength = int64(len(body))
 return &newReq
 }
// Based on https://stackoverflow.com/a/52965552/474597
// ScrubURLParams replaces the content of any sensitive query string parameters
// in an URL with `[FILTERED]`
func ScrubURLParams(originalURL string) string {
u, err := url.Parse(originalURL)
if err != nil {
return "<invalid URL>"
}
buf := bytes.NewBuffer(make([]byte, 0, len(originalURL)))
for i, queryPart := range bytes.Split([]byte(u.RawQuery), []byte("&")) {
if i != 0 {
buf.WriteByte(byte('&'))
}
splitParam := bytes.SplitN(queryPart, []byte("="), 2)
if len(splitParam) == 2 {
buf.Write(splitParam[0])
buf.WriteByte(byte('='))
if isParamSensitive(splitParam[0]) {
buf.Write([]byte("[FILTERED]"))
} else {
buf.Write(splitParam[1])
}
} else {
buf.Write(queryPart)
}
}
u.RawQuery = buf.String()
return u.String()
}
// Remember to keep in sync with Rails' filter_parameters
var sensitiveRegexps = []*regexp.Regexp{
regexp.MustCompile(`token$`),
regexp.MustCompile(`key$`),
regexp.MustCompile(`(?i)(?:X\-AMZ\-)?Signature`),
}
// Not in regexp due to SA6004.
// Not in string for performance.
var sensitivePartialMatch = [][]byte{
[]byte("password"),
[]byte("secret"),
}
var sensitiveExactMatch = []string{
"certificate",
"hook",
"import_url",
"otp_attempt",
"sentry_dsn",
"trace",
"variables",
"content",
"sharedSecret",
}
func isParamSensitive(name []byte) bool {
for _, s := range sensitiveExactMatch {
if string(name) == s {
return true
}
}
for _, r := range sensitiveRegexps {
if r.Match(name) {
return true
}
}
for _, s := range sensitivePartialMatch {
if bytes.Contains(name, s) {
return true
}
}
return false
}
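Note: the block above is the URL scrubber this MR deletes; its call sites now go through labkit's mask package instead (see the hunks replacing helper.ScrubURLParams with mask.URL). A minimal sketch of the replacement call, with an illustrative URL:

package main

import (
	"fmt"

	"gitlab.com/gitlab-org/labkit/mask"
)

func main() {
	// mask.URL takes over the role of ScrubURLParams: sensitive query parameters
	// are replaced with a filtered placeholder before the URL is logged.
	fmt.Println(mask.URL("http://example.com?private_token=secret&foo=1"))
}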
@@ -2,11 +2,13 @@ package helper
 import (
 "bytes"
+"fmt"
 "io"
 "net/http"
 "net/http/httptest"
 "testing"
+"github.com/sirupsen/logrus"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 )
@@ -142,45 +144,69 @@ func TestFail500WorksWithNils(t *testing.T) {
 assert.Equal(t, "Internal server error\n", body.String())
 }
-func TestScrubURLParams(t *testing.T) {
-for before, expected := range map[string]string{
-"http://example.com": "http://example.com",
-"http://example.com?foo=1": "http://example.com?foo=1",
-"http://example.com?title=token": "http://example.com?title=token",
-"http://example.com?authenticity_token=1": "http://example.com?authenticity_token=[FILTERED]",
-"http://example.com?private_token=1": "http://example.com?private_token=[FILTERED]",
-"http://example.com?rss_token=1": "http://example.com?rss_token=[FILTERED]",
-"http://example.com?access_token=1": "http://example.com?access_token=[FILTERED]",
-"http://example.com?refresh_token=1": "http://example.com?refresh_token=[FILTERED]",
-"http://example.com?foo&authenticity_token=blahblah&bar": "http://example.com?foo&authenticity_token=[FILTERED]&bar",
-"http://example.com?private-token=1": "http://example.com?private-token=[FILTERED]",
-"http://example.com?foo&private-token=blahblah&bar": "http://example.com?foo&private-token=[FILTERED]&bar",
-"http://example.com?private-token=foo&authenticity_token=bar": "http://example.com?private-token=[FILTERED]&authenticity_token=[FILTERED]",
-"https://example.com:8080?private-token=foo&authenticity_token=bar": "https://example.com:8080?private-token=[FILTERED]&authenticity_token=[FILTERED]",
-"/?private-token=foo&authenticity_token=bar": "/?private-token=[FILTERED]&authenticity_token=[FILTERED]",
-"?private-token=&authenticity_token=&bar": "?private-token=[FILTERED]&authenticity_token=[FILTERED]&bar",
-"?private-token=foo&authenticity_token=bar": "?private-token=[FILTERED]&authenticity_token=[FILTERED]",
-"?private_token=foo&authenticity-token=bar": "?private_token=[FILTERED]&authenticity-token=[FILTERED]",
-"?X-AMZ-Signature=foo": "?X-AMZ-Signature=[FILTERED]",
-"?x-amz-signature=foo": "?x-amz-signature=[FILTERED]",
-"?Signature=foo": "?Signature=[FILTERED]",
-"?confirmation_password=foo": "?confirmation_password=[FILTERED]",
-"?pos_secret_number=foo": "?pos_secret_number=[FILTERED]",
-"?sharedSecret=foo": "?sharedSecret=[FILTERED]",
-"?book_key=foo": "?book_key=[FILTERED]",
-"?certificate=foo": "?certificate=[FILTERED]",
-"?hook=foo": "?hook=[FILTERED]",
-"?import_url=foo": "?import_url=[FILTERED]",
-"?otp_attempt=foo": "?otp_attempt=[FILTERED]",
-"?sentry_dsn=foo": "?sentry_dsn=[FILTERED]",
-"?trace=foo": "?trace=[FILTERED]",
-"?variables=foo": "?variables=[FILTERED]",
-"?content=foo": "?content=[FILTERED]",
-"?content=e=mc2": "?content=[FILTERED]",
-"?formula=e=mc2": "?formula=e=mc2",
-"http://%41:8080/": "<invalid URL>",
-} {
-after := ScrubURLParams(before)
-assert.Equal(t, expected, after, "Scrubbing %q", before)
+func TestLogError(t *testing.T) {
+tests := []struct {
+name string
+method string
+uri string
+err error
+logMatchers []string
+}{
+{
+name: "nil_request",
+err: fmt.Errorf("crash"),
+logMatchers: []string{
+`level=error msg="unknown error" error=crash`,
+},
+},
+{
+name: "nil_request_nil_error",
+err: nil,
+logMatchers: []string{
+`level=error msg="unknown error" error="<nil>"`,
+},
+},
+{
+name: "basic_url",
+method: "GET",
+uri: "http://localhost:3000/",
+err: fmt.Errorf("error"),
+logMatchers: []string{
+`level=error msg=error correlation_id= error=error method=GET uri="http://localhost:3000/"`,
+},
+},
+{
+name: "secret_url",
+method: "GET",
+uri: "http://localhost:3000/path?certificate=123&sharedSecret=123&import_url=the_url&my_password_string=password",
+err: fmt.Errorf("error"),
+logMatchers: []string{
+`level=error msg=error correlation_id= error=error method=GET uri="http://localhost:3000/path\?certificate=\[FILTERED\]&sharedSecret=\[FILTERED\]&import_url=\[FILTERED\]&my_password_string=\[FILTERED\]"`,
+},
+},
+}
+for _, tt := range tests {
+t.Run(tt.name, func(t *testing.T) {
+buf := &bytes.Buffer{}
+oldOut := logrus.StandardLogger().Out
+logrus.StandardLogger().Out = buf
+defer func() {
+logrus.StandardLogger().Out = oldOut
+}()
+var r *http.Request
+if tt.uri != "" {
+r = httptest.NewRequest(tt.method, tt.uri, nil)
+}
+LogError(r, tt.err)
+logString := buf.String()
+for _, v := range tt.logMatchers {
+require.Regexp(t, v, logString)
+}
+})
 }
 }
package helper
import (
"bufio"
"net"
"net/http"
"time"
log "github.com/sirupsen/logrus"
logging "gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
)
var (
accessLogEntry *log.Entry
)
// SetAccessLoggerEntry sets the access logger used in the system
func SetAccessLoggerEntry(logEntry *log.Entry) {
accessLogEntry = logEntry
}
type LoggingResponseWriter interface {
http.ResponseWriter
RequestFinished(r *http.Request)
}
type statsCollectingResponseWriter struct {
rw http.ResponseWriter
status int
wroteHeader bool
written int64
started time.Time
}
type hijackingResponseWriter struct {
statsCollectingResponseWriter
}
func NewStatsCollectingResponseWriter(rw http.ResponseWriter) LoggingResponseWriter {
out := statsCollectingResponseWriter{
rw: rw,
started: time.Now(),
}
if _, ok := rw.(http.Hijacker); ok {
return &hijackingResponseWriter{out}
}
return &out
}
func (l *hijackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
// The only way to get here is through NewStatsCollectingResponseWriter(), which
// checks that this cast will be valid.
hijacker := l.rw.(http.Hijacker)
return hijacker.Hijack()
}
func (l *statsCollectingResponseWriter) Header() http.Header {
return l.rw.Header()
}
func (l *statsCollectingResponseWriter) Write(data []byte) (int, error) {
if !l.wroteHeader {
l.WriteHeader(http.StatusOK)
}
n, err := l.rw.Write(data)
l.written += int64(n)
return n, err
}
func (l *statsCollectingResponseWriter) WriteHeader(status int) {
if l.wroteHeader {
return
}
l.wroteHeader = true
l.status = status
l.rw.WriteHeader(status)
}
func (l *statsCollectingResponseWriter) writeAccessLog(r *http.Request) {
if accessLogEntry == nil {
return
}
logEntry := accessLogEntry.WithFields(l.accessLogFields(r))
logging.WrapEntry(r.Context(), logEntry).Info("access")
}
func (l *statsCollectingResponseWriter) accessLogFields(r *http.Request) log.Fields {
duration := time.Since(l.started)
ip, _, _ := net.SplitHostPort(r.RemoteAddr)
return log.Fields{
"host": r.Host,
"remoteIp": ip,
"remoteAddr": r.RemoteAddr,
"method": r.Method,
"uri": ScrubURLParams(r.RequestURI),
"proto": r.Proto,
"status": l.status,
"written": l.written,
"referer": ScrubURLParams(r.Referer()),
"userAgent": r.UserAgent(),
"duration": duration.Seconds(),
}
}
func (l *statsCollectingResponseWriter) RequestFinished(r *http.Request) {
l.writeAccessLog(r)
}
package helper
import (
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func Test_statsCollectingResponseWriter_remoteIp_accessLogFields(t *testing.T) {
testCases := []struct {
remoteAddr string
expected string
}{
{remoteAddr: "", expected: ""},
{remoteAddr: "bogus", expected: ""},
{remoteAddr: "8.8.8.8:1234", expected: "8.8.8.8"},
{remoteAddr: "8.8.8.8", expected: ""},
{remoteAddr: "[2001:db8:85a3:8d3:1319:8a2e:370:7348]", expected: ""},
{remoteAddr: "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443", expected: "2001:db8:85a3:8d3:1319:8a2e:370:7348"},
}
for _, tc := range testCases {
req, err := http.NewRequest("GET", "/blah", nil)
req.RemoteAddr = tc.remoteAddr
assert.Nil(t, err)
l := &statsCollectingResponseWriter{
rw: nil,
status: 200,
wroteHeader: true,
written: 50,
started: time.Now(),
}
fields := l.accessLogFields(req)
ip := fields["remoteIp"].(string)
assert.Equal(t, tc.expected, ip)
}
}
func Test_statsCollectingResponseWriter_accessLogFields(t *testing.T) {
passwords := []string{
"should_be_filtered", // Basic case
"should+++filtered", // Password contains +
"this/is/a/to--ken", // Password contains on / and -
"%E9%A9%AE%E9%A9%AC", // Password contains URI Encoded chars
"should_be_%252E%252E%252F", // Password is mixed
}
queryStrings := []string{
"private-token=%s",
"authenticity-token=%s",
"rss-token=%s",
"access-token=%s",
"refresh-token=%s",
"private_token=%s",
"authenticity_token=%s",
"rss_token=%s",
"access_token=%s",
"refresh_token=%s",
"private-token=%s&authenticity-token=%s",
"private_token=%s&authenticity_token=%s",
"param=not_private&private-token=%s", // Non-private fields prefixed
"private-token=%s&param=not_private", // Non-private fields suffixed
}
paths := []string{
"",
"/",
"/groups/private_token/",
}
for i, password := range passwords {
for j, path := range paths {
for k, qs := range queryStrings {
queryString := strings.Replace(qs, "%s", password, -1)
t.Run(fmt.Sprintf("Test #%v %v %v", i, j, k), func(t *testing.T) {
resource := path + "?" + queryString
// Ensure the Referer is scrubbed too
req, err := http.NewRequest("GET", "/blah"+resource, nil)
assert.NoError(t, err, "GET %q: %v", resource, err)
req.Header.Set("Referer", "http://referer.example.com"+resource)
req.RequestURI = resource
l := &statsCollectingResponseWriter{
rw: nil,
status: 200,
wroteHeader: true,
written: 50,
started: time.Now(),
}
fields := l.accessLogFields(req)
uri := fields["uri"].(string)
assert.NotEmpty(t, uri, "uri is empty")
assert.Contains(t, uri, path, "GET %q: path not logged", resource)
assert.NotContains(t, uri, password, "GET %q: log not filtered correctly", resource)
referer := fields["referer"].(string)
assert.NotEmpty(t, referer, "referer is empty")
assert.Contains(t, referer, path, "GET %q: path not logged", resource)
assert.NotContains(t, referer, password, "GET %q: log not filtered correctly", resource)
assert.NotContains(t, fmt.Sprintf("%#v", fields), password, "password leaked into fields", fields)
if strings.Contains(queryString, "param=not_private") {
assert.Contains(t, uri, "param=not_private", "Missing non-private parameters in uri", uri)
assert.Contains(t, referer, "param=not_private", "Missing non-private parameters in referer", referer)
}
})
}
}
}
}
@@ -23,6 +23,7 @@ func captureRavenError(r *http.Request, err error) {
 CleanHeadersForRaven(r)
 interfaces = append(interfaces, raven.NewHttp(r))
+//lint:ignore SA1019 this was recently deprecated. Update workhorse to use labkit errortracking package.
 extra = correlation.SetExtra(r.Context(), extra)
 }
...
package log
import (
"context"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/labkit/correlation"
)
// Fields type, a helper to avoid importing logrus.Fields
type Fields map[string]interface{}
func toLogrusFields(f Fields) logrus.Fields {
lFields := logrus.Fields{}
for k, v := range f {
lFields[k] = v
}
return lFields
}
func getCorrelationID(ctx context.Context) string {
correlationID := correlation.ExtractFromContext(ctx)
if correlationID == "" {
return "[MISSING]"
}
return correlationID
}
// WithContext provides a *logrus.Entry with the proper "correlation_id" field.
//
// "[MISSING]" will be used when ctx has no value for KeyCorrelationID
func WithContext(ctx context.Context) *logrus.Entry {
return logrus.WithField("correlation_id", getCorrelationID(ctx))
}
// NoContext provides logrus.StandardLogger()
func NoContext() *logrus.Logger {
return logrus.StandardLogger()
}
// WrapEntry adds the proper "correlation_id" field to the provided *logrus.Entry
func WrapEntry(ctx context.Context, e *logrus.Entry) *logrus.Entry {
return e.WithField("correlation_id", getCorrelationID(ctx))
}
// WithFields decorates logrus.WithFields with the proper "correlation_id"
func WithFields(ctx context.Context, f Fields) *logrus.Entry {
return WithContext(ctx).WithFields(toLogrusFields(f))
}
// WithField decorates logrus.WithField with the proper "correlation_id"
func WithField(ctx context.Context, key string, value interface{}) *logrus.Entry {
return WithContext(ctx).WithField(key, value)
}
// WithError decorates logrus.WithError with the proper "correlation_id"
func WithError(ctx context.Context, err error) *logrus.Entry {
return WithContext(ctx).WithError(err)
}
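Note: this internal package decorates logrus entries with a correlation_id taken from the context; after this MR its callers use the equivalent labkit helpers. A minimal sketch of the mapping, using only calls that appear in the hunks above (field values are illustrative):

package main

import (
	"context"
	"errors"

	"gitlab.com/gitlab-org/labkit/correlation"
	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	ctx := correlation.ContextWithCorrelation(context.Background(), "test-id")

	// internal/log: log.WithFields(ctx, log.Fields{...})
	// labkit:       log.WithContextFields(ctx, log.Fields{...})
	log.WithContextFields(ctx, log.Fields{"path": "/"}).Info("served")

	// internal/log: log.WithError(ctx, err)
	// labkit:       log.ContextLogger(ctx).WithError(err)
	log.ContextLogger(ctx).WithError(errors.New("boom")).Error("failed")
}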
package log_test
import (
"context"
"errors"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/labkit/correlation"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
)
func requireCorrelationID(t *testing.T, getEntry func(ctx context.Context) *logrus.Entry) *logrus.Entry {
id := "test-id"
ctx := correlation.ContextWithCorrelation(context.Background(), id)
e := getEntry(ctx)
require.NotNil(t, e)
require.Contains(t, e.Data, "correlation_id")
require.Equal(t, id, e.Data["correlation_id"])
return e
}
func TestWithContext(t *testing.T) {
e := requireCorrelationID(t, log.WithContext)
require.Len(t, e.Data, 1)
}
func TestWithFields(t *testing.T) {
fields := log.Fields{
"one": "ok",
"two": 2,
}
toTest := func(ctx context.Context) *logrus.Entry {
return log.WithFields(ctx, fields)
}
e := requireCorrelationID(t, toTest)
for key, value := range fields {
require.Contains(t, e.Data, key)
require.Equal(t, value, e.Data[key])
}
require.Len(t, e.Data, len(fields)+1)
}
func TestWithField(t *testing.T) {
key := "key"
value := struct{ Name string }{"Test"}
toTest := func(ctx context.Context) *logrus.Entry {
return log.WithField(ctx, key, value)
}
e := requireCorrelationID(t, toTest)
require.Contains(t, e.Data, key)
require.Equal(t, value, e.Data[key])
require.Len(t, e.Data, 2)
}
func TestWithError(t *testing.T) {
err := errors.New("An error")
toTest := func(ctx context.Context) *logrus.Entry {
return log.WithError(ctx, err)
}
e := requireCorrelationID(t, toTest)
require.Contains(t, e.Data, logrus.ErrorKey)
require.Equal(t, err, e.Data[logrus.ErrorKey])
require.Len(t, e.Data, 2)
}
func TestNoContext(t *testing.T) {
key := "key"
value := struct{ Name string }{"Test"}
logger := log.NoContext()
require.Equal(t, logrus.StandardLogger(), logger)
e := logger.WithField(key, value)
require.NotContains(t, e.Data, "correlation_id")
require.Contains(t, e.Data, key)
require.Equal(t, value, e.Data[key])
require.Len(t, e.Data, 1)
}
@@ -12,9 +12,8 @@ import (
 "os"
 "time"
-log "github.com/sirupsen/logrus"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 )
 // ErrNotEnoughParts will be used when writing more than size * len(partURLs)
@@ -133,12 +132,12 @@ func (m *Multipart) complete(cmu *CompleteMultipartUpload) error {
 resp, err := httpClient.Do(req)
 if err != nil {
-return fmt.Errorf("CompleteMultipartUpload request %q: %v", helper.ScrubURLParams(m.CompleteURL), err)
+return fmt.Errorf("CompleteMultipartUpload request %q: %v", mask.URL(m.CompleteURL), err)
 }
 defer resp.Body.Close()
 if resp.StatusCode != http.StatusOK {
-return fmt.Errorf("CompleteMultipartUpload request %v returned: %s", helper.ScrubURLParams(m.CompleteURL), resp.Status)
+return fmt.Errorf("CompleteMultipartUpload request %v returned: %s", mask.URL(m.CompleteURL), resp.Status)
 }
 result := &compoundCompleteMultipartUploadResult{}
...
@@ -10,9 +10,8 @@ import (
 "time"
 "gitlab.com/gitlab-org/labkit/correlation"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/labkit/tracing"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
 )
 // httpTransport defines a http.Transport with values
@@ -63,7 +62,7 @@ func newObject(ctx context.Context, putURL, deleteURL string, putHeaders map[str
 if metrics {
 objectStorageUploadRequestsRequestFailed.Inc()
 }
-return nil, fmt.Errorf("PUT %q: %v", helper.ScrubURLParams(putURL), err)
+return nil, fmt.Errorf("PUT %q: %v", mask.URL(putURL), err)
 }
 req.ContentLength = size
@@ -111,7 +110,7 @@ func newObject(ctx context.Context, putURL, deleteURL string, putHeaders map[str
 if metrics {
 objectStorageUploadRequestsRequestFailed.Inc()
 }
-o.uploadError = fmt.Errorf("PUT request %q: %v", helper.ScrubURLParams(o.PutURL), err)
+o.uploadError = fmt.Errorf("PUT request %q: %v", mask.URL(o.PutURL), err)
 return
 }
 defer resp.Body.Close()
@@ -120,7 +119,7 @@ func newObject(ctx context.Context, putURL, deleteURL string, putHeaders map[str
 if metrics {
 objectStorageUploadRequestsInvalidStatus.Inc()
 }
-o.uploadError = StatusCodeError(fmt.Errorf("PUT request %v returned: %s", helper.ScrubURLParams(o.PutURL), resp.Status))
+o.uploadError = StatusCodeError(fmt.Errorf("PUT request %v returned: %s", mask.URL(o.PutURL), resp.Status))
 return
 }
...
@@ -8,9 +8,8 @@ import (
 "io"
 "net/http"
-log "github.com/sirupsen/logrus"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 )
 // Upload represents an upload to an ObjectStorage provider
@@ -76,7 +75,7 @@ func (u *uploader) syncAndDelete(url string) {
 req, err := http.NewRequest("DELETE", url, nil)
 if err != nil {
-log.WithError(err).WithField("object", helper.ScrubURLParams(url)).Warning("Delete failed")
+log.WithError(err).WithField("object", mask.URL(url)).Warning("Delete failed")
 return
 }
 // TODO: consider adding the context to the outgoing request for better instrumentation
@@ -84,7 +83,7 @@ func (u *uploader) syncAndDelete(url string) {
 // here we are not using u.ctx because we must perform cleanup regardless of parent context
 resp, err := httpClient.Do(req)
 if err != nil {
-log.WithError(err).WithField("object", helper.ScrubURLParams(url)).Warning("Delete failed")
+log.WithError(err).WithField("object", mask.URL(url)).Warning("Delete failed")
 return
 }
 resp.Body.Close()
...
@@ -9,7 +9,7 @@ import (
 "github.com/gomodule/redigo/redis"
 "github.com/jpillora/backoff"
 "github.com/prometheus/client_golang/prometheus"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
 )
@@ -103,7 +103,7 @@ func dialPubSub(dialer redisDialerFunc) (redis.Conn, error) {
 //
 // NOTE: There Can Only Be One!
 func Process() {
-log.Print("keywatcher: starting process loop")
+log.Info("keywatcher: starting process loop")
 for {
 conn, err := dialPubSub(workerDialFunc)
 if err != nil {
...
@@ -10,7 +10,7 @@ import (
 "github.com/FZambia/sentinel"
 "github.com/gomodule/redigo/redis"
 "github.com/prometheus/client_golang/prometheus"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
...
@@ -15,9 +15,11 @@ import (
 "github.com/prometheus/client_golang/prometheus"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/headers"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 )
 var (
@@ -105,10 +107,10 @@ func (s *sendFileResponseWriter) WriteHeader(status int) {
 }
 func sendFileFromDisk(w http.ResponseWriter, r *http.Request, file string) {
-log.WithFields(r.Context(), log.Fields{
+log.WithContextFields(r.Context(), log.Fields{
 "file": file,
 "method": r.Method,
-"uri": helper.ScrubURLParams(r.RequestURI),
+"uri": mask.URL(r.RequestURI),
 }).Print("Send file")
 contentTypeHeaderPresent := false
...
@@ -10,10 +10,11 @@ import (
 "github.com/prometheus/client_golang/prometheus"
 "gitlab.com/gitlab-org/labkit/correlation"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/labkit/tracing"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/senddata"
 )
@@ -100,10 +101,10 @@ func (e *entry) Inject(w http.ResponseWriter, r *http.Request, sendData string)
 return
 }
-log.WithFields(r.Context(), log.Fields{
-"url": helper.ScrubURLParams(params.URL),
+log.WithContextFields(r.Context(), log.Fields{
+"url": mask.URL(params.URL),
 "path": r.URL.Path,
-}).Print("SendURL: sending")
+}).Info("SendURL: sending")
 if params.URL == "" {
 sendURLRequestsInvalidData.Inc()
...
@@ -7,8 +7,10 @@ import (
 "strings"
 "time"
+"gitlab.com/gitlab-org/labkit/log"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/urlprefix"
 )
@@ -70,12 +72,13 @@ func (s *Static) ServeExisting(prefix urlprefix.Prefix, cache CacheMode, notFoun
 w.Header().Set("Expires", cacheUntil)
 }
-log.WithFields(r.Context(), log.Fields{
+log.WithContextFields(r.Context(), log.Fields{
 "file": file,
 "encoding": w.Header().Get("Content-Encoding"),
 "method": r.Method,
-"uri": helper.ScrubURLParams(r.RequestURI),
-}).Printf("Send static file")
+"uri": mask.URL(r.RequestURI),
+}).Info("Send static file")
 http.ServeContent(w, r, filepath.Base(file), fi.ModTime(), content)
 })
 }
@@ -10,8 +10,8 @@ import (
 "github.com/golang/protobuf/jsonpb"
 "github.com/golang/protobuf/proto"
-log "github.com/sirupsen/logrus"
 "gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
+"gitlab.com/gitlab-org/labkit/log"
 "golang.org/x/net/context"
 "google.golang.org/grpc/codes"
 "google.golang.org/grpc/status"
@@ -38,10 +38,10 @@ var (
 func init() {
 var err error
 if GitalyReceivePackResponseMock, err = ioutil.ReadFile(path.Join(RootDir(), "testdata/receive-pack-fixture.txt")); err != nil {
-log.Fatal(err)
+log.WithError(err).Fatal("Unable to read pack response")
 }
 if GitalyUploadPackResponseMock, err = ioutil.ReadFile(path.Join(RootDir(), "testdata/upload-pack-fixture.txt")); err != nil {
-log.Fatal(err)
+log.WithError(err).Fatal("Unable to read pack response")
 }
 }
...
@@ -15,7 +15,7 @@ import (
 "strings"
 "testing"
-log "github.com/sirupsen/logrus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/secret"
 )
@@ -109,14 +109,20 @@ func assertHeaderExists(t *testing.T, header string, actual, expected []string)
 func TestServerWithHandler(url *regexp.Regexp, handler http.HandlerFunc) *httptest.Server {
 return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+logEntry := log.WithFields(log.Fields{
+"method": r.Method,
+"url": r.URL,
+"action": "DENY",
+})
 if url != nil && !url.MatchString(r.URL.Path) {
-log.Println("UPSTREAM", r.Method, r.URL, "DENY")
+logEntry.Info("UPSTREAM")
 w.WriteHeader(404)
 return
 }
 if version := r.Header.Get("Gitlab-Workhorse"); version == "" {
-log.Println("UPSTREAM", r.Method, r.URL, "DENY")
+logEntry.Info("UPSTREAM")
 w.WriteHeader(403)
 return
 }
...
@@ -9,7 +9,7 @@ import (
 "os/exec"
 "regexp"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
+"gitlab.com/gitlab-org/labkit/log"
 )
 var ErrRemovingExif = errors.New("error while removing EXIF")
@@ -40,7 +40,7 @@ func (c *cleaner) Read(p []byte) (int, error) {
 n, err := c.stdout.Read(p)
 if err == io.EOF {
 if waitErr := c.wait(); waitErr != nil {
-log.WithFields(c.ctx, log.Fields{
+log.WithContextFields(c.ctx, log.Fields{
 "command": c.cmd.Args,
 "stderr": c.stderr.String(),
 "error": waitErr.Error(),
...
@@ -9,10 +9,10 @@ import (
 "strings"
 "github.com/prometheus/client_golang/prometheus"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/api"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/filestore"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/upload/exif"
 )
@@ -117,7 +117,7 @@ func (rew *rewriter) handleFilePart(ctx context.Context, name string, p *multipa
 var inputReader io.Reader
 if exif.IsExifFile(filename) {
-log.WithFields(ctx, log.Fields{
+log.WithContextFields(ctx, log.Fields{
 "filename": filename,
 }).Print("running exiftool to remove any metadata")
...
@@ -8,10 +8,13 @@ package upstream
 import (
 "fmt"
 "net/http"
 "strings"
+"github.com/sirupsen/logrus"
 "gitlab.com/gitlab-org/labkit/correlation"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
@@ -34,7 +37,7 @@ type upstream struct {
 RoundTripper http.RoundTripper
 }
-func NewUpstream(cfg config.Config) http.Handler {
+func NewUpstream(cfg config.Config, accessLogger *logrus.Logger) http.Handler {
 up := upstream{
 Config: cfg,
 }
@@ -45,7 +48,9 @@ func NewUpstream(cfg config.Config) http.Handler {
 up.configureURLPrefix()
 up.configureRoutes()
-return correlation.InjectCorrelationID(&up)
+handler := log.AccessLogger(&up, log.WithAccessLogger(accessLogger))
+handler = correlation.InjectCorrelationID(handler)
+return handler
 }
 func (u *upstream) configureURLPrefix() {
@@ -56,12 +61,9 @@ func (u *upstream) configureURLPrefix() {
 u.URLPrefix = urlprefix.Prefix(relativeURLRoot)
 }
-func (u *upstream) ServeHTTP(ow http.ResponseWriter, r *http.Request) {
+func (u *upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 helper.FixRemoteAddr(r)
-w := helper.NewStatsCollectingResponseWriter(ow)
-defer w.RequestFinished(r)
 helper.DisableResponseBuffering(w)
 // Drop RequestURI == "*" (FIXME: why?)
...
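Note: NewUpstream now delegates access logging to labkit instead of the removed statsCollectingResponseWriter. A minimal sketch of the same wrapping outside workhorse (the inner handler and address are illustrative):

package main

import (
	"net/http"

	"github.com/sirupsen/logrus"
	"gitlab.com/gitlab-org/labkit/correlation"
	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Access logs go to a dedicated logrus logger, mirroring NewUpstream above.
	accessLogger := logrus.New()

	handler := log.AccessLogger(inner, log.WithAccessLogger(accessLogger))
	handler = correlation.InjectCorrelationID(handler)

	http.ListenAndServe("127.0.0.1:8080", handler)
}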
@@ -14,9 +14,8 @@ import (
 "github.com/jfbus/httprs"
 "gitlab.com/gitlab-org/labkit/correlation"
+"gitlab.com/gitlab-org/labkit/mask"
 "gitlab.com/gitlab-org/labkit/tracing"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
 )
 // ErrNotAZip will be used when the file is not a zip archive
@@ -56,7 +55,7 @@ func isURL(path string) bool {
 }
 func openHTTPArchive(ctx context.Context, archivePath string) (*zip.Reader, error) {
-scrubbedArchivePath := helper.ScrubURLParams(archivePath)
+scrubbedArchivePath := mask.URL(archivePath)
 req, err := http.NewRequest(http.MethodGet, archivePath, nil)
 if err != nil {
 return nil, fmt.Errorf("can't create HTTP GET %q: %v", scrubbedArchivePath, err)
...
 package main
 import (
+"fmt"
 "io"
 "io/ioutil"
 goLog "log"
 "os"
-"os/signal"
-"syscall"
-"github.com/client9/reopen"
 log "github.com/sirupsen/logrus"
+logkit "gitlab.com/gitlab-org/labkit/log"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
 )
-func reopenLogWriter(l reopen.WriteCloser, sighup chan os.Signal) {
-for range sighup {
-log.Print("Reopening log file")
-l.Reopen()
-}
-}
-func prepareLoggingFile(logFile string) *reopen.FileWriter {
-file, err := reopen.NewFileWriter(logFile)
-if err != nil {
-goLog.Fatalf("Unable to set output log: %s", err)
-}
-sighup := make(chan os.Signal, 1)
-signal.Notify(sighup, syscall.SIGHUP)
-go reopenLogWriter(file, sighup)
-return file
-}
 const (
 jsonLogFormat = "json"
 textLogFormat = "text"
@@ -46,43 +23,57 @@ type logConfiguration struct {
 logFormat string
 }
-func startLogging(config logConfiguration) {
-var accessLogEntry *log.Entry
-var logOutputWriter io.Writer
-if config.logFile != "" {
-logOutputWriter = prepareLoggingFile(config.logFile)
-} else {
-logOutputWriter = os.Stderr
+func startLogging(config logConfiguration) (io.Closer, error) {
+// Golog always goes to stderr
+goLog.SetOutput(os.Stderr)
+logFile := config.logFile
+if logFile == "" {
+logFile = "stderr"
 }
 switch config.logFormat {
 case noneLogType:
-accessLogEntry = nil
-logOutputWriter = ioutil.Discard
+return logkit.Initialize(logkit.WithWriter(ioutil.Discard))
 case jsonLogFormat:
-accessLogEntry = log.WithField("system", "http")
-log.SetFormatter(&log.JSONFormatter{})
+return logkit.Initialize(
+logkit.WithOutputName(logFile),
+logkit.WithFormatter("json"),
+)
 case textLogFormat:
-accessLogger := log.New()
-accessLogger.Formatter = helper.NewAccessLogFormatter()
-accessLogger.Out = logOutputWriter
-accessLogger.SetLevel(log.InfoLevel)
-accessLogEntry = accessLogger.WithField("system", "http")
-log.SetFormatter(&log.TextFormatter{})
+// In this mode, default (non-access) logs will always go to stderr
+return logkit.Initialize(
+logkit.WithOutputName("stderr"),
+logkit.WithFormatter("text"),
+)
 case structuredFormat:
-formatter := &log.TextFormatter{ForceColors: true, EnvironmentOverrideColors: true}
-log.SetFormatter(formatter)
-accessLogEntry = log.WithField("system", "http")
-default:
-log.WithField("logFormat", config.logFormat).Fatal("Unknown logFormat configured")
+return logkit.Initialize(
+logkit.WithOutputName(logFile),
+logkit.WithFormatter("color"),
+)
 }
-helper.SetAccessLoggerEntry(accessLogEntry)
-log.SetOutput(logOutputWriter)
-// Golog always goes to stderr
-goLog.SetOutput(os.Stderr)
+return nil, fmt.Errorf("unknown logFormat: %v", config.logFormat)
+}
+// In text format, we use a separate logger for access logs
+func getAccessLogger(config logConfiguration) (*log.Logger, io.Closer, error) {
+if config.logFormat != "text" {
+return log.StandardLogger(), nil, nil
+}
+logFile := config.logFile
+if logFile == "" {
+logFile = "stderr"
+}
+accessLogger := log.New()
+accessLogger.SetLevel(log.InfoLevel)
+closer, err := logkit.Initialize(
+logkit.WithLogger(accessLogger), // Configure `accessLogger`
+logkit.WithFormatter("combined"), // Use the combined formatter
+logkit.WithOutputName(logFile),
+)
+return accessLogger, closer, err
 }
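Note: both functions above rely on logkit.Initialize, which returns an io.Closer so the log output can be flushed and closed on shutdown (hence the new startLogging signature). A minimal sketch of configuring and tearing down the logger the same way (output name and format are illustrative):

package main

import (
	goLog "log"

	logkit "gitlab.com/gitlab-org/labkit/log"
)

func main() {
	closer, err := logkit.Initialize(
		logkit.WithOutputName("stderr"),
		logkit.WithFormatter("json"),
	)
	if err != nil {
		goLog.Fatalf("unable to configure logger: %v", err)
	}
	defer closer.Close()

	logkit.Info("logging configured")
}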
@@ -24,11 +24,10 @@ import (
 "time"
 "github.com/prometheus/client_golang/prometheus/promhttp"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/labkit/tracing"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
-"gitlab.com/gitlab-org/gitlab-workhorse/internal/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/queueing"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/redis"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/secret"
@@ -78,22 +77,25 @@ func main() {
 os.Exit(0)
 }
-startLogging(logConfig)
-logger := log.NoContext()
+closer, err := startLogging(logConfig)
+if err != nil {
+log.WithError(err).Fatal("Unable to configure logger")
+}
+defer closer.Close()
 tracing.Initialize(tracing.WithServiceName("gitlab-workhorse"))
 backendURL, err := parseAuthBackend(*authBackend)
 if err != nil {
-logger.WithError(err).Fatal("invalid authBackend")
+log.WithError(err).Fatal("Invalid authBackend")
 }
-logger.WithField("version", version).Print("Starting")
+log.WithField("version", version).Print("Starting")
 // Good housekeeping for Unix sockets: unlink before binding
 if *listenNetwork == "unix" {
 if err := os.Remove(*listenAddr); err != nil && !os.IsNotExist(err) {
-logger.Fatal(err)
+log.WithError(err).Fatal("Failed to remove socket")
 }
 }
@@ -102,7 +104,7 @@ func main() {
 listener, err := net.Listen(*listenNetwork, *listenAddr)
 syscall.Umask(oldUmask)
 if err != nil {
-logger.Fatal(err)
+log.WithError(err).Fatal("Failed to listen")
 }
 // The profiler will only be activated by HTTP requests. HTTP
@@ -111,7 +113,10 @@ func main() {
 // effectively disabled by default.
 if *pprofListenAddr != "" {
 go func() {
-logger.Print(http.ListenAndServe(*pprofListenAddr, nil))
+err := http.ListenAndServe(*pprofListenAddr, nil)
+if err != nil {
+log.WithError(err).Error("Failed to start pprof listener")
+}
 }()
 }
@@ -119,7 +124,10 @@ func main() {
 promMux := http.NewServeMux()
 promMux.Handle("/metrics", promhttp.Handler())
 go func() {
-logger.Print(http.ListenAndServe(*prometheusListenAddr, promMux))
+err := http.ListenAndServe(*prometheusListenAddr, promMux)
+if err != nil {
+log.WithError(err).Error("Failed to start prometheus listener")
+}
 }()
 }
@@ -140,7 +148,7 @@ func main() {
 if *configFile != "" {
 cfgFromFile, err := config.LoadConfig(*configFile)
 if err != nil {
-logger.WithField("configFile", *configFile).WithError(err).Fatal("Can not load config file")
+log.WithField("configFile", *configFile).WithError(err).Fatal("Can not load config file")
 }
 cfg.Redis = cfgFromFile.Redis
@@ -151,7 +159,18 @@ func main() {
 }
 }
-up := wrapRaven(upstream.NewUpstream(cfg))
-logger.Fatal(http.Serve(listener, up))
+accessLogger, accessCloser, err := getAccessLogger(logConfig)
+if err != nil {
+log.WithError(err).Fatal("Unable to configure access logger")
+}
+if accessCloser != nil {
+defer accessCloser.Close()
+}
+up := wrapRaven(upstream.NewUpstream(cfg, accessLogger))
+err = http.Serve(listener, up)
+if err != nil {
+log.WithError(err).Fatal("Unable to serve")
+}
 }
@@ -17,11 +17,11 @@ import (
 "testing"
 "time"
-log "github.com/sirupsen/logrus"
+"github.com/sirupsen/logrus"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
+"gitlab.com/gitlab-org/labkit/log"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/api"
 "gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
@@ -43,7 +43,7 @@ var cacheDir = path.Join(scratchDir, "cache")
 func TestMain(m *testing.M) {
 if _, err := os.Stat(path.Join(testRepoRoot, testRepo)); os.IsNotExist(err) {
-log.Fatal("cannot find test repository. Please run 'make prepare-tests'")
+log.WithError(err).Fatal("cannot find test repository. Please run 'make prepare-tests'")
 }
 if err := testhelper.BuildExecutables(); err != nil {
@@ -162,7 +162,7 @@ func TestStaticFileRelativeURL(t *testing.T) {
 ts := testhelper.TestServerWithHandler(regexp.MustCompile(`.`), http.HandlerFunc(http.NotFound))
 defer ts.Close()
 backendURLString := ts.URL + "/my-relative-url"
-log.Print(backendURLString)
+log.Info(backendURLString)
 ws := startWorkhorseServer(backendURLString)
 defer ws.Close()
@@ -496,11 +496,17 @@ func testAuthServer(url *regexp.Regexp, params url.Values, code int, body interf
 return testhelper.TestServerWithHandler(url, func(w http.ResponseWriter, r *http.Request) {
 w.Header().Set("Content-Type", api.ResponseContentType)
+logEntry := log.WithFields(log.Fields{
+"method": r.Method,
+"url": r.URL,
+})
+logEntryWithCode := logEntry.WithField("code", code)
 if params != nil {
 currentParams := r.URL.Query()
 for key := range params {
 if currentParams.Get(key) != params.Get(key) {
-log.Println("UPSTREAM", r.Method, r.URL, "DENY", "invalid auth server params")
+logEntry.Info("UPSTREAM", "DENY", "invalid auth server params")
 w.WriteHeader(http.StatusForbidden)
 return
 }
@@ -509,7 +515,8 @@ func testAuthServer(url *regexp.Regexp, params url.Values, code int, body interf
 // Write pure string
 if data, ok := body.(string); ok {
-log.Println("UPSTREAM", r.Method, r.URL, code)
+logEntryWithCode.Info("UPSTREAM")
 w.WriteHeader(code)
 fmt.Fprint(w, data)
 return
@@ -518,13 +525,15 @@ func testAuthServer(url *regexp.Regexp, params url.Values, code int, body interf
 // Write json string
 data, err := json.Marshal(body)
 if err != nil {
-log.Println("UPSTREAM", r.Method, r.URL, "FAILURE", err)
+logEntry.WithError(err).Error("UPSTREAM")
 w.WriteHeader(503)
 fmt.Fprint(w, err)
 return
 }
-log.Println("UPSTREAM", r.Method, r.URL, code)
+logEntryWithCode.Info("UPSTREAM")
 w.WriteHeader(code)
 w.Write(data)
 })
@@ -544,7 +553,7 @@ func startWorkhorseServer(authBackend string) *httptest.Server {
 func startWorkhorseServerWithConfig(cfg *config.Config) *httptest.Server {
 testhelper.ConfigureSecret()
-u := upstream.NewUpstream(*cfg)
+u := upstream.NewUpstream(*cfg, logrus.StandardLogger())
 return httptest.NewServer(u)
 }
...
@@ -10,8 +10,8 @@ import (
 "path"
 "testing"
-log "github.com/sirupsen/logrus"
 "github.com/stretchr/testify/require"
+"gitlab.com/gitlab-org/labkit/log"
 )
 func TestDeniedLfsDownload(t *testing.T) {
@@ -36,7 +36,8 @@ func allowedXSendfileDownload(t *testing.T, contentFilename string, filePath str
 // Prepare test server and backend
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-log.Println("UPSTREAM", r.Method, r.URL)
+log.WithFields(log.Fields{"method": r.Method, "url": r.URL}).Info("UPSTREAM")
 require.Equal(t, "X-Sendfile", r.Header.Get("X-Sendfile-Type"))
 w.Header().Set("X-Sendfile", contentPath)
@@ -69,7 +70,8 @@ func deniedXSendfileDownload(t *testing.T, contentFilename string, filePath stri
 // Prepare test server and backend
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-log.Println("UPSTREAM", r.Method, r.URL)
+log.WithFields(log.Fields{"method": r.Method, "url": r.URL}).Info("UPSTREAM")
 require.Equal(t, "X-Sendfile", r.Header.Get("X-Sendfile-Type"))
 w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, contentFilename))
...
package correlation
const FieldName = "correlation_id"
...@@ -25,12 +25,12 @@ func InjectCorrelationID(h http.Handler, opts ...InboundHandlerOption) http.Hand ...@@ -25,12 +25,12 @@ func InjectCorrelationID(h http.Handler, opts ...InboundHandlerOption) http.Hand
correlationID = generateRandomCorrelationIDWithFallback(r) correlationID = generateRandomCorrelationIDWithFallback(r)
} }
ctx := ContextWithCorrelation(parent, correlationID)
h.ServeHTTP(w, r.WithContext(ctx))
if config.sendResponseHeader { if config.sendResponseHeader {
setResponseHeader(w, correlationID) setResponseHeader(w, correlationID)
} }
ctx := ContextWithCorrelation(parent, correlationID)
h.ServeHTTP(w, r.WithContext(ctx))
}) })
} }
...@@ -38,6 +38,12 @@ func extractFromRequest(r *http.Request) string { ...@@ -38,6 +38,12 @@ func extractFromRequest(r *http.Request) string {
return r.Header.Get(propagationHeader) return r.Header.Get(propagationHeader)
} }
// setResponseHeader will set the response header, if it has not already
// been set by a downstream response
func setResponseHeader(w http.ResponseWriter, correlationID string) { func setResponseHeader(w http.ResponseWriter, correlationID string) {
w.Header().Set(propagationHeader, correlationID) header := w.Header()
_, exists := header[http.CanonicalHeaderKey(propagationHeader)]
if !exists {
header.Set(propagationHeader, correlationID)
}
} }
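For illustration only (not part of this merge request), a minimal sketch of wiring the correlation middleware above around an HTTP handler; the mux, address and handler body are assumptions.

package main

import (
	"fmt"
	"net/http"

	"gitlab.com/gitlab-org/labkit/correlation"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// The middleware has already stored the correlation ID in the request context.
		fmt.Fprintln(w, correlation.ExtractFromContext(r.Context()))
	})

	// InjectCorrelationID extracts an incoming correlation ID or generates one,
	// and (when configured to) echoes it back in the response header.
	http.ListenAndServe(":8080", correlation.InjectCorrelationID(mux))
}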
// Package raven allows correlation information to be added to raven requests.
//
// Deprecated: Use gitlab.com/gitlab-org/labkit/errortracking instead.
package raven
...@@ -10,9 +10,11 @@ import ( ...@@ -10,9 +10,11 @@ import (
const ravenSentryExtraKey = "gitlab.CorrelationID" const ravenSentryExtraKey = "gitlab.CorrelationID"
// SetExtra will augment a raven message with the CorrelationID // SetExtra will augment a raven message with the CorrelationID.
// An existing `extra` can be passed in, but if it's nil // An existing `extra` can be passed in, but if it's nil
// a new one will be created // a new one will be created.
//
// Deprecated: Use gitlab.com/gitlab-org/labkit/errortracking instead.
func SetExtra(ctx context.Context, extra raven.Extra) raven.Extra { func SetExtra(ctx context.Context, extra raven.Extra) raven.Extra {
if extra == nil { if extra == nil {
extra = raven.Extra{} extra = raven.Extra{}
......
package log
import (
"bufio"
"net"
"net/http"
"time"
"github.com/sebest/xff"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/labkit/correlation"
"gitlab.com/gitlab-org/labkit/mask"
)
// AccessLogger will generate access logs for the service.
func AccessLogger(h http.Handler, opts ...AccessLoggerOption) http.Handler {
config := applyAccessLoggerOptions(opts)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lrw := newLoggingResponseWriter(w, &config)
defer lrw.requestFinished(r)
h.ServeHTTP(lrw, r)
})
}
func newLoggingResponseWriter(rw http.ResponseWriter, config *accessLoggerConfig) notifiableResponseWriter {
out := loggingResponseWriter{
rw: rw,
started: time.Now(),
config: config,
}
// If the underlying response writer supports hijacking,
// we need to ensure that we do too
if _, ok := rw.(http.Hijacker); ok {
return &hijackingResponseWriter{out}
}
return &out
}
// notifiableResponseWriter is a response writer that can be notified when the request is complete,
// via the requestFinished method
type notifiableResponseWriter interface {
http.ResponseWriter
// requestFinished is called by the middleware when the request has completed
requestFinished(r *http.Request)
}
type loggingResponseWriter struct {
rw http.ResponseWriter
status int
wroteHeader bool
written int64
started time.Time
config *accessLoggerConfig
}
func (l *loggingResponseWriter) Header() http.Header {
return l.rw.Header()
}
func (l *loggingResponseWriter) Write(data []byte) (int, error) {
if !l.wroteHeader {
l.WriteHeader(http.StatusOK)
}
n, err := l.rw.Write(data)
l.written += int64(n)
return n, err
}
func (l *loggingResponseWriter) WriteHeader(status int) {
if l.wroteHeader {
return
}
l.wroteHeader = true
l.status = status
l.rw.WriteHeader(status)
}
func (l *loggingResponseWriter) accessLogFields(r *http.Request) logrus.Fields {
duration := time.Since(l.started)
fields := l.config.extraFields(r)
fieldsBitMask := l.config.fields
// Optionally add built in fields
if fieldsBitMask&CorrelationID != 0 {
fields[correlation.FieldName] = correlation.ExtractFromContext(r.Context())
}
if fieldsBitMask&HTTPHost != 0 {
fields[httpHostField] = r.Host
}
if fieldsBitMask&HTTPRemoteIP != 0 {
fields[httpRemoteIPField] = getRemoteIP(r)
}
if fieldsBitMask&HTTPRemoteAddr != 0 {
fields[httpRemoteAddrField] = r.RemoteAddr
}
if fieldsBitMask&HTTPRequestMethod != 0 {
fields[httpRequestMethodField] = r.Method
}
if fieldsBitMask&HTTPURI != 0 {
fields[httpURIField] = mask.URL(r.RequestURI)
}
if fieldsBitMask&HTTPProto != 0 {
fields[httpProtoField] = r.Proto
}
if fieldsBitMask&HTTPResponseStatusCode != 0 {
fields[httpResponseStatusCodeField] = l.status
}
if fieldsBitMask&HTTPResponseSize != 0 {
fields[httpResponseSizeField] = l.written
}
if fieldsBitMask&HTTPRequestReferrer != 0 {
fields[httpRequestReferrerField] = mask.URL(r.Referer())
}
if fieldsBitMask&HTTPUserAgent != 0 {
fields[httpUserAgentField] = r.UserAgent()
}
if fieldsBitMask&RequestDuration != 0 {
fields[requestDurationField] = int64(duration / time.Millisecond)
}
if fieldsBitMask&System != 0 {
fields[systemField] = "http"
}
return fields
}
func (l *loggingResponseWriter) requestFinished(r *http.Request) {
l.config.logger.WithFields(l.accessLogFields(r)).Info("access")
}
// hijackingResponseWriter is like a loggingResponseWriter that supports the http.Hijacker interface
type hijackingResponseWriter struct {
loggingResponseWriter
}
func (l *hijackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
// The only way to get here is through newLoggingResponseWriter(), which
// checks that this cast will be valid.
hijacker := l.rw.(http.Hijacker)
return hijacker.Hijack()
}
func getRemoteIP(r *http.Request) string {
remoteAddr := xff.GetRemoteAddr(r)
host, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}
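A hedged sketch of how the AccessLogger middleware above might wrap a handler; the mux, route and listen address are illustrative.

package main

import (
	"fmt"
	"net/http"

	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	// Every completed request is logged once, with the fields assembled in accessLogFields.
	http.ListenAndServe(":8080", log.AccessLogger(mux))
}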
package log
// AccessLogField is used to select which fields are recorded in the access log. See WithFieldsExcluded.
type AccessLogField uint16
const (
// CorrelationID field will record the Correlation-ID in the access log.
CorrelationID AccessLogField = 1 << iota
// HTTPHost field will record the Host Header in the access log.
HTTPHost
// HTTPRemoteIP field will record the remote caller in the access log, taking Real-IP and X-Forwarded-For headers into account.
HTTPRemoteIP
// HTTPRemoteAddr field will record the remote socket endpoint in the access log.
HTTPRemoteAddr
// HTTPRequestMethod field will record the HTTP method in the access log.
HTTPRequestMethod
// HTTPURI field will record the URI, including parameters.
HTTPURI
// HTTPProto field will record the protocol used to make the request in the access log.
HTTPProto
// HTTPResponseStatusCode field will record the response HTTP status code in the access log.
HTTPResponseStatusCode
// HTTPResponseSize field will record the response size, in bytes, in the access log.
HTTPResponseSize
// HTTPRequestReferrer field will record the Referer header in the access log.
HTTPRequestReferrer
// HTTPUserAgent field will record the User-Agent header in the access log.
HTTPUserAgent
// RequestDuration field will record the request duration in the access log.
RequestDuration
// System field will record the system for the log entry.
System
)
const defaultEnabledFields = ^AccessLogField(0) // By default, all fields are enabled
// For field definitions, consult the Elastic Common Schema field reference
// https://www.elastic.co/guide/en/ecs/current/ecs-field-reference.html.
const (
httpHostField = "host" // ECS: url.domain
httpRemoteIPField = "remote_ip" // ECS: client.ip
httpRemoteAddrField = "remote_addr" // ECS: client.address
httpRequestMethodField = "method" // ECS: http.request.method
httpURIField = "uri" // ECS: url.path + `?` + url.query
httpProtoField = "proto" // ECS: url.scheme
httpResponseStatusCodeField = "status" // ECS: http.response.status_code
httpResponseSizeField = "written_bytes" // ECS: http.response.body.bytes
httpRequestReferrerField = "referrer" // ECS: http.request.referrer
httpUserAgentField = "user_agent" // ECS: user_agent.original
requestDurationField = "duration_ms" // ECS: no mapping
systemField = "system" // ECS: no mapping
)
package log
import (
"net/http"
"github.com/sirupsen/logrus"
)
// ExtraFieldsGeneratorFunc allows extra fields to be included in the access log.
type ExtraFieldsGeneratorFunc func(r *http.Request) Fields
// The configuration for an access logger
type accessLoggerConfig struct {
logger *logrus.Logger
extraFields ExtraFieldsGeneratorFunc
fields AccessLogField
}
func nullExtraFieldsGenerator(r *http.Request) Fields {
return Fields{}
}
// AccessLoggerOption will configure an access logger handler.
type AccessLoggerOption func(*accessLoggerConfig)
func applyAccessLoggerOptions(opts []AccessLoggerOption) accessLoggerConfig {
config := accessLoggerConfig{
logger: logger,
extraFields: nullExtraFieldsGenerator,
fields: defaultEnabledFields,
}
for _, v := range opts {
v(&config)
}
return config
}
// WithExtraFields allows extra fields to be passed into the access logger, based on the request.
func WithExtraFields(f ExtraFieldsGeneratorFunc) AccessLoggerOption {
return func(config *accessLoggerConfig) {
config.extraFields = f
}
}
// WithFieldsExcluded allows fields to be excluded from the access log. For example, backend services may not require the referer or user agent fields.
func WithFieldsExcluded(fields AccessLogField) AccessLoggerOption {
return func(config *accessLoggerConfig) {
config.fields = config.fields &^ fields
}
}
// WithAccessLogger configures the logger to be used with the access logger.
func WithAccessLogger(logger *logrus.Logger) AccessLoggerOption {
return func(config *accessLoggerConfig) {
config.logger = logger
}
}
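Building on the options above, a small assumed example that trims noisy fields and adds a per-request field; the field name and handler are made up.

package main

import (
	"net/http"

	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	mux := http.NewServeMux()

	// Exclude the User-Agent and referrer fields and attach an extra field to every entry.
	handler := log.AccessLogger(
		mux,
		log.WithFieldsExcluded(log.HTTPUserAgent|log.HTTPRequestReferrer),
		log.WithExtraFields(func(r *http.Request) log.Fields {
			return log.Fields{"handler": "example"}
		}),
	)

	http.ListenAndServe(":8080", handler)
}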
package helper package log
import ( import (
"time" "time"
) )
// Clock interface provides the time // Clock interface provides the time
type Clock interface { type clock interface {
Now() time.Time Now() time.Time
} }
// RealClock is the default time implementation // realClock is the default time implementation
type RealClock struct{} type realClock struct{}
// Now returns the time // Now returns the time
func (RealClock) Now() time.Time { return time.Now() } func (realClock) Now() time.Time { return time.Now() }
// StubClock is the default time implementation // stubClock is a stub time implementation for use in tests
type StubClock struct { type stubClock struct {
StubTime time.Time StubTime time.Time
} }
// Now returns a stub time // Now returns a stub time
func (c *StubClock) Now() time.Time { return c.StubTime } func (c *stubClock) Now() time.Time { return c.StubTime }
package helper package log
import ( import (
"bytes" "bytes"
...@@ -15,24 +15,24 @@ func forNil(v interface{}, otherwise interface{}) interface{} { ...@@ -15,24 +15,24 @@ func forNil(v interface{}, otherwise interface{}) interface{} {
return v return v
} }
// accessLogFormatter formats logs into a format similar to the combined access log format // combinedAccessLogFormatter formats logs into a format similar to the combined access log format
// See https://httpd.apache.org/docs/1.3/logs.html#combined // See https://httpd.apache.org/docs/1.3/logs.html#combined
type accessLogFormatter struct { type combinedAccessLogFormatter struct {
clock Clock clock clock
} }
// Format renders a single log entry // Format renders a single log entry
func (f *accessLogFormatter) Format(entry *log.Entry) ([]byte, error) { func (f *combinedAccessLogFormatter) Format(entry *log.Entry) ([]byte, error) {
host := forNil(entry.Data["host"], "-") host := forNil(entry.Data[httpHostField], "-")
remoteAddr := forNil(entry.Data["remoteAddr"], "") remoteAddr := forNil(entry.Data[httpRemoteIPField], "")
method := forNil(entry.Data["method"], "") method := forNil(entry.Data[httpRequestMethodField], "")
uri := forNil(entry.Data["uri"], "") uri := forNil(entry.Data[httpURIField], "")
proto := forNil(entry.Data["proto"], "") proto := forNil(entry.Data[httpProtoField], "")
status := forNil(entry.Data["status"], 0) status := forNil(entry.Data[httpResponseStatusCodeField], 0)
written := forNil(entry.Data["written"], 0) written := forNil(entry.Data[httpResponseSizeField], 0)
referer := forNil(entry.Data["referer"], "") referer := forNil(entry.Data[httpRequestReferrerField], "")
userAgent := forNil(entry.Data["userAgent"], "") userAgent := forNil(entry.Data[httpUserAgentField], "")
duration := forNil(entry.Data["duration"], 0.0) duration := forNil(entry.Data[requestDurationField], 0.0)
now := f.clock.Now().Format("2006/01/02:15:04:05 -0700") now := f.clock.Now().Format("2006/01/02:15:04:05 -0700")
...@@ -47,7 +47,7 @@ func (f *accessLogFormatter) Format(entry *log.Entry) ([]byte, error) { ...@@ -47,7 +47,7 @@ func (f *accessLogFormatter) Format(entry *log.Entry) ([]byte, error) {
return buf.Bytes(), err return buf.Bytes(), err
} }
// NewAccessLogFormatter returns a new formatter for combined access logs // newCombinedAccessLogFormatter returns a new formatter for combined access logs
func NewAccessLogFormatter() log.Formatter { func newCombinedAccessLogFormatter() log.Formatter {
return &accessLogFormatter{clock: &RealClock{}} return &combinedAccessLogFormatter{clock: &realClock{}}
} }
/*
Package log provides sensible logging defaults for LabKit.
LabKit uses Logrus for logging.
Applications that use LabKit should rely on this package rather than on logrus directly.
This package provides some utility methods for working with LabKit, including:
1. Initializing Logrus in a consistent manner
1. Using GitLab's defaults with logrus
1. Passing correlation-ids between contexts and logrus
Please review the examples for more details of how to use this package.
*/
package log
package log
import (
"io"
"log"
"os"
"os/signal"
"syscall"
"github.com/client9/reopen"
"github.com/sirupsen/logrus"
)
type nopCloser struct{}
func (nopCloser) Close() error { return nil }
// Initialize will configure the logger based on the options passed. It will
// validate the options and, if validation fails, fall back to the defaults
// while logging a warning through the configured logger.
func Initialize(opts ...LoggerOption) (io.Closer, error) {
conf := applyLoggerOptions(opts)
// Being unable to open the output file will cause an error
writer, closer, err := getOutputWriter(conf)
if err != nil {
return closer, err
}
conf.logger.SetFormatter(conf.formatter)
conf.logger.SetLevel(conf.level)
conf.logger.SetOutput(writer)
// Only output the warnings _after_ the logger has been configured
for _, warning := range conf.warnings {
conf.logger.Warn(warning)
}
return closer, nil
}
func getOutputWriter(conf *loggerConfig) (io.Writer, io.Closer, error) {
if conf.writer != nil {
return conf.writer, nopCloser{}, nil
}
// When writing to a file, use `reopen` so that we can
// reopen the file on SIGHUP signals
f, err := reopen.NewFileWriterMode(conf.outputPath, 0644)
if err != nil {
return f, nopCloser{}, err
}
isMainLogger := conf.logger == logger
sighup := make(chan os.Signal, 1)
signal.Notify(sighup, syscall.SIGHUP)
go listenForSignalHangup(f, isMainLogger, conf.outputPath, sighup)
return f, f, nil
}
// listenForSignalHangup listens for SIGHUP signals and reopens the underlying file
func listenForSignalHangup(l reopen.WriteCloser, isMainLogger bool, logFilePath string, sighup chan os.Signal) {
for v := range sighup {
// Specifically, do _not_ write to the log that is being reopened,
// but log this to the _main_ log file instead: the log being reopened
// might be specialised (e.g. an access log), which would lead to an incorrectly formatted entry
logger.WithFields(logrus.Fields{"signal": v, "path": logFilePath}).Print("Reopening log file on signal")
err := l.Reopen()
if err != nil {
if isMainLogger {
// Main logger failed to reopen; make a last-ditch effort to notify the user, but don't
// do this for auxiliary loggers, since we may get double-logs
log.Printf("Unable to reopen log file '%s' after %v. Error %v", logFilePath, v, err)
} else {
logger.WithError(err).WithFields(logrus.Fields{"signal": v, "path": logFilePath}).Print("Failed to reopen log file")
}
}
}
}
package log
import (
"context"
"os"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/labkit/correlation"
)
var logger = logrus.StandardLogger()
func init() {
// Enforce our own defaults on the logrus stdlogger
logger.Out = os.Stderr
logger.Formatter = &logrus.TextFormatter{}
logger.Level = logrus.InfoLevel
}
// ContextLogger will set the correlation ID in the logger based on the context.
// This reference should not be held outside of the scope of the context.
func ContextLogger(ctx context.Context) *logrus.Entry {
return logger.WithFields(ContextFields(ctx))
}
// WithContextFields is a utility method for logging with context and fields
func WithContextFields(ctx context.Context, fields Fields) *logrus.Entry {
return logger.WithFields(ContextFields(ctx)).WithFields(fields)
}
// ContextFields returns a logrus Fields structure with the CorrelationID set
func ContextFields(ctx context.Context) Fields {
correlationID := correlation.ExtractFromContext(ctx)
return logrus.Fields{correlation.FieldName: correlationID}
}
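An assumed usage sketch for the context helpers above: inside a handler wrapped by the correlation middleware, both log entries carry the correlation_id field.

package main

import (
	"net/http"

	"gitlab.com/gitlab-org/labkit/correlation"
	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ContextLogger and WithContextFields both pull the correlation ID from the context.
		log.ContextLogger(r.Context()).Info("request received")
		log.WithContextFields(r.Context(), log.Fields{"path": r.URL.Path}).Info("request handled")
	})

	http.ListenAndServe(":8080", correlation.InjectCorrelationID(handler))
}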
package log
import (
"fmt"
"io"
"os"
"github.com/sirupsen/logrus"
)
type loggerConfig struct {
logger *logrus.Logger
level logrus.Level
formatter logrus.Formatter
outputPath string
writer io.Writer
// A list of warnings that will be emitted once the logger is configured
warnings []string
}
// LoggerOption will configure a new logrus Logger
type LoggerOption func(*loggerConfig)
func applyLoggerOptions(opts []LoggerOption) *loggerConfig {
conf := loggerConfig{
logger: logger,
level: logrus.InfoLevel,
formatter: &logrus.TextFormatter{},
writer: os.Stdout,
}
for _, v := range opts {
v(&conf)
}
return &conf
}
// WithFormatter allows setting the format to `text`, `json`, `color` or `combined`. If
// the input is not recognized, it falls back to `text` and emits a warning.
// More details of these formats:
// * `text` - human-readable.
// * `json` - machine-readable, newline-delimited JSON.
// * `color` - human-readable, in color. Useful for development.
// * `combined` - httpd combined access log format. Good for legacy access log parsers.
func WithFormatter(format string) LoggerOption {
return func(conf *loggerConfig) {
switch format {
case "text":
conf.formatter = &logrus.TextFormatter{}
case "color":
conf.formatter = &logrus.TextFormatter{ForceColors: true, EnvironmentOverrideColors: true}
case "json":
conf.formatter = &logrus.JSONFormatter{}
case "combined":
conf.formatter = newCombinedAccessLogFormatter()
default:
conf.warnings = append(conf.warnings, fmt.Sprintf("unknown logging format %s, ignoring option", format))
}
}
}
// WithLogLevel is used to set the log level when defaulting to `info` is not
// wanted. Other options are: `debug`, `warn`, `error`, `fatal`, and `panic`.
func WithLogLevel(level string) LoggerOption {
return func(conf *loggerConfig) {
logrusLevel, err := logrus.ParseLevel(level)
if err != nil {
conf.warnings = append(conf.warnings, fmt.Sprintf("unknown log level, ignoring option: %v", err))
} else {
conf.level = logrusLevel
}
}
}
// WithOutputName allows customization of the sink of the logger. Output is either:
// `stdout`, `stderr`, or a path to a file.
func WithOutputName(outputName string) LoggerOption {
return func(conf *loggerConfig) {
switch outputName {
case "stdout":
conf.writer = os.Stdout
case "stderr":
conf.writer = os.Stderr
default:
conf.writer = nil
conf.outputPath = outputName
}
}
}
// WithWriter allows the writer to be customized. The application is responsible for closing the writer manually.
func WithWriter(writer io.Writer) LoggerOption {
return func(conf *loggerConfig) {
conf.writer = writer
}
}
// WithLogger allows you to configure a custom logrus logger using the `Initialize` method
func WithLogger(logger *logrus.Logger) LoggerOption {
return func(conf *loggerConfig) {
conf.logger = logger
}
}
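A sketch of initializing the logger with the options above; the output path and level are illustrative, not recommendations.

package main

import (
	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	// JSON output at debug level, written to a file that is reopened on SIGHUP.
	closer, err := log.Initialize(
		log.WithFormatter("json"),
		log.WithLogLevel("debug"),
		log.WithOutputName("/var/log/example/app.log"),
	)
	if err != nil {
		// Initialize falls back to its defaults on error, so this is reported rather than fatal.
		log.WithError(err).Info("logger initialization fell back to defaults")
	}
	defer closer.Close()

	log.Info("logger configured")
}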
package log
import (
"github.com/sirupsen/logrus"
)
// Note that we specifically discourage the use of Fatal and Error by excluding them from this API,
// since we prefer structured logging with `.WithError(err)`.
// Fields is an alias for the underlying logger's Fields type.
// Using this alias saves clients from having to import
// two distinct logging packages, which can be confusing.
type Fields = logrus.Fields
// New is a delegator method for logrus.New
func New() *logrus.Logger {
return logrus.New()
}
// Info is a delegator method for logrus.Info
// Info is an exception to our rule about discouraging non-structured use, as there are valid
// reasons for simply emitting a single log line.
func Info(args ...interface{}) {
logger.Info(args...)
}
// WithField is a delegator method for logrus.WithField
func WithField(key string, value interface{}) *logrus.Entry {
return logger.WithField(key, value)
}
// WithFields is a delegator method for logrus.WithFields
func WithFields(fields Fields) *logrus.Entry {
return logger.WithFields(fields)
}
// WithError is a delegator method for logrus.WithError
func WithError(err error) *logrus.Entry {
return logger.WithError(err)
}
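And a minimal, assumed example of the structured helpers above; the error and fields are made up.

package main

import (
	"errors"

	"gitlab.com/gitlab-org/labkit/log"
)

func main() {
	log.WithFields(log.Fields{"project": "example"}).Info("starting")

	// Prefer attaching the error as a structured field over formatting it into the message.
	err := errors.New("connection refused")
	log.WithError(err).Error("upstream request failed")
}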
package mask
// RedactionString represents the filtered value used in place of sensitive data in the mask package
const RedactionString = "[FILTERED]"
/*
Package mask offers functionality for non-reversible masking
of sensitive data in the GitLab application.
It provides the ability to determine whether or not a URL parameter or
header should be considered sensitive.
Additionally, it provides masking functionality to redact sensitive
information before it is emitted via logging, structured logging, Sentry
and distributed tracing.
*/
package mask
package mask
import (
"bytes"
"regexp"
)
var parameterMatches = []string{
`token$`,
`password`,
`secret`,
`key$`,
`signature`,
`^authorization$`,
`^note$`,
`^certificate$`,
`^encrypted_key$`,
`^hook$`,
`^import_url$`,
`^otp_attempt$`,
`^sentry_dsn$`,
`^trace$`,
`^variables$`,
`^content$`,
}
var headerMatches = []string{
`token$`,
`password`,
`secret`,
`key$`,
`signature`,
`^authorization$`,
}
// parameterMatcher is precompiled for performance reasons. Keep in mind that
// `IsSensitiveParam`, `IsSensitiveHeader` and `URL` may be used in tight loops
// which may be sensitive to performance degradations.
var parameterMatcher = compileRegexpFromStrings(parameterMatches)
// headerMatcher is precompiled for performance reasons, same as `parameterMatcher`
var headerMatcher = compileRegexpFromStrings(headerMatches)
func compileRegexpFromStrings(paramNames []string) *regexp.Regexp {
var buffer bytes.Buffer
buffer.WriteString("(?i)")
for i, v := range paramNames {
if i > 0 {
buffer.WriteString("|")
}
buffer.WriteString(v)
}
return regexp.MustCompile(buffer.String())
}
package mask
// IsSensitiveParam will return true if the given parameter name should be masked for sensitivity
func IsSensitiveParam(name string) bool {
return parameterMatcher.MatchString(name)
}
// IsSensitiveHeader will return true if the given header name should be masked for sensitivity
func IsSensitiveHeader(name string) bool {
return headerMatcher.MatchString(name)
}
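An illustrative check against the matchers above; the parameter and header names are examples only.

package main

import (
	"fmt"

	"gitlab.com/gitlab-org/labkit/mask"
)

func main() {
	fmt.Println(mask.IsSensitiveParam("private_token"))  // true: matches `token$`
	fmt.Println(mask.IsSensitiveParam("page"))           // false: matches nothing
	fmt.Println(mask.IsSensitiveHeader("Authorization")) // true: matches `^authorization$`
}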
package mask
import (
"bytes"
"net/url"
)
// URL will mask the sensitive components in a URL with `[FILTERED]`.
// The list of sensitive parameters should maintain parity with the list in
// GitLab-CE, maintained at https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/application.rb.
// Based on https://stackoverflow.com/a/52965552/474597.
func URL(originalURL string) string {
u, err := url.Parse(originalURL)
if err != nil {
return "<invalid URL>"
}
redactionBytes := []byte(RedactionString)
buf := bytes.NewBuffer(make([]byte, 0, len(originalURL)))
for i, queryPart := range bytes.Split([]byte(u.RawQuery), []byte("&")) {
if i != 0 {
buf.WriteByte('&')
}
splitParam := bytes.SplitN(queryPart, []byte("="), 2)
if len(splitParam) == 2 {
buf.Write(splitParam[0])
buf.WriteByte('=')
if parameterMatcher.Match(splitParam[0]) {
buf.Write(redactionBytes)
} else {
buf.Write(splitParam[1])
}
} else {
buf.Write(queryPart)
}
}
u.RawQuery = buf.String()
return u.String()
}
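A short sketch of the masking behaviour; the URL is invented for the example.

package main

import (
	"fmt"

	"gitlab.com/gitlab-org/labkit/mask"
)

func main() {
	// private_token matches `token$`, so only its value is redacted.
	fmt.Println(mask.URL("https://gitlab.example.com/api/v4/projects?private_token=secret123&page=2"))
	// https://gitlab.example.com/api/v4/projects?private_token=[FILTERED]&page=2
}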
/* /*
Package `tracing` is the primary entrypoint into LabKit's distributed tracing functionality. Package tracing is the primary entrypoint into LabKit's distributed tracing functionality.
(This documentation assumes some minimal knowledge of Distributed Tracing, and uses (This documentation assumes some minimal knowledge of Distributed Tracing, and uses
tracing terminology without providing definitions. Please review tracing terminology without providing definitions. Please review
...@@ -82,5 +82,17 @@ headers is implementation specific. ...@@ -82,5 +82,17 @@ headers is implementation specific.
It is important to ensure that the context is passed into the outgoing request, using `req.WithContext(ctx)` It is important to ensure that the context is passed into the outgoing request, using `req.WithContext(ctx)`
so that the correct span information can be injected into the request headers. so that the correct span information can be injected into the request headers.
*Propagating tracing information to child processes*
Sometimes we want a trace to continue from a parent process to a spawned child process. For this,
the tracing package provides `tracing.NewEnvInjector()` and `tracing.ExtractFromEnv()`, for the
parent and child processes respectively.
NewEnvInjector() will configure a []string array of environment variables, ensuring they have the
correct tracing configuration and any trace and span identifiers; it should be called in the parent process.
ExtractFromEnv() should be called in the child process and will extract the trace and span information from the environment.
Please review the examples in the godocs for details of how to implement both approaches.
*/ */
package tracing package tracing
package tracing
import (
"context"
"os"
"strings"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"gitlab.com/gitlab-org/labkit/correlation"
)
// ExtractFromEnv will extract a span from the environment after it has been passed in
// from the parent process. Returns a new context, and a deferrable function, which
// should be called on process termination
func ExtractFromEnv(ctx context.Context, opts ...ExtractFromEnvOption) (context.Context, func()) {
/* config not yet used */ applyExtractFromEnvOptions(opts)
tracer := opentracing.GlobalTracer()
// Extract the Correlation-ID
envMap := environAsMap(os.Environ())
correlationID := envMap[envCorrelationIDKey]
if correlationID != "" {
ctx = correlation.ContextWithCorrelation(ctx, correlationID)
}
// Attempt to deserialize tracing identifiers
wireContext, err := tracer.Extract(
opentracing.TextMap,
opentracing.TextMapCarrier(envMap))
if err != nil {
/* Clients could send bad data, in which case we simply ignore it */
return ctx, func() {}
}
// Create the span referring to the RPC client if available.
// If wireContext == nil, a root span will be created.
additionalStartSpanOpts := []opentracing.StartSpanOption{
ext.RPCServerOption(wireContext),
}
if correlationID != "" {
additionalStartSpanOpts = append(additionalStartSpanOpts, opentracing.Tag{Key: "correlation_id", Value: correlationID})
}
serverSpan := opentracing.StartSpan(
"execute",
additionalStartSpanOpts...,
)
ctx = opentracing.ContextWithSpan(ctx, serverSpan)
return ctx, func() { serverSpan.Finish() }
}
func environAsMap(env []string) map[string]string {
envMap := make(map[string]string, len(env))
for _, v := range env {
s := strings.SplitN(v, "=", 2)
envMap[s[0]] = s[1]
}
return envMap
}
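A hedged sketch of the child-process side described in the package documentation; in a real service a tracer would normally be registered (for example via tracing.Initialize) before this call.

package main

import (
	"context"

	"gitlab.com/gitlab-org/labkit/log"
	"gitlab.com/gitlab-org/labkit/tracing"
)

func main() {
	// Pick up the trace, span and correlation ID handed over by the parent process.
	ctx, finished := tracing.ExtractFromEnv(context.Background())
	defer finished()

	log.ContextLogger(ctx).Info("child process started")
}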
package tracing
type extractFromEnvConfig struct{}
// ExtractFromEnvOption will configure an environment injector
type ExtractFromEnvOption func(*extractFromEnvConfig)
func applyExtractFromEnvOptions(opts []ExtractFromEnvOption) extractFromEnvConfig {
config := extractFromEnvConfig{}
for _, v := range opts {
v(&config)
}
return config
}
package tracing
import (
"context"
"fmt"
"os"
"sort"
opentracing "github.com/opentracing/opentracing-go"
"gitlab.com/gitlab-org/labkit/correlation"
logkit "gitlab.com/gitlab-org/labkit/log"
)
// envCorrelationIDKey is used to pass the current correlation-id over to the child process
const envCorrelationIDKey = "CORRELATION_ID"
// EnvInjector will inject tracing information into an environment in preparation for
// spawning a child process. This includes trace and span identifiers, as well
// as the GITLAB_TRACING configuration. Will gracefully degrade if tracing is
// not configured, or an active span is not currently available.
type EnvInjector func(ctx context.Context, env []string) []string
// NewEnvInjector will create a new environment injector
func NewEnvInjector(opts ...EnvInjectorOption) EnvInjector {
/* config not yet used */ applyEnvInjectorOptions(opts)
return func(ctx context.Context, env []string) []string {
envMap := map[string]string{}
// Pass the Correlation-ID through the environment if set
correlationID := correlation.ExtractFromContext(ctx)
if correlationID != "" {
envMap[envCorrelationIDKey] = correlationID
}
// Also include the GITLAB_TRACING configuration so that
// the child process knows how to configure itself
v, ok := os.LookupEnv(tracingEnvKey)
if ok {
envMap[tracingEnvKey] = v
}
span := opentracing.SpanFromContext(ctx)
if span == nil {
// If no active span, short circuit
return appendMapToEnv(env, envMap)
}
carrier := opentracing.TextMapCarrier(envMap)
err := span.Tracer().Inject(span.Context(), opentracing.TextMap, carrier)
if err != nil {
logkit.ContextLogger(ctx).WithError(err).Error("tracing span injection failed")
}
return appendMapToEnv(env, envMap)
}
}
// appendMapToEnv takes a map of key-value pairs and appends them to a
// slice of environment variables in `K=V` form
func appendMapToEnv(env []string, envMap map[string]string) []string {
additionalItems := []string{}
for k, v := range envMap {
additionalItems = append(additionalItems, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(additionalItems)
return append(env, additionalItems...)
}
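And the matching parent-process side, as an assumed sketch; the child command path is made up.

package main

import (
	"context"
	"os"
	"os/exec"

	"gitlab.com/gitlab-org/labkit/tracing"
)

func main() {
	injector := tracing.NewEnvInjector()

	cmd := exec.Command("/usr/local/bin/some-child")
	// Pass the current environment plus the trace, span and correlation identifiers.
	cmd.Env = injector(context.Background(), os.Environ())

	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}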
package tracing
type envInjectorConfig struct{}
// EnvInjectorOption will configure an environment injector
type EnvInjectorOption func(*envInjectorConfig)
func applyEnvInjectorOptions(opts []EnvInjectorOption) envInjectorConfig {
config := envInjectorConfig{}
for _, v := range opts {
v(&config)
}
return config
}
...@@ -5,10 +5,10 @@ package impl ...@@ -5,10 +5,10 @@ package impl
import ( import (
"fmt" "fmt"
"io" "io"
"log"
"strconv" "strconv"
opentracing "github.com/opentracing/opentracing-go" opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
jaegercfg "github.com/uber/jaeger-client-go/config" jaegercfg "github.com/uber/jaeger-client-go/config"
jaegerlog "github.com/uber/jaeger-client-go/log" jaegerlog "github.com/uber/jaeger-client-go/log"
) )
......
...@@ -6,10 +6,10 @@ import ( ...@@ -6,10 +6,10 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"log"
lightstep "github.com/lightstep/lightstep-tracer-go" lightstep "github.com/lightstep/lightstep-tracer-go"
opentracing "github.com/opentracing/opentracing-go" opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
) )
type lightstepCloser struct { type lightstepCloser struct {
......
...@@ -2,9 +2,9 @@ package tracing ...@@ -2,9 +2,9 @@ package tracing
import ( import (
"io" "io"
"log"
opentracing "github.com/opentracing/opentracing-go" opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/labkit/tracing/connstr" "gitlab.com/gitlab-org/labkit/tracing/connstr"
"gitlab.com/gitlab-org/labkit/tracing/impl" "gitlab.com/gitlab-org/labkit/tracing/impl"
) )
...@@ -25,7 +25,7 @@ func Initialize(opts ...InitializationOption) io.Closer { ...@@ -25,7 +25,7 @@ func Initialize(opts ...InitializationOption) io.Closer {
driverName, options, err := connstr.Parse(config.connectionString) driverName, options, err := connstr.Parse(config.connectionString)
if err != nil { if err != nil {
log.Printf("unable to parse connection: %v", err) log.WithError(err).Infoln("unable to parse connection")
return &nopCloser{} return &nopCloser{}
} }
...@@ -35,14 +35,14 @@ func Initialize(opts ...InitializationOption) io.Closer { ...@@ -35,14 +35,14 @@ func Initialize(opts ...InitializationOption) io.Closer {
tracer, closer, err := impl.New(driverName, options) tracer, closer, err := impl.New(driverName, options)
if err != nil { if err != nil {
log.Printf("skipping tracing configuration step: %v", err) log.WithError(err).Warn("skipping tracing configuration step")
return &nopCloser{} return &nopCloser{}
} }
if tracer == nil { if tracer == nil {
log.Printf("no tracer provided, tracing will be disabled") log.Warn("no tracer provided, tracing will be disabled")
} else { } else {
log.Printf("Tracing enabled") log.Info("Tracing enabled")
opentracing.SetGlobalTracer(tracer) opentracing.SetGlobalTracer(tracer)
} }
......
...@@ -5,6 +5,8 @@ import ( ...@@ -5,6 +5,8 @@ import (
"path" "path"
) )
const tracingEnvKey = "GITLAB_TRACING"
// The configuration for InjectCorrelationID // The configuration for InjectCorrelationID
type initializationConfig struct { type initializationConfig struct {
serviceName string serviceName string
...@@ -17,7 +19,7 @@ type InitializationOption func(*initializationConfig) ...@@ -17,7 +19,7 @@ type InitializationOption func(*initializationConfig)
func applyInitializationOptions(opts []InitializationOption) initializationConfig { func applyInitializationOptions(opts []InitializationOption) initializationConfig {
config := initializationConfig{ config := initializationConfig{
serviceName: path.Base(os.Args[0]), serviceName: path.Base(os.Args[0]),
connectionString: os.Getenv("GITLAB_TRACING"), connectionString: os.Getenv(tracingEnvKey),
} }
for _, v := range opts { for _, v := range opts {
v(&config) v(&config)
......
...@@ -2,13 +2,13 @@ package tracing ...@@ -2,13 +2,13 @@ package tracing
import ( import (
"crypto/tls" "crypto/tls"
"log"
"net/http" "net/http"
"net/http/httptrace" "net/http/httptrace"
opentracing "github.com/opentracing/opentracing-go" opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log" otlog "github.com/opentracing/opentracing-go/log"
logkit "gitlab.com/gitlab-org/labkit/log"
) )
type tracingRoundTripper struct { type tracingRoundTripper struct {
...@@ -52,7 +52,7 @@ func (c tracingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, e ...@@ -52,7 +52,7 @@ func (c tracingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, e
err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, carrier) err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, carrier)
if err != nil { if err != nil {
log.Printf("tracing span injection failed: %v", err) logkit.ContextLogger(ctx).WithError(err).Error("tracing span injection failed")
} }
response, err := c.delegate.RoundTrip(req) response, err := c.delegate.RoundTrip(req)
......
...@@ -601,58 +601,74 @@ ...@@ -601,58 +601,74 @@
"versionExact": "v0.71.0" "versionExact": "v0.71.0"
}, },
{ {
"checksumSHA1": "WMOuBgCyclqy+Mqunb0NbykaC4Y=", "checksumSHA1": "86VccaJApU9EEq4KKen8DX9gMUM=",
"path": "gitlab.com/gitlab-org/labkit/correlation", "path": "gitlab.com/gitlab-org/labkit/correlation",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "UFBFulprWZHuL9GHhjCKoHXm+Ww=", "checksumSHA1": "UFBFulprWZHuL9GHhjCKoHXm+Ww=",
"path": "gitlab.com/gitlab-org/labkit/correlation/grpc", "path": "gitlab.com/gitlab-org/labkit/correlation/grpc",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "YyeJ2t/STDZj3X4xetPzTC0I9hU=", "checksumSHA1": "jg7lyicpIcxuznj6pg+Wg0MKm7o=",
"path": "gitlab.com/gitlab-org/labkit/correlation/raven", "path": "gitlab.com/gitlab-org/labkit/correlation/raven",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "27mCUL3y0j+3NB/+YLWxQLTiSIU=", "checksumSHA1": "3/0keVU4UZOoFSNajbtnexV6Ou4=",
"path": "gitlab.com/gitlab-org/labkit/log",
"revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-07-12T13:09:19Z",
"version": "master",
"versionExact": "master"
},
{
"checksumSHA1": "yWZietMQ0YjAXj4NEVrQAypOIBU=",
"path": "gitlab.com/gitlab-org/labkit/mask",
"revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-07-12T13:09:19Z",
"version": "master",
"versionExact": "master"
},
{
"checksumSHA1": "ZxaskgDxW6muJqpq1qihus7rzGU=",
"path": "gitlab.com/gitlab-org/labkit/tracing", "path": "gitlab.com/gitlab-org/labkit/tracing",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "uIvjqXAsMQK/Y5FgWRaGydYGbYs=", "checksumSHA1": "uIvjqXAsMQK/Y5FgWRaGydYGbYs=",
"path": "gitlab.com/gitlab-org/labkit/tracing/connstr", "path": "gitlab.com/gitlab-org/labkit/tracing/connstr",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "IE38In/zPKpmKqvWAAThyaufQak=", "checksumSHA1": "IE38In/zPKpmKqvWAAThyaufQak=",
"path": "gitlab.com/gitlab-org/labkit/tracing/grpc", "path": "gitlab.com/gitlab-org/labkit/tracing/grpc",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
{ {
"checksumSHA1": "hB59Es/WTWfBPLSAheQaRyHGSXA=", "checksumSHA1": "8KqfTzukh6BKKnDa7AlpeHNW8A4=",
"path": "gitlab.com/gitlab-org/labkit/tracing/impl", "path": "gitlab.com/gitlab-org/labkit/tracing/impl",
"revision": "4b0e4d5df664c0f0680f3479ce7cc3378f04d40e", "revision": "503c162155664d5a8d6d84a7dac51960dc0119ec",
"revisionTime": "2019-02-11T18:00:21Z", "revisionTime": "2019-07-12T13:09:19Z",
"version": "master", "version": "master",
"versionExact": "master" "versionExact": "master"
}, },
......