Commit 337a4220 authored by Aleksei Lipniagov, committed by Jacob Vosmaer

Add additional logging for dynamic img resizing

parent fb1c84c8
@@ -33,11 +33,16 @@ var SendScaledImage = &resizer{"send-scaled-img:"}
 type resizeParams struct {
 	Location string
 	Width    uint
+	Format   string
 }
 
-const maxImageScalerProcs = 100
+type processCounter struct {
+	n int32
+}
+
+var numScalerProcs processCounter
 
-var numScalerProcs int32 = 0
+const maxImageScalerProcs = 100
 
 // Images might be located remotely in object storage, in which case we need to stream
 // it via http(s)
@@ -58,20 +63,37 @@ var httpClient = &http.Client{
 	Transport: httpTransport,
 }
 
-var imageResizeConcurrencyMax = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Name: "gitlab_workhorse_max_image_resize_requests_exceeded_total",
-		Help: "Amount of image resizing requests that exceed the maximum allowed scaler processes",
-	},
+var (
+	imageResizeConcurrencyLimitExceeds = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "gitlab_workhorse_image_resize_concurrency_limit_exceeds_total",
+			Help: "Amount of image resizing requests that exceeded the maximum allowed scaler processes",
+		},
+	)
+	imageResizeProcesses = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "gitlab_workhorse_image_resize_processes",
+			Help: "Amount of image resizing scaler processes working now",
+		},
+	)
+	imageResizeCompleted = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "gitlab_workhorse_image_resize_completed_total",
+			Help: "Amount of image resizing processes successfully completed",
+		},
+	)
 )
 
 func init() {
-	prometheus.MustRegister(imageResizeConcurrencyMax)
+	prometheus.MustRegister(imageResizeConcurrencyLimitExceeds)
+	prometheus.MustRegister(imageResizeProcesses)
+	prometheus.MustRegister(imageResizeCompleted)
 }
 
 // This Injecter forks into graphicsmagick to resize an image identified by path or URL
 // and streams the resized image back to the client
 func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData string) {
+	start := time.Now()
 	logger := log.ContextLogger(req.Context())
 	params, err := r.unpackParameters(paramsData)
 	if err != nil {
@@ -81,7 +103,7 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
 		return
 	}
 
-	sourceImageReader, err := openSourceImage(params.Location)
+	sourceImageReader, filesize, err := openSourceImage(params.Location)
 	if err != nil {
 		// This means we cannot even read the input image; fail fast.
 		helper.Fail500(w, req, fmt.Errorf("ImageResizer: Failed opening image data stream: %v", err))
@@ -93,6 +115,7 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
 	// simply fail over to rendering out the original image unchanged.
 	imageReader, resizeCmd := tryResizeImage(req.Context(), sourceImageReader, params.Width, logger)
 	defer helper.CleanUpProcessGroup(resizeCmd)
+	imageResizeCompleted.Inc()
 
 	w.Header().Del("Content-Length")
 	bytesWritten, err := io.Copy(w, imageReader)
@@ -101,7 +124,13 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
 		return
 	}
 
-	logger.WithField("bytes_written", bytesWritten).Print("ImageResizer: success")
+	logger.WithFields(log.Fields{
+		"bytes_written":     bytesWritten,
+		"duration_s":        time.Since(start).Seconds(),
+		"target_width":      params.Width,
+		"format":            params.Format,
+		"original_filesize": filesize,
+	}).Printf("ImageResizer: Success")
 }
 
 func (r *resizer) unpackParameters(paramsData string) (*resizeParams, error) {
@@ -119,17 +148,13 @@ func (r *resizer) unpackParameters(paramsData string) (*resizeParams, error) {
 // Attempts to rescale the given image data, or in case of errors, falls back to the original image.
 func tryResizeImage(ctx context.Context, r io.Reader, width uint, logger *logrus.Entry) (io.Reader, *exec.Cmd) {
-	// Only allow more scaling requests if we haven't yet reached the maximum allows number
-	// of concurrent graphicsmagick processes
-	if n := atomic.AddInt32(&numScalerProcs, 1); n > maxImageScalerProcs {
-		atomic.AddInt32(&numScalerProcs, -1)
-		imageResizeConcurrencyMax.Inc()
+	if !numScalerProcs.tryIncrement() {
 		return r, nil
 	}
 
 	go func() {
 		<-ctx.Done()
-		atomic.AddInt32(&numScalerProcs, -1)
+		numScalerProcs.decrement()
 	}()
 
 	resizeCmd, resizedImageReader, err := startResizeImageCommand(ctx, r, logger.Writer(), width)
@@ -162,22 +187,60 @@ func isURL(location string) bool {
 	return strings.HasPrefix(location, "http://") || strings.HasPrefix(location, "https://")
 }
 
-func openSourceImage(location string) (io.ReadCloser, error) {
-	if !isURL(location) {
-		return os.Open(location)
+func openSourceImage(location string) (io.ReadCloser, int64, error) {
+	if isURL(location) {
+		return openFromUrl(location)
 	}
 
+	return openFromFile(location)
+}
+
+func openFromUrl(location string) (io.ReadCloser, int64, error) {
 	res, err := httpClient.Get(location)
 	if err != nil {
-		return nil, err
+		return nil, 0, err
 	}
 
 	if res.StatusCode != http.StatusOK {
 		res.Body.Close()
-		return nil, fmt.Errorf("ImageResizer: cannot read data from %q: %d %s",
+		return nil, 0, fmt.Errorf("ImageResizer: cannot read data from %q: %d %s",
 			location, res.StatusCode, res.Status)
 	}
 
-	return res.Body, nil
+	return res.Body, res.ContentLength, nil
+}
+
+func openFromFile(location string) (io.ReadCloser, int64, error) {
+	file, err := os.Open(location)
+	if err != nil {
+		return file, 0, err
+	}
+
+	fi, err := file.Stat()
+	if err != nil {
+		return file, 0, err
+	}
+
+	return file, fi.Size(), nil
+}
+
+// Only allow more scaling requests if we haven't yet reached the maximum
+// allowed number of concurrent scaler processes
+func (c *processCounter) tryIncrement() bool {
+	if p := atomic.AddInt32(&c.n, 1); p > maxImageScalerProcs {
+		c.decrement()
+		imageResizeConcurrencyLimitExceeds.Inc()
+
+		return false
+	}
+
+	imageResizeProcesses.Set(float64(c.n))
+	return true
+}
+
+func (c *processCounter) decrement() {
+	atomic.AddInt32(&c.n, -1)
+	imageResizeProcesses.Set(float64(c.n))
 }
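
The new instrumentation follows the usual prometheus/client_golang pattern: declare a counter or gauge, register it once in `init()`, and update it from the request path. A minimal, self-contained sketch of that pattern is below; the metric name `example_image_resize_processes` is a hypothetical stand-in for the workhorse metric, and `testutil.ToFloat64` is used only to read the value back without scraping an endpoint.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// exampleResizeProcesses is a hypothetical gauge shaped like
// gitlab_workhorse_image_resize_processes from this commit.
var exampleResizeProcesses = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "example_image_resize_processes",
	Help: "Amount of image resizing scaler processes working now",
})

func init() {
	// Register once at startup, as the commit does in init().
	prometheus.MustRegister(exampleResizeProcesses)
}

func main() {
	exampleResizeProcesses.Set(3) // e.g. three scaler processes currently running

	// Read the current value back for a quick check.
	fmt.Println(testutil.ToFloat64(exampleResizeProcesses)) // 3
}
```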
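The success log line now emits structured fields rather than a single `bytes_written` field, including a duration computed from a `start` timestamp taken at the top of `Inject`. A standalone sketch of the same logging shape, assuming plain logrus rather than workhorse's `log` helpers and using placeholder values:

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	start := time.Now()

	// ... the resize and response copy would happen here ...
	time.Sleep(25 * time.Millisecond)

	// One structured entry with the same field names the commit logs;
	// the values are placeholders, not real measurements.
	logrus.WithFields(logrus.Fields{
		"bytes_written":     12345,
		"duration_s":        time.Since(start).Seconds(),
		"target_width":      64,
		"format":            "png",
		"original_filesize": 45678,
	}).Info("ImageResizer: Success")
}
```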
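The `original_filesize` field comes from two sources depending on the location: `res.ContentLength` for remote images (which the standard library sets to -1 when the length is unknown) and `FileInfo.Size()` for local files. A small standalone sketch of the local-file path; the helper name and the `example.png` path are illustrative only, and unlike `openFromFile` it closes the file on a `Stat` error:

```go
package main

import (
	"fmt"
	"os"
)

// openWithSize returns an open reader for path along with its size in bytes,
// loosely mirroring openFromFile from the commit. The caller must close it.
func openWithSize(path string) (*os.File, int64, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, 0, err
	}

	fi, err := file.Stat()
	if err != nil {
		file.Close()
		return nil, 0, err
	}

	return file, fi.Size(), nil
}

func main() {
	f, size, err := openWithSize("example.png")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	fmt.Println("size in bytes:", size)
}
```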
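Finally, `processCounter` replaces the bare `int32` and caps concurrent graphicsmagick invocations at `maxImageScalerProcs`: a slot is reserved with an atomic increment and rolled back if the limit was exceeded, and released when the request context is done. A simplified, self-contained sketch of that try-increment/decrement pattern, with a hypothetical type name and limit and the Prometheus updates omitted:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// limitedCounter is a hypothetical, simplified stand-in for processCounter:
// it admits at most `limit` concurrent workers using one atomic int32.
type limitedCounter struct {
	n     int32
	limit int32
}

// tryIncrement reserves a slot; if the limit is already reached it rolls the
// increment back and reports failure, as tryIncrement does in the diff.
func (c *limitedCounter) tryIncrement() bool {
	if p := atomic.AddInt32(&c.n, 1); p > c.limit {
		atomic.AddInt32(&c.n, -1)
		return false
	}
	return true
}

// decrement releases a previously reserved slot.
func (c *limitedCounter) decrement() {
	atomic.AddInt32(&c.n, -1)
}

func main() {
	c := &limitedCounter{limit: 2}
	for i := 0; i < 4; i++ {
		fmt.Printf("request %d admitted: %v\n", i, c.tryIncrement()) // true, true, false, false
	}

	c.decrement() // one worker finishes, freeing a slot
	fmt.Println("after decrement, admitted:", c.tryIncrement()) // true
}
```

When a request is rejected, `tryResizeImage` returns the original reader unchanged, so exceeding the limit degrades to serving the unresized image rather than failing the request.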