// API calls related to authentication
package api

import (
	"gitlab.com/gitlab-org/gitlab-workhorse/internal/badgateway"
	"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
	proxypkg "gitlab.com/gitlab-org/gitlab-workhorse/internal/proxy"
	"gitlab.com/gitlab-org/gitlab-workhorse/internal/senddata"
	"gitlab.com/gitlab-org/gitlab-workhorse/internal/sendfile"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"sync"
	"time"
)

// Reply from auth backend, e.g. for "download from repo" authorization request
type AuthReply struct {
	// raw reply from auth backend & PreAuthorizeHandler().
	// recorded so we can replay it from auth cache to each client in full
	// if e.g. access is rejected.
	RawReply *httptest.ResponseRecorder

	// decoded auth reply
	Response
}

// Entry in authorization reply cache
type AuthCacheEntry struct {
	// FIXME we need to lock the entry only to "correctly" update Nhit on
	// read side, but we can tolerate some looses in Nhit and update it
	// without mutex or atomic. Only -race complains...
	// ( we could use atomic.Value for atomic whole cache entry updates from
	//   refresher without need for locks on readers side, but the need to do
	//   .Nhit++ on readers side ruins that )
	sync.Mutex

	AuthReply

	Tauth int64         // in seconds
	Nhit  int64         // how many times this entry was hit when querying auth cache during the last refresh period.
	ready chan struct{} // closed when entry is ready
}

// Entries are keyed by project + credentials
// AuthCacheKey keys cache entries by project + credentials, so that
// different credentials never share an authorization reply.
type AuthCacheKey struct {
	project  string
	userinfo string // user[:password] or ""
	query    string // url-encoded token-carrying query params, e.g. private_token=...
	header   string // url-encoded token-carrying request headers, e.g. PRIVATE-TOKEN=...
}

// Authorization reply cache
// {} AuthCacheKey -> *AuthCacheEntry
// AuthCache is the authorization reply cache:
// {} AuthCacheKey -> *AuthCacheEntry
// Entries refresh themselves periodically while used and evict themselves
// when unused (see refreshEntry).
type AuthCache struct {
	a *API // for which API we cache auth

	mu     sync.RWMutex // guards .cached
	cached map[AuthCacheKey]*AuthCacheEntry
}

// NewAuthCache creates an empty authorization reply cache serving API a.
func NewAuthCache(a *API) *AuthCache {
	c := &AuthCache{a: a}
	c.cached = make(map[AuthCacheKey]*AuthCacheEntry)
	return c
}

// VerifyDownloadAccess checks whether download access is ok.
// The cache is consulted first; on a miss the auth backend is asked.
// Download is ok if AuthReply.RepoPath != "".
func (c *AuthCache) VerifyDownloadAccess(project string, userinfo *url.Userinfo, query string, header http.Header) AuthReply {
	// In addition to userinfo:
	var u string
	if userinfo != nil {
		u = userinfo.String()
	}

	// Use only tokens from query/header and selected cookies to minimize cache and avoid
	// creating redundant cache entries because of e.g. unrelated headers.
	parsedQuery, _ := url.ParseQuery(query) // this is what URL.Query() does
	q := url.Values{}
	for name, vals := range parsedQuery {
		if strings.HasSuffix(name, "_token") {
			q[name] = vals
		}
	}

	h := url.Values{}
	for name, vals := range header {
		if strings.HasSuffix(strings.ToUpper(name), "-TOKEN") {
			h[name] = vals
		}
	}

	// src: net.readCookies() is private - workaround via http.Request.Cookies().
	// dst: cookie composition is exposed to http.Request only - use it.
	src := http.Request{Header: header}
	dst := http.Request{Header: make(http.Header)}
	for _, ck := range src.Cookies() {
		if ck.Name == "_gitlab_session" {
			dst.AddCookie(ck)
		}
	}
	if cookie := dst.Header.Get("Cookie"); cookie != "" {
		h["Cookie"] = []string{cookie}
	}

	return c.verifyDownloadAccess(AuthCacheKey{project, u, q.Encode(), h.Encode()})
}

// verifyDownloadAccess returns the (possibly cached) auth reply for key.
//
// Concurrency scheme: the first goroutine to miss the cache installs a
// not-yet-ready entry and queries the backend on behalf of everyone
// (single-flight); concurrent requests for the same key block on the
// entry's ready channel instead of issuing duplicate backend requests.
func (c *AuthCache) verifyDownloadAccess(key AuthCacheKey) AuthReply {
	var authReply AuthReply

	// first try to read from cache in parallel with other readers
	c.mu.RLock()
	auth := c.cached[key]
	c.mu.RUnlock()

have_entry:
	// entry in cache - use it
	if auth != nil {
		<-auth.ready // wait until it is ready

		// lock only to update Nhit consistently (see FIXME on AuthCacheEntry)
		auth.Lock()
		auth.Nhit++
		//log.Printf("authReply for %v cached ago: %v  (hits: %v)",
		//	key,
		//	time.Since(time.Unix(auth.Tauth, 0)),
		//	auth.Nhit)
		authReply = auth.AuthReply
		auth.Unlock()

	} else {
		// no entry - relock the cache in exclusive mode, create empty entry,
		// and start filling it

		c.mu.Lock()
		// another ex-reader could be trying to create this entry
		// simultaneously with us - recheck
		auth = c.cached[key]
		if auth != nil {
			c.mu.Unlock()
			goto have_entry
		}

		// new not-yet-ready entry
		auth = &AuthCacheEntry{ready: make(chan struct{})}
		c.cached[key] = auth
		c.mu.Unlock()

		// this goroutine becomes responsible for querying auth backend.
		// no locking needed here: until ready is closed, no other goroutine
		// touches the entry's fields.
		auth.AuthReply = c.askAuthBackend(key)
		auth.Tauth = time.Now().Unix()
		auth.Nhit = 0

		authReply = auth.AuthReply

		// broadcast to other goroutines that this entry is ready
		close(auth.ready)

		// launch entry refresher
		go c.refreshEntry(auth, key)
	}

	return authReply
}

// Time period for refreshing / removing unused entries in authCache
const authCacheRefresh = 30 * time.Second

// refreshEntry periodically re-asks the auth backend for key while the
// entry keeps being used. Once a whole refresh period passes without a
// single hit, the entry is removed from the cache and the goroutine exits.
func (c *AuthCache) refreshEntry(auth *AuthCacheEntry, key AuthCacheKey) {
	for {
		time.Sleep(authCacheRefresh)

		auth.Lock()
		hits := auth.Nhit
		auth.Unlock()

		// clear cache entry if it is not used
		//log.Printf("AUTH refresh - %v #hit: %v", key, hits)
		if hits == 0 {
			// not used - remove and stop refreshing.
			//log.Printf("AUTH - removing %v", key)
			// NOTE it is ok even if someone gets this auth in this time window
			//      and use it for some time
			c.mu.Lock()
			delete(c.cached, key)
			c.mu.Unlock()
			return
		}

		//log.Printf("AUTH - refreshing %v", key)
		freshReply := c.askAuthBackend(key)

		auth.Lock()
		auth.AuthReply = freshReply
		auth.Tauth = time.Now().Unix()
		auth.Nhit = 0
		auth.Unlock()
	}
}

// askAuthBackend decodes key back into userinfo/query/header form and asks
// the auth backend about it.
func (c *AuthCache) askAuthBackend(key AuthCacheKey) AuthReply {
	// key.userinfo -> url.Userinfo
	var user *url.Userinfo
	if key.userinfo != "" {
		// url prepared-to-parse userinfo must be valid - we encoded it ourselves
		parsed, err := url.Parse("http://" + key.userinfo + "@/")
		if err != nil {
			panic(err)
		}
		user = parsed.User
		if user == nil {
			panic(fmt.Errorf("userinfo parse: `%s` -> empty", key.userinfo))
		}
	}

	// key.header -> url.Values -> http.Header
	hv, err := url.ParseQuery(key.header)
	if err != nil {
		// we prepared key.header ourselves via url-encoding in AuthCache.VerifyDownloadAccess().
		// It must be ok
		panic(err)
	}
	header := http.Header{}
	for name, vals := range hv {
		header[name] = vals
	}

	return c.a.verifyDownloadAccess(key.project, user, key.query, header)
}

// testDownloadOkViaSendArchive detects, via the senddata mechanism, whether
// archive download was approved: its Inject hook fires only when the auth
// backend replied with a matching senddata payload.
type testDownloadOkViaSendArchive struct {
	senddata.Prefix
	authReply *AuthReply // where to record the approved RepoPath
}

// Inject is the senddata hook: it fires only when the auth backend approved
// the archive request, and records the repository path from the payload.
func (aok *testDownloadOkViaSendArchive) Inject(w http.ResponseWriter, r *http.Request, sendData string) {
	param := struct{ RepoPath string }{}
	err := aok.Unpack(&param, sendData)
	if err != nil {
		helper.Fail500(w, r, fmt.Errorf("testDownloadOkViaSendArchive: unpack sendData: %v", err))
		return
	}

	// reaching this point means the auth handler approved access,
	// and thus it is ok to download
	aok.authReply.RepoPath = param.RepoPath
}

// VerifyDownloadAccess asks the auth backend whether download is ok for a
// project. Authorization is approved if AuthReply.RepoPath != "" on return;
// the raw auth backend response is emitted to AuthReply.RawReply.
//
// Replies from the authentication backend are cached for 30 seconds
// (authCacheRefresh) as each request to Rails code is heavy and slow.
func (a *API) VerifyDownloadAccess(project string, user *url.Userinfo, query string, header http.Header) AuthReply {
	reply := a.authCache.VerifyDownloadAccess(project, user, query, header)
	return reply
}

// like Userinfo.Password(), "" if unset
func xpassword(user *url.Userinfo) string {
	password, _ := user.Password()
	return password
}

// verifyDownloadAccess asks the auth backend whether download is ok for a
// project. Access is approved iff the returned AuthReply.RepoPath != "";
// the raw backend response is recorded in AuthReply.RawReply so it can be
// replayed to the client on rejection.
func (a *API) verifyDownloadAccess(project string, user *url.Userinfo, query string, header http.Header) AuthReply {
	authReply := AuthReply{
		RawReply: httptest.NewRecorder(),
	}

	// dummy request carrying the client headers, used only to give
	// helper.Fail500 a request to log against.
	failCtxReq := &http.Request{Header: header}

	// Request to auth backend to verify whether download is possible.
	// - first option is via asking as `git fetch` would do, but on Rails
	//   side this supports only basic auth, not private token.
	// - that's why we first ask auth backend to authenticate as if it was
	//   request to get repo archive and propagate request query and header.
	//
	// NOTE: not named `url` - that would shadow package net/url, which is
	// still referenced below.
	reqURL := project + "/repository/archive.zip"
	if query != "" {
		reqURL += "?" + query
	}
	reqDownloadAccess, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		helper.Fail500(authReply.RawReply, failCtxReq, fmt.Errorf("GET archive.zip: %v", err))
		return authReply
	}
	if user != nil {
		// just in case - Rails does not support HTTP Basic Auth for usual requests
		reqDownloadAccess.SetBasicAuth(user.Username(), xpassword(user))
	}
	for k, v := range header {
		reqDownloadAccess.Header[k] = v
	}

	// Send request to auth backend and detect via aok senddata hook
	// whether access is permitted.
	aok := &testDownloadOkViaSendArchive{"git-archive:", &authReply}
	authProxy := senddata.SendData(
		sendfile.SendFile(proxypkg.NewProxy(
			a.URL,
			a.Version,
			a.Client.Transport.(*badgateway.RoundTripper),
		)),
		aok,
	)
	authProxy.ServeHTTP(authReply.RawReply, reqDownloadAccess)

	// If not successful and any token was not provided neither in
	// query nor in header, retry authenticating as `git fetch` would do.
	//
	// The reason we want to do this second try is that HTTP auth is
	// handled by upstream auth backend for git requests only, and we might
	// want to use e.g. https://gitlab-ci-token:token@/.../raw/...
	if authReply.RepoPath != "" || query != "" || len(header) != 0 {
		return authReply
	}
	if user == nil {
		// backend gives "302 location: .../users/sign_in" when rejecting access.
		// transform this to HTTP Basic Auth challenge if we don't have user/password already.
		//
		// reason: many clients - e.g. wget, chrome, even when given user/password,
		// first send request without auth set and expect server to send them auth challenge.
		// and only after the challenge they retry the request with Authorization header set.
		if authReply.RawReply.Code == http.StatusFound &&
			strings.HasSuffix(authReply.RawReply.HeaderMap.Get("location"), "/users/sign_in") {
			rec := httptest.NewRecorder()
			rec.Header().Set("WWW-Authenticate", "Basic realm=\"\"")
			rec.WriteHeader(http.StatusUnauthorized)
			authReply.RawReply = rec
		}
		return authReply
	}

	reqURL = project + ".git/info/refs?service=git-upload-pack"
	reqFetchAccess, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		helper.Fail500(authReply.RawReply, failCtxReq, fmt.Errorf("GET git-upload-pack: %v", err))
		return authReply
	}
	reqFetchAccess.SetBasicAuth(user.Username(), xpassword(user))
	for k, v := range header {
		reqFetchAccess.Header[k] = v
	}

	// reset RawReply - if this second try fails too, the client receives
	// what this latter request to auth backend - as `git fetch` - replies.
	authReply.RawReply = httptest.NewRecorder()
	a.PreAuthorizeHandler(
		func(w http.ResponseWriter, req *http.Request, resp *Response) {
			// if we ever get to this point - auth handler approved
			// access and thus it is ok to download
			authReply.Response = *resp
		}, "").ServeHTTP(authReply.RawReply, reqFetchAccess)

	return authReply
}