.domains file"
+ }
+ if code == fasthttp.StatusFailedDependency {
+ message += " - target repo/branch doesn't exist or is private"
+ }
+ // TODO: use template engine?
+ ctx.Response.SetBody(bytes.ReplaceAll(NotFoundPage, []byte("%status"), []byte(strconv.Itoa(code)+" "+message)))
+}
diff --git a/html/html.go b/html/html.go
index ff8b6b4..d223e15 100644
--- a/html/html.go
+++ b/html/html.go
@@ -1,53 +1,6 @@
package html
-import (
- _ "embed"
- "net/http"
- "text/template" // do not use html/template here, we sanitize the message before passing it to the template
+import _ "embed"
- "codeberg.org/codeberg/pages/server/context"
- "github.com/microcosm-cc/bluemonday"
- "github.com/rs/zerolog/log"
-)
-
-//go:embed templates/error.html
-var errorPage string
-
-var (
- errorTemplate = template.Must(template.New("error").Parse(errorPage))
- sanitizer = createBlueMondayPolicy()
-)
-
-type TemplateContext struct {
- StatusCode int
- StatusText string
- Message string
-}
-
-// ReturnErrorPage sets the response status code and writes the error page to the response body.
-// The error page contains a sanitized version of the message and the statusCode both in text and numeric form.
-//
-// Currently, only the following html tags are supported:
-func ReturnErrorPage(ctx *context.Context, msg string, statusCode int) {
- ctx.RespWriter.Header().Set("Content-Type", "text/html; charset=utf-8")
- ctx.RespWriter.WriteHeader(statusCode)
-
- templateContext := TemplateContext{
- StatusCode: statusCode,
- StatusText: http.StatusText(statusCode),
- Message: sanitizer.Sanitize(msg),
- }
-
- err := errorTemplate.Execute(ctx.RespWriter, templateContext)
- if err != nil {
- log.Err(err).Str("message", msg).Int("status", statusCode).Msg("could not write response")
- }
-}
-
-func createBlueMondayPolicy() *bluemonday.Policy {
- p := bluemonday.NewPolicy()
-
- p.AllowElements("code")
-
- return p
-}
+//go:embed 404.html
+var NotFoundPage []byte
diff --git a/html/html_test.go b/html/html_test.go
deleted file mode 100644
index b395bb2..0000000
--- a/html/html_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package html
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestSanitizerSimpleString(t *testing.T) {
- str := "simple text message without any html elements"
-
- assert.Equal(t, str, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithCodeTag(t *testing.T) {
- str := "simple text message with html tag"
-
- assert.Equal(t, str, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithCodeTagWithAttribute(t *testing.T) {
- str := "simple text message with html tag"
- expected := "simple text message with html tag"
-
- assert.Equal(t, expected, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithATag(t *testing.T) {
- str := "simple text message with a link to another page"
- expected := "simple text message with a link to another page"
-
- assert.Equal(t, expected, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithATagAndHref(t *testing.T) {
- str := "simple text message with a link to another page"
- expected := "simple text message with a link to another page"
-
- assert.Equal(t, expected, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithImgTag(t *testing.T) {
- str := "simple text message with a
"
- expected := "simple text message with a "
-
- assert.Equal(t, expected, sanitizer.Sanitize(str))
-}
-
-func TestSanitizerStringWithImgTagAndOnerrorAttribute(t *testing.T) {
- str := "simple text message with a
"
- expected := "simple text message with a "
-
- assert.Equal(t, expected, sanitizer.Sanitize(str))
-}
diff --git a/html/templates/error.html b/html/templates/error.html
deleted file mode 100644
index 6094a26..0000000
--- a/html/templates/error.html
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-
-
-
- {{.StatusText}}
-
-
-
-
-
-
-
-
- {{.StatusText}} ({{.StatusCode}})!
-
-
Sorry, but this page couldn't be served.
- "{{.Message}}"
-
- We hope this isn't a problem on our end ;) - Make sure to check the
- troubleshooting section in the Docs!
-
-
-
-
- Static pages made easy -
- Codeberg Pages
-
-
-
diff --git a/integration/get_test.go b/integration/get_test.go
index cfb7188..6054e17 100644
--- a/integration/get_test.go
+++ b/integration/get_test.go
@@ -20,20 +20,18 @@ func TestGetRedirect(t *testing.T) {
log.Println("=== TestGetRedirect ===")
// test custom domain redirect
resp, err := getTestHTTPSClient().Get("https://calciumdibromid.localhost.mock.directory:4430")
- if !assert.NoError(t, err) {
- t.FailNow()
- }
+ assert.NoError(t, err)
if !assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode) {
t.FailNow()
}
assert.EqualValues(t, "https://www.cabr2.de/", resp.Header.Get("Location"))
- assert.EqualValues(t, `Temporary Redirect.`, strings.TrimSpace(string(getBytes(resp.Body))))
+ assert.EqualValues(t, 0, getSize(resp.Body))
}
func TestGetContent(t *testing.T) {
log.Println("=== TestGetContent ===")
// test get image
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/images/827679288a.jpg")
+ resp, err := getTestHTTPSClient().Get("https://magiclike.localhost.mock.directory:4430/images/827679288a.jpg")
assert.NoError(t, err)
if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
@@ -44,218 +42,81 @@ func TestGetContent(t *testing.T) {
assert.Len(t, resp.Header.Get("ETag"), 42)
// specify branch
- resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pag/@master/")
+ resp, err = getTestHTTPSClient().Get("https://momar.localhost.mock.directory:4430/pag/@master/")
assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.True(t, getSize(resp.Body) > 1000)
- assert.Len(t, resp.Header.Get("ETag"), 44)
+ assert.Len(t, resp.Header.Get("ETag"), 42)
// access branch name contains '/'
- resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/blumia/@docs~main/")
+ resp, err = getTestHTTPSClient().Get("https://blumia.localhost.mock.directory:4430/pages-server-integration-tests/@docs~main/")
assert.NoError(t, err)
if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.True(t, getSize(resp.Body) > 100)
- assert.Len(t, resp.Header.Get("ETag"), 44)
+ assert.Len(t, resp.Header.Get("ETag"), 42)
- // TODO: test get of non cacheable content (content size > fileCacheSizeLimit)
+ // TODO: test get of non cachable content (content size > fileCacheSizeLimit)
}
func TestCustomDomain(t *testing.T) {
log.Println("=== TestCustomDomain ===")
resp, err := getTestHTTPSClient().Get("https://mock-pages.codeberg-test.org:4430/README.md")
assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/markdown; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "106", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 106, getSize(resp.Body))
}
-func TestCustomDomainRedirects(t *testing.T) {
- log.Println("=== TestCustomDomainRedirects ===")
- // test redirect from default pages domain to custom domain
- resp, err := getTestHTTPSClient().Get("https://6543.localhost.mock.directory:4430/test_pages-server_custom-mock-domain/@main/README.md")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
- assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
- // TODO: custom port is not evaluated (witch does hurt tests & dev env only)
- // assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/@main/README.md", resp.Header.Get("Location"))
- assert.EqualValues(t, "https://mock-pages.codeberg-test.org/@main/README.md", resp.Header.Get("Location"))
- assert.EqualValues(t, `https:/codeberg.org/6543/test_pages-server_custom-mock-domain/src/branch/main/README.md; rel="canonical"; rel="canonical"`, resp.Header.Get("Link"))
-
- // test redirect from an custom domain to the primary custom domain (www.example.com -> example.com)
- // regression test to https://codeberg.org/Codeberg/pages-server/issues/153
- resp, err = getTestHTTPSClient().Get("https://mock-pages-redirect.codeberg-test.org:4430/README.md")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
- assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
- // TODO: custom port is not evaluated (witch does hurt tests & dev env only)
- // assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
- assert.EqualValues(t, "https://mock-pages.codeberg-test.org/README.md", resp.Header.Get("Location"))
-}
-
-func TestRawCustomDomain(t *testing.T) {
- log.Println("=== TestRawCustomDomain ===")
- // test raw domain response for custom domain branch
- resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/example") // need cb_pages_tests fork
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
- assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
- assert.EqualValues(t, "76", resp.Header.Get("Content-Length"))
- assert.EqualValues(t, 76, getSize(resp.Body))
-}
-
-func TestRawIndex(t *testing.T) {
- log.Println("=== TestRawIndex ===")
- // test raw domain response for index.html
- resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/@branch-test/index.html") // need cb_pages_tests fork
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
- assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
- assert.EqualValues(t, "597", resp.Header.Get("Content-Length"))
- assert.EqualValues(t, 597, getSize(resp.Body))
-}
-
func TestGetNotFound(t *testing.T) {
log.Println("=== TestGetNotFound ===")
// test custom not found pages
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pages-404-demo/blah")
+ resp, err := getTestHTTPSClient().Get("https://crystal.localhost.mock.directory:4430/pages-404-demo/blah")
assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
+ if !assert.EqualValues(t, http.StatusNotFound, resp.StatusCode) {
t.FailNow()
}
- assert.EqualValues(t, http.StatusNotFound, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "37", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 37, getSize(resp.Body))
}
-func TestRedirect(t *testing.T) {
- log.Println("=== TestRedirect ===")
- // test redirects
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/redirect")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
- assert.EqualValues(t, "https://example.com/", resp.Header.Get("Location"))
-}
-
-func TestSPARedirect(t *testing.T) {
- log.Println("=== TestSPARedirect ===")
- // test SPA redirects
- url := "https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/app/aqdjw"
- resp, err := getTestHTTPSClient().Get(url)
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
- assert.EqualValues(t, url, resp.Request.URL.String())
- assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
- assert.EqualValues(t, "258", resp.Header.Get("Content-Length"))
- assert.EqualValues(t, 258, getSize(resp.Body))
-}
-
-func TestSplatRedirect(t *testing.T) {
- log.Println("=== TestSplatRedirect ===")
- // test splat redirects
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/articles/qfopefe")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
- assert.EqualValues(t, "/posts/qfopefe", resp.Header.Get("Location"))
-}
-
func TestFollowSymlink(t *testing.T) {
log.Printf("=== TestFollowSymlink ===\n")
- // file symlink
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/link")
+ resp, err := getTestHTTPSClient().Get("https://6543.localhost.mock.directory:4430/tests_for_pages-server/@main/link")
assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "application/octet-stream", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "4", resp.Header.Get("Content-Length"))
body := getBytes(resp.Body)
assert.EqualValues(t, 4, len(body))
assert.EqualValues(t, "abc\n", string(body))
-
- // relative file links (../index.html file in this case)
- resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/dir_aim/some/")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
- assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
- assert.EqualValues(t, "an index\n", string(getBytes(resp.Body)))
}
func TestLFSSupport(t *testing.T) {
log.Printf("=== TestLFSSupport ===\n")
- resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/lfs.txt")
+ resp, err := getTestHTTPSClient().Get("https://6543.localhost.mock.directory:4430/tests_for_pages-server/@main/lfs.txt")
assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
- assert.EqualValues(t, http.StatusOK, resp.StatusCode)
body := strings.TrimSpace(string(getBytes(resp.Body)))
assert.EqualValues(t, 12, len(body))
assert.EqualValues(t, "actual value", body)
}
-func TestGetOptions(t *testing.T) {
- log.Println("=== TestGetOptions ===")
- req, _ := http.NewRequest(http.MethodOptions, "https://mock-pages.codeberg-test.org:4430/README.md", http.NoBody)
- resp, err := getTestHTTPSClient().Do(req)
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusNoContent, resp.StatusCode)
- assert.EqualValues(t, "GET, HEAD, OPTIONS", resp.Header.Get("Allow"))
-}
-
-func TestHttpRedirect(t *testing.T) {
- log.Println("=== TestHttpRedirect ===")
- resp, err := getTestHTTPSClient().Get("http://mock-pages.codeberg-test.org:8880/README.md")
- assert.NoError(t, err)
- if !assert.NotNil(t, resp) {
- t.FailNow()
- }
- assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
- assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
- assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
-}
-
func getTestHTTPSClient() *http.Client {
cookieJar, _ := cookiejar.New(nil)
return &http.Client{
diff --git a/integration/main_test.go b/integration/main_test.go
index 6566f78..06d553f 100644
--- a/integration/main_test.go
+++ b/integration/main_test.go
@@ -23,12 +23,12 @@ func TestMain(m *testing.M) {
}
defer func() {
serverCancel()
- log.Println("=== TestMain: Server STOPPED ===")
+ log.Println("=== TestMain: Server STOPED ===")
}()
time.Sleep(10 * time.Second)
- m.Run()
+ os.Exit(m.Run())
}
func startServer(ctx context.Context) error {
@@ -39,16 +39,12 @@ func startServer(ctx context.Context) error {
setEnvIfNotSet("ACME_API", "https://acme.mock.directory")
setEnvIfNotSet("PAGES_DOMAIN", "localhost.mock.directory")
setEnvIfNotSet("RAW_DOMAIN", "raw.localhost.mock.directory")
- setEnvIfNotSet("PAGES_BRANCHES", "pages,main,master")
setEnvIfNotSet("PORT", "4430")
- setEnvIfNotSet("HTTP_PORT", "8880")
- setEnvIfNotSet("ENABLE_HTTP_SERVER", "true")
- setEnvIfNotSet("DB_TYPE", "sqlite3")
app := cli.NewApp()
app.Name = "pages-server"
app.Action = cmd.Serve
- app.Flags = cmd.ServerFlags
+ app.Flags = cmd.ServeFlags
go func() {
if err := app.RunContext(ctx, args); err != nil {
diff --git a/main.go b/main.go
index 6c1d0cc..2836b86 100644
--- a/main.go
+++ b/main.go
@@ -8,16 +8,18 @@ import (
"github.com/urfave/cli/v2"
"codeberg.org/codeberg/pages/cmd"
- "codeberg.org/codeberg/pages/server/version"
)
+// can be changed with -X on compile
+var version = "dev"
+
func main() {
app := cli.NewApp()
app.Name = "pages-server"
- app.Version = version.Version
+ app.Version = version
app.Usage = "pages server"
app.Action = cmd.Serve
- app.Flags = cmd.ServerFlags
+ app.Flags = cmd.ServeFlags
app.Commands = []*cli.Command{
cmd.Certs,
}
diff --git a/server/certificates/acme_client.go b/server/certificates/acme_client.go
deleted file mode 100644
index ba83e50..0000000
--- a/server/certificates/acme_client.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package certificates
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/go-acme/lego/v4/lego"
- "github.com/go-acme/lego/v4/providers/dns"
- "github.com/reugn/equalizer"
- "github.com/rs/zerolog/log"
-
- "codeberg.org/codeberg/pages/server/cache"
-)
-
-type AcmeClient struct {
- legoClient *lego.Client
- dnsChallengerLegoClient *lego.Client
-
- obtainLocks sync.Map
-
- acmeUseRateLimits bool
-
- // limiter
- acmeClientOrderLimit *equalizer.TokenBucket
- acmeClientRequestLimit *equalizer.TokenBucket
- acmeClientFailLimit *equalizer.TokenBucket
- acmeClientCertificateLimitPerUser map[string]*equalizer.TokenBucket
-}
-
-func NewAcmeClient(acmeAccountConf, acmeAPI, acmeMail, acmeEabHmac, acmeEabKID, dnsProvider string, acmeAcceptTerms, enableHTTPServer, acmeUseRateLimits bool, challengeCache cache.SetGetKey) (*AcmeClient, error) {
- acmeConfig, err := setupAcmeConfig(acmeAccountConf, acmeAPI, acmeMail, acmeEabHmac, acmeEabKID, acmeAcceptTerms)
- if err != nil {
- return nil, err
- }
-
- acmeClient, err := lego.NewClient(acmeConfig)
- if err != nil {
- log.Fatal().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
- } else {
- err = acmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
- if err != nil {
- log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
- }
- if enableHTTPServer {
- err = acmeClient.Challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{challengeCache})
- if err != nil {
- log.Error().Err(err).Msg("Can't create HTTP-01 provider")
- }
- }
- }
-
- mainDomainAcmeClient, err := lego.NewClient(acmeConfig)
- if err != nil {
- log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
- } else {
- if dnsProvider == "" {
- // using mock server, don't use wildcard certs
- err := mainDomainAcmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
- if err != nil {
- log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
- }
- } else {
- // use DNS-Challenge https://go-acme.github.io/lego/dns/
- provider, err := dns.NewDNSChallengeProviderByName(dnsProvider)
- if err != nil {
- return nil, fmt.Errorf("can not create DNS Challenge provider: %w", err)
- }
- if err := mainDomainAcmeClient.Challenge.SetDNS01Provider(provider); err != nil {
- return nil, fmt.Errorf("can not create DNS-01 provider: %w", err)
- }
- }
- }
-
- return &AcmeClient{
- legoClient: acmeClient,
- dnsChallengerLegoClient: mainDomainAcmeClient,
-
- acmeUseRateLimits: acmeUseRateLimits,
-
- obtainLocks: sync.Map{},
-
- // limiter
-
- // rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
- // TODO: when this is used a lot, we probably have to think of a somewhat better solution?
- acmeClientOrderLimit: equalizer.NewTokenBucket(25, 15*time.Minute),
- // rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
- acmeClientRequestLimit: equalizer.NewTokenBucket(5, 1*time.Second),
- // rate limit is 5 / hour https://letsencrypt.org/docs/failed-validation-limit/
- acmeClientFailLimit: equalizer.NewTokenBucket(5, 1*time.Hour),
- // checkUserLimit() use this to rate also per user
- acmeClientCertificateLimitPerUser: map[string]*equalizer.TokenBucket{},
- }, nil
-}
diff --git a/server/certificates/acme_config.go b/server/certificates/acme_config.go
deleted file mode 100644
index 12ad7c6..0000000
--- a/server/certificates/acme_config.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package certificates
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "encoding/json"
- "fmt"
- "os"
-
- "github.com/go-acme/lego/v4/certcrypto"
- "github.com/go-acme/lego/v4/lego"
- "github.com/go-acme/lego/v4/registration"
- "github.com/rs/zerolog/log"
-)
-
-const challengePath = "/.well-known/acme-challenge/"
-
-func setupAcmeConfig(configFile, acmeAPI, acmeMail, acmeEabHmac, acmeEabKID string, acmeAcceptTerms bool) (*lego.Config, error) {
- var myAcmeAccount AcmeAccount
- var myAcmeConfig *lego.Config
-
- if account, err := os.ReadFile(configFile); err == nil {
- log.Info().Msgf("found existing acme account config file '%s'", configFile)
- if err := json.Unmarshal(account, &myAcmeAccount); err != nil {
- return nil, err
- }
- myAcmeAccount.Key, err = certcrypto.ParsePEMPrivateKey([]byte(myAcmeAccount.KeyPEM))
- if err != nil {
- return nil, err
- }
- myAcmeConfig = lego.NewConfig(&myAcmeAccount)
- myAcmeConfig.CADirURL = acmeAPI
- myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
-
- // Validate Config
- _, err := lego.NewClient(myAcmeConfig)
- if err != nil {
- log.Info().Err(err).Msg("config validation failed, you might just delete the config file and let it recreate")
- return nil, fmt.Errorf("acme config validation failed: %w", err)
- }
- return myAcmeConfig, nil
- } else if !os.IsNotExist(err) {
- return nil, err
- }
-
- log.Info().Msgf("no existing acme account config found, try to create a new one")
-
- privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- return nil, err
- }
- myAcmeAccount = AcmeAccount{
- Email: acmeMail,
- Key: privateKey,
- KeyPEM: string(certcrypto.PEMEncode(privateKey)),
- }
- myAcmeConfig = lego.NewConfig(&myAcmeAccount)
- myAcmeConfig.CADirURL = acmeAPI
- myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
- tempClient, err := lego.NewClient(myAcmeConfig)
- if err != nil {
- log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
- } else {
- // accept terms & log in to EAB
- if acmeEabKID == "" || acmeEabHmac == "" {
- reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: acmeAcceptTerms})
- if err != nil {
- log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
- } else {
- myAcmeAccount.Registration = reg
- }
- } else {
- reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
- TermsOfServiceAgreed: acmeAcceptTerms,
- Kid: acmeEabKID,
- HmacEncoded: acmeEabHmac,
- })
- if err != nil {
- log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
- } else {
- myAcmeAccount.Registration = reg
- }
- }
-
- if myAcmeAccount.Registration != nil {
- acmeAccountJSON, err := json.Marshal(myAcmeAccount)
- if err != nil {
- log.Error().Err(err).Msg("json.Marshalfailed, waiting for manual restart to avoid rate limits")
- select {}
- }
- log.Info().Msgf("new acme account created. write to config file '%s'", configFile)
- err = os.WriteFile(configFile, acmeAccountJSON, 0o600)
- if err != nil {
- log.Error().Err(err).Msg("os.WriteFile failed, waiting for manual restart to avoid rate limits")
- select {}
- }
- }
- }
-
- return myAcmeConfig, nil
-}
diff --git a/server/certificates/cached_challengers.go b/server/certificates/cached_challengers.go
deleted file mode 100644
index bc9ea67..0000000
--- a/server/certificates/cached_challengers.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package certificates
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "time"
-
- "github.com/go-acme/lego/v4/challenge"
- "github.com/rs/zerolog/log"
-
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
-)
-
-type AcmeTLSChallengeProvider struct {
- challengeCache cache.SetGetKey
-}
-
-// make sure AcmeTLSChallengeProvider match Provider interface
-var _ challenge.Provider = AcmeTLSChallengeProvider{}
-
-func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
- return a.challengeCache.Set(domain, keyAuth, 1*time.Hour)
-}
-
-func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
- a.challengeCache.Remove(domain)
- return nil
-}
-
-type AcmeHTTPChallengeProvider struct {
- challengeCache cache.SetGetKey
-}
-
-// make sure AcmeHTTPChallengeProvider match Provider interface
-var _ challenge.Provider = AcmeHTTPChallengeProvider{}
-
-func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
- return a.challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
-}
-
-func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
- a.challengeCache.Remove(domain + "/" + token)
- return nil
-}
-
-func SetupHTTPACMEChallengeServer(challengeCache cache.SetGetKey, sslPort uint) http.HandlerFunc {
- // handle custom-ssl-ports to be added on https redirects
- portPart := ""
- if sslPort != 443 {
- portPart = fmt.Sprintf(":%d", sslPort)
- }
-
- return func(w http.ResponseWriter, req *http.Request) {
- ctx := context.New(w, req)
- domain := ctx.TrimHostPort()
-
- // it's an acme request
- if strings.HasPrefix(ctx.Path(), challengePath) {
- challenge, ok := challengeCache.Get(domain + "/" + strings.TrimPrefix(ctx.Path(), challengePath))
- if !ok || challenge == nil {
- log.Info().Msgf("HTTP-ACME challenge for '%s' failed: token not found", domain)
- ctx.String("no challenge for this token", http.StatusNotFound)
- }
- log.Info().Msgf("HTTP-ACME challenge for '%s' succeeded", domain)
- ctx.String(challenge.(string))
- return
- }
-
- // it's a normal http request that needs to be redirected
- u, err := url.Parse(fmt.Sprintf("https://%s%s%s", domain, portPart, ctx.Path()))
- if err != nil {
- log.Error().Err(err).Msg("could not craft http to https redirect")
- ctx.String("", http.StatusInternalServerError)
- }
-
- newURL := u.String()
- log.Debug().Msgf("redirect http to https: %s", newURL)
- ctx.Redirect(newURL, http.StatusMovedPermanently)
- }
-}
diff --git a/server/certificates/certificates.go b/server/certificates/certificates.go
index 3ae891a..b1c1329 100644
--- a/server/certificates/certificates.go
+++ b/server/certificates/certificates.go
@@ -1,19 +1,30 @@
package certificates
import (
+ "bytes"
"context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
"crypto/tls"
"crypto/x509"
+ "encoding/gob"
+ "encoding/json"
"errors"
"fmt"
+ "os"
"strconv"
"strings"
+ "sync"
"time"
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/certificate"
+ "github.com/go-acme/lego/v4/challenge"
"github.com/go-acme/lego/v4/challenge/tlsalpn01"
"github.com/go-acme/lego/v4/lego"
+ "github.com/go-acme/lego/v4/providers/dns"
+ "github.com/go-acme/lego/v4/registration"
"github.com/reugn/equalizer"
"github.com/rs/zerolog/log"
@@ -24,104 +35,89 @@ import (
"codeberg.org/codeberg/pages/server/upstream"
)
-var ErrUserRateLimitExceeded = errors.New("rate limit exceeded: 10 certificates per user per 24 hours")
-
// TLSConfig returns the configuration for generating, serving and cleaning up Let's Encrypt certificates.
-func TLSConfig(mainDomainSuffix string,
+func TLSConfig(mainDomainSuffix []byte,
giteaClient *gitea.Client,
- acmeClient *AcmeClient,
- firstDefaultBranch string,
+ firstDefaultBranch,
+ dnsProvider string,
+ acmeUseRateLimits bool,
keyCache, challengeCache, dnsLookupCache, canonicalDomainCache cache.SetGetKey,
certDB database.CertDB,
) *tls.Config {
return &tls.Config{
// check DNS name & get certificate from Let's Encrypt
GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
- domain := strings.ToLower(strings.TrimSpace(info.ServerName))
- if len(domain) < 1 {
- return nil, errors.New("missing domain info via SNI (RFC 4366, Section 3.1)")
+ sni := strings.ToLower(strings.TrimSpace(info.ServerName))
+ sniBytes := []byte(sni)
+ if len(sni) < 1 {
+ return nil, errors.New("missing sni")
}
- // https request init is actually a acme challenge
if info.SupportedProtos != nil {
for _, proto := range info.SupportedProtos {
- if proto != tlsalpn01.ACMETLS1Protocol {
- continue
+ if proto == tlsalpn01.ACMETLS1Protocol {
+ challenge, ok := challengeCache.Get(sni)
+ if !ok {
+ return nil, errors.New("no challenge for this domain")
+ }
+ cert, err := tlsalpn01.ChallengeCert(sni, challenge.(string))
+ if err != nil {
+ return nil, err
+ }
+ return cert, nil
}
- log.Info().Msgf("Detect ACME-TLS1 challenge for '%s'", domain)
-
- challenge, ok := challengeCache.Get(domain)
- if !ok {
- return nil, errors.New("no challenge for this domain")
- }
- cert, err := tlsalpn01.ChallengeCert(domain, challenge.(string))
- if err != nil {
- return nil, err
- }
- return cert, nil
}
}
targetOwner := ""
- mayObtainCert := true
- if strings.HasSuffix(domain, mainDomainSuffix) || strings.EqualFold(domain, mainDomainSuffix[1:]) {
+ if bytes.HasSuffix(sniBytes, mainDomainSuffix) || bytes.Equal(sniBytes, mainDomainSuffix[1:]) {
// deliver default certificate for the main domain (*.codeberg.page)
- domain = mainDomainSuffix
+ sniBytes = mainDomainSuffix
+ sni = string(sniBytes)
} else {
var targetRepo, targetBranch string
- targetOwner, targetRepo, targetBranch = dnsutils.GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch, dnsLookupCache)
+ targetOwner, targetRepo, targetBranch = dnsutils.GetTargetFromDNS(sni, string(mainDomainSuffix), firstDefaultBranch, dnsLookupCache)
if targetOwner == "" {
// DNS not set up, return main certificate to redirect to the docs
- domain = mainDomainSuffix
+ sniBytes = mainDomainSuffix
+ sni = string(sniBytes)
} else {
- targetOpt := &upstream.Options{
- TargetOwner: targetOwner,
- TargetRepo: targetRepo,
- TargetBranch: targetBranch,
- }
- _, valid := targetOpt.CheckCanonicalDomain(giteaClient, domain, mainDomainSuffix, canonicalDomainCache)
+ _, _ = targetRepo, targetBranch
+ _, valid := upstream.CheckCanonicalDomain(giteaClient, targetOwner, targetRepo, targetBranch, sni, string(mainDomainSuffix), canonicalDomainCache)
if !valid {
- // We shouldn't obtain a certificate when we cannot check if the
- // repository has specified this domain in the `.domains` file.
- mayObtainCert = false
+ sniBytes = mainDomainSuffix
+ sni = string(sniBytes)
}
}
}
- if tlsCertificate, ok := keyCache.Get(domain); ok {
+ if tlsCertificate, ok := keyCache.Get(sni); ok {
// we can use an existing certificate object
return tlsCertificate.(*tls.Certificate), nil
}
- var tlsCertificate *tls.Certificate
+ var tlsCertificate tls.Certificate
var err error
- if tlsCertificate, err = acmeClient.retrieveCertFromDB(domain, mainDomainSuffix, false, certDB); err != nil {
- if !errors.Is(err, database.ErrNotFound) {
- return nil, err
- }
- // we could not find a cert in db, request a new certificate
-
- // first check if we are allowed to obtain a cert for this domain
- if strings.EqualFold(domain, mainDomainSuffix) {
+ var ok bool
+ if tlsCertificate, ok = retrieveCertFromDB(sniBytes, mainDomainSuffix, dnsProvider, acmeUseRateLimits, certDB); !ok {
+ // request a new certificate
+ if bytes.Equal(sniBytes, mainDomainSuffix) {
return nil, errors.New("won't request certificate for main domain, something really bad has happened")
}
- if !mayObtainCert {
- return nil, fmt.Errorf("won't request certificate for %q", domain)
- }
- tlsCertificate, err = acmeClient.obtainCert(acmeClient.legoClient, []string{domain}, nil, targetOwner, false, mainDomainSuffix, certDB)
+ tlsCertificate, err = obtainCert(acmeClient, []string{sni}, nil, targetOwner, dnsProvider, mainDomainSuffix, acmeUseRateLimits, certDB)
if err != nil {
return nil, err
}
}
- if err := keyCache.Set(domain, tlsCertificate, 15*time.Minute); err != nil {
+ if err := keyCache.Set(sni, &tlsCertificate, 15*time.Minute); err != nil {
return nil, err
}
- return tlsCertificate, nil
+ return &tlsCertificate, nil
},
+ PreferServerCipherSuites: true,
NextProtos: []string{
- "h2",
"http/1.1",
tlsalpn01.ACMETLS1Protocol,
},
@@ -140,115 +136,159 @@ func TLSConfig(mainDomainSuffix string,
}
}
-func (c *AcmeClient) checkUserLimit(user string) error {
- userLimit, ok := c.acmeClientCertificateLimitPerUser[user]
+func checkUserLimit(user string) error {
+ userLimit, ok := acmeClientCertificateLimitPerUser[user]
if !ok {
- // Each user can only add 10 new domains per day.
+ // Each Codeberg user can only add 10 new domains per day.
userLimit = equalizer.NewTokenBucket(10, time.Hour*24)
- c.acmeClientCertificateLimitPerUser[user] = userLimit
+ acmeClientCertificateLimitPerUser[user] = userLimit
}
if !userLimit.Ask() {
- return fmt.Errorf("user '%s' error: %w", user, ErrUserRateLimitExceeded)
+ return errors.New("rate limit exceeded: 10 certificates per user per 24 hours")
}
return nil
}
-func (c *AcmeClient) retrieveCertFromDB(sni, mainDomainSuffix string, useDnsProvider bool, certDB database.CertDB) (*tls.Certificate, error) {
+var (
+ acmeClient, mainDomainAcmeClient *lego.Client
+ acmeClientCertificateLimitPerUser = map[string]*equalizer.TokenBucket{}
+)
+
+// rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
+// TODO: when this is used a lot, we probably have to think of a somewhat better solution?
+var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15*time.Minute)
+
+// rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
+var acmeClientRequestLimit = equalizer.NewTokenBucket(5, 1*time.Second)
+
+type AcmeTLSChallengeProvider struct {
+ challengeCache cache.SetGetKey
+}
+
+// make sure AcmeTLSChallengeProvider match Provider interface
+var _ challenge.Provider = AcmeTLSChallengeProvider{}
+
+func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
+ return a.challengeCache.Set(domain, keyAuth, 1*time.Hour)
+}
+
+func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
+ a.challengeCache.Remove(domain)
+ return nil
+}
+
+type AcmeHTTPChallengeProvider struct {
+ challengeCache cache.SetGetKey
+}
+
+// make sure AcmeHTTPChallengeProvider match Provider interface
+var _ challenge.Provider = AcmeHTTPChallengeProvider{}
+
+func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
+ return a.challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
+}
+
+func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
+ a.challengeCache.Remove(domain + "/" + token)
+ return nil
+}
+
+func retrieveCertFromDB(sni, mainDomainSuffix []byte, dnsProvider string, acmeUseRateLimits bool, certDB database.CertDB) (tls.Certificate, bool) {
// parse certificate from database
- res, err := certDB.Get(sni)
+ res, err := certDB.Get(string(sni))
if err != nil {
- return nil, err
- } else if res == nil {
- return nil, database.ErrNotFound
+ panic(err) // TODO: no panic
+ }
+ if res == nil {
+ return tls.Certificate{}, false
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
- return nil, err
+ panic(err)
}
// TODO: document & put into own function
- if !strings.EqualFold(sni, mainDomainSuffix) {
+ if !bytes.Equal(sni, mainDomainSuffix) {
tlsCertificate.Leaf, err = x509.ParseCertificate(tlsCertificate.Certificate[0])
if err != nil {
- return nil, fmt.Errorf("error parsing leaf tlsCert: %w", err)
+ panic(err)
}
// renew certificates 7 days before they expire
- if tlsCertificate.Leaf.NotAfter.Before(time.Now().Add(7 * 24 * time.Hour)) {
- // TODO: use ValidTill of custom cert struct
+ if !tlsCertificate.Leaf.NotAfter.After(time.Now().Add(7 * 24 * time.Hour)) {
+ // TODO: add ValidUntil to custom res struct
if res.CSR != nil && len(res.CSR) > 0 {
// CSR stores the time when the renewal shall be tried again
nextTryUnix, err := strconv.ParseInt(string(res.CSR), 10, 64)
if err == nil && time.Now().Before(time.Unix(nextTryUnix, 0)) {
- return &tlsCertificate, nil
+ return tlsCertificate, true
}
}
- // TODO: make a queue ?
go (func() {
res.CSR = nil // acme client doesn't like CSR to be set
- if _, err := c.obtainCert(c.legoClient, []string{sni}, res, "", useDnsProvider, mainDomainSuffix, certDB); err != nil {
- log.Error().Msgf("Couldn't renew certificate for %s: %v", sni, err)
+ tlsCertificate, err = obtainCert(acmeClient, []string{string(sni)}, res, "", dnsProvider, mainDomainSuffix, acmeUseRateLimits, certDB)
+ if err != nil {
+ log.Error().Msgf("Couldn't renew certificate for %s: %v", string(sni), err)
}
})()
}
}
- return &tlsCertificate, nil
+ return tlsCertificate, true
}
-func (c *AcmeClient) obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string, useDnsProvider bool, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
+var obtainLocks = sync.Map{}
+
+func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user, dnsProvider string, mainDomainSuffix []byte, acmeUseRateLimits bool, keyDatabase database.CertDB) (tls.Certificate, error) {
name := strings.TrimPrefix(domains[0], "*")
- if useDnsProvider && len(domains[0]) > 0 && domains[0][0] == '*' {
+ if dnsProvider == "" && len(domains[0]) > 0 && domains[0][0] == '*' {
domains = domains[1:]
}
// lock to avoid simultaneous requests
- _, working := c.obtainLocks.LoadOrStore(name, struct{}{})
+ _, working := obtainLocks.LoadOrStore(name, struct{}{})
if working {
for working {
time.Sleep(100 * time.Millisecond)
- _, working = c.obtainLocks.Load(name)
+ _, working = obtainLocks.Load(name)
}
- cert, err := c.retrieveCertFromDB(name, mainDomainSuffix, useDnsProvider, keyDatabase)
- if err != nil {
- return nil, fmt.Errorf("certificate failed in synchronous request: %w", err)
+ cert, ok := retrieveCertFromDB([]byte(name), mainDomainSuffix, dnsProvider, acmeUseRateLimits, keyDatabase)
+ if !ok {
+ return tls.Certificate{}, errors.New("certificate failed in synchronous request")
}
return cert, nil
}
- defer c.obtainLocks.Delete(name)
+ defer obtainLocks.Delete(name)
if acmeClient == nil {
- return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!", mainDomainSuffix, keyDatabase)
+ return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!", string(mainDomainSuffix), keyDatabase), nil
}
// request actual cert
var res *certificate.Resource
var err error
if renew != nil && renew.CertURL != "" {
- if c.acmeUseRateLimits {
- c.acmeClientRequestLimit.Take()
+ if acmeUseRateLimits {
+ acmeClientRequestLimit.Take()
}
log.Debug().Msgf("Renewing certificate for: %v", domains)
res, err = acmeClient.Certificate.Renew(*renew, true, false, "")
if err != nil {
log.Error().Err(err).Msgf("Couldn't renew certificate for %v, trying to request a new one", domains)
- if c.acmeUseRateLimits {
- c.acmeClientFailLimit.Take()
- }
res = nil
}
}
if res == nil {
if user != "" {
- if err := c.checkUserLimit(user); err != nil {
- return nil, err
+ if err := checkUserLimit(user); err != nil {
+ return tls.Certificate{}, err
}
}
- if c.acmeUseRateLimits {
- c.acmeClientOrderLimit.Take()
- c.acmeClientRequestLimit.Take()
+ if acmeUseRateLimits {
+ acmeClientOrderLimit.Take()
+ acmeClientRequestLimit.Take()
}
log.Debug().Msgf("Re-requesting new certificate for %v", domains)
res, err = acmeClient.Certificate.Obtain(certificate.ObtainRequest{
@@ -256,58 +296,163 @@ func (c *AcmeClient) obtainCert(acmeClient *lego.Client, domains []string, renew
Bundle: true,
MustStaple: false,
})
- if c.acmeUseRateLimits && err != nil {
- c.acmeClientFailLimit.Take()
- }
}
if err != nil {
log.Error().Err(err).Msgf("Couldn't obtain again a certificate or %v", domains)
if renew != nil && renew.CertURL != "" {
tlsCertificate, err := tls.X509KeyPair(renew.Certificate, renew.PrivateKey)
- if err != nil {
- mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
- if err2 != nil {
- return nil, errors.Join(err, err2)
- }
- return mockC, err
- }
- leaf, err := leaf(&tlsCertificate)
- if err == nil && leaf.NotAfter.After(time.Now()) {
+ if err == nil && tlsCertificate.Leaf.NotAfter.After(time.Now()) {
// avoid sending a mock cert instead of a still valid cert, instead abuse CSR field to store time to try again at
renew.CSR = []byte(strconv.FormatInt(time.Now().Add(6*time.Hour).Unix(), 10))
if err := keyDatabase.Put(name, renew); err != nil {
- mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
- if err2 != nil {
- return nil, errors.Join(err, err2)
- }
- return mockC, err
+ return mockCert(domains[0], err.Error(), string(mainDomainSuffix), keyDatabase), err
}
- return &tlsCertificate, nil
+ return tlsCertificate, nil
}
}
- return mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
+ return mockCert(domains[0], err.Error(), string(mainDomainSuffix), keyDatabase), err
}
log.Debug().Msgf("Obtained certificate for %v", domains)
if err := keyDatabase.Put(name, res); err != nil {
- return nil, err
+ return tls.Certificate{}, err
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
- return nil, err
+ return tls.Certificate{}, err
}
- return &tlsCertificate, nil
+ return tlsCertificate, nil
}
-func SetupMainDomainCertificates(mainDomainSuffix string, acmeClient *AcmeClient, certDB database.CertDB) error {
+func SetupAcmeConfig(acmeAPI, acmeMail, acmeEabHmac, acmeEabKID string, acmeAcceptTerms bool) (*lego.Config, error) {
+ const configFile = "acme-account.json"
+ var myAcmeAccount AcmeAccount
+ var myAcmeConfig *lego.Config
+
+ if account, err := os.ReadFile(configFile); err == nil {
+ if err := json.Unmarshal(account, &myAcmeAccount); err != nil {
+ return nil, err
+ }
+ myAcmeAccount.Key, err = certcrypto.ParsePEMPrivateKey([]byte(myAcmeAccount.KeyPEM))
+ if err != nil {
+ return nil, err
+ }
+ myAcmeConfig = lego.NewConfig(&myAcmeAccount)
+ myAcmeConfig.CADirURL = acmeAPI
+ myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
+
+ // Validate Config
+ _, err := lego.NewClient(myAcmeConfig)
+ if err != nil {
+ // TODO: should we fail hard instead?
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ }
+ return myAcmeConfig, nil
+ } else if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ myAcmeAccount = AcmeAccount{
+ Email: acmeMail,
+ Key: privateKey,
+ KeyPEM: string(certcrypto.PEMEncode(privateKey)),
+ }
+ myAcmeConfig = lego.NewConfig(&myAcmeAccount)
+ myAcmeConfig.CADirURL = acmeAPI
+ myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
+ tempClient, err := lego.NewClient(myAcmeConfig)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ // accept terms & log in to EAB
+ if acmeEabKID == "" || acmeEabHmac == "" {
+ reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: acmeAcceptTerms})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
+ } else {
+ myAcmeAccount.Registration = reg
+ }
+ } else {
+ reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
+ TermsOfServiceAgreed: acmeAcceptTerms,
+ Kid: acmeEabKID,
+ HmacEncoded: acmeEabHmac,
+ })
+ if err != nil {
+ log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
+ } else {
+ myAcmeAccount.Registration = reg
+ }
+ }
+
+ if myAcmeAccount.Registration != nil {
+ acmeAccountJSON, err := json.Marshal(myAcmeAccount)
+ if err != nil {
+ log.Error().Err(err).Msg("json.Marshalfailed, waiting for manual restart to avoid rate limits")
+ select {}
+ }
+ err = os.WriteFile(configFile, acmeAccountJSON, 0o600)
+ if err != nil {
+ log.Error().Err(err).Msg("os.WriteFile failed, waiting for manual restart to avoid rate limits")
+ select {}
+ }
+ }
+ }
+
+ return myAcmeConfig, nil
+}
+
+func SetupCertificates(mainDomainSuffix []byte, dnsProvider string, acmeConfig *lego.Config, acmeUseRateLimits, enableHTTPServer bool, challengeCache cache.SetGetKey, certDB database.CertDB) error {
// getting main cert before ACME account so that we can fail here without hitting rate limits
- mainCertBytes, err := certDB.Get(mainDomainSuffix)
- if err != nil && !errors.Is(err, database.ErrNotFound) {
- return fmt.Errorf("cert database is not working: %w", err)
+ mainCertBytes, err := certDB.Get(string(mainDomainSuffix))
+ if err != nil {
+ return fmt.Errorf("cert database is not working")
+ }
+
+ acmeClient, err = lego.NewClient(acmeConfig)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ err = acmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
+ }
+ if enableHTTPServer {
+ err = acmeClient.Challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{challengeCache})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create HTTP-01 provider")
+ }
+ }
+ }
+
+ mainDomainAcmeClient, err = lego.NewClient(acmeConfig)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ if dnsProvider == "" {
+ // using mock server, don't use wildcard certs
+ err := mainDomainAcmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
+ }
+ } else {
+ provider, err := dns.NewDNSChallengeProviderByName(dnsProvider)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create DNS Challenge provider")
+ }
+ err = mainDomainAcmeClient.Challenge.SetDNS01Provider(provider)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create DNS-01 provider")
+ }
+ }
}
if mainCertBytes == nil {
- _, err = acmeClient.obtainCert(acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, nil, "", true, mainDomainSuffix, certDB)
+ _, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(mainDomainSuffix), string(mainDomainSuffix[1:])}, nil, "", dnsProvider, mainDomainSuffix, acmeUseRateLimits, certDB)
if err != nil {
log.Error().Err(err).Msg("Couldn't renew main domain certificate, continuing with mock certs only")
}
@@ -316,45 +461,58 @@ func SetupMainDomainCertificates(mainDomainSuffix string, acmeClient *AcmeClient
return nil
}
-func MaintainCertDB(ctx context.Context, interval time.Duration, acmeClient *AcmeClient, mainDomainSuffix string, certDB database.CertDB) {
+func MaintainCertDB(ctx context.Context, interval time.Duration, mainDomainSuffix []byte, dnsProvider string, acmeUseRateLimits bool, certDB database.CertDB) {
for {
- // delete expired certs that will be invalid until next clean up
- threshold := time.Now().Add(interval)
+ // clean up expired certs
+ now := time.Now()
expiredCertCount := 0
+ keyDatabaseIterator := certDB.Items()
+ key, resBytes, err := keyDatabaseIterator.Next()
+ for err == nil {
+ if !bytes.Equal(key, mainDomainSuffix) {
+ resGob := bytes.NewBuffer(resBytes)
+ resDec := gob.NewDecoder(resGob)
+ res := &certificate.Resource{}
+ err = resDec.Decode(res)
+ if err != nil {
+ panic(err)
+ }
- certs, err := certDB.Items(0, 0)
- if err != nil {
- log.Error().Err(err).Msg("could not get certs from list")
- } else {
- for _, cert := range certs {
- if !strings.EqualFold(cert.Domain, strings.TrimPrefix(mainDomainSuffix, ".")) {
- if time.Unix(cert.ValidTill, 0).Before(threshold) {
- err := certDB.Delete(cert.Domain)
- if err != nil {
- log.Error().Err(err).Msgf("Deleting expired certificate for %q failed", cert.Domain)
- } else {
- expiredCertCount++
- }
+ tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
+ if err != nil || !tlsCertificates[0].NotAfter.After(now) {
+ err := certDB.Delete(string(key))
+ if err != nil {
+ log.Error().Err(err).Msgf("Deleting expired certificate for %q failed", string(key))
+ } else {
+ expiredCertCount++
}
}
}
- log.Debug().Msgf("Removed %d expired certificates from the database", expiredCertCount)
+ key, resBytes, err = keyDatabaseIterator.Next()
+ }
+ log.Debug().Msgf("Removed %d expired certificates from the database", expiredCertCount)
+
+ // compact the database
+ msg, err := certDB.Compact()
+ if err != nil {
+ log.Error().Err(err).Msg("Compacting key database failed")
+ } else {
+ log.Debug().Msgf("Compacted key database: %s", msg)
}
// update main cert
- res, err := certDB.Get(mainDomainSuffix)
+ res, err := certDB.Get(string(mainDomainSuffix))
if err != nil {
log.Error().Msgf("Couldn't get cert for domain %q", mainDomainSuffix)
} else if res == nil {
- log.Error().Msgf("Couldn't renew certificate for main domain %q expected main domain cert to exist, but it's missing - seems like the database is corrupted", mainDomainSuffix)
+ log.Error().Msgf("Couldn't renew certificate for main domain %q expected main domain cert to exist, but it's missing - seems like the database is corrupted", string(mainDomainSuffix))
} else {
tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
- if err != nil {
- log.Error().Err(fmt.Errorf("could not parse cert for mainDomainSuffix: %w", err))
- } else if tlsCertificates[0].NotAfter.Before(time.Now().Add(30 * 24 * time.Hour)) {
- // renew main certificate 30 days before it expires
+
+ // renew main certificate 30 days before it expires
+ if !tlsCertificates[0].NotAfter.After(time.Now().Add(30 * 24 * time.Hour)) {
go (func() {
- _, err = acmeClient.obtainCert(acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, res, "", true, mainDomainSuffix, certDB)
+ _, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(mainDomainSuffix), string(mainDomainSuffix[1:])}, res, "", dnsProvider, mainDomainSuffix, acmeUseRateLimits, certDB)
if err != nil {
log.Error().Err(err).Msg("Couldn't renew certificate for main domain")
}
@@ -369,12 +527,3 @@ func MaintainCertDB(ctx context.Context, interval time.Duration, acmeClient *Acm
}
}
}
-
-// leaf returns the parsed leaf certificate, either from c.leaf or by parsing
-// the corresponding c.Certificate[0].
-func leaf(c *tls.Certificate) (*x509.Certificate, error) {
- if c.Leaf != nil {
- return c.Leaf, nil
- }
- return x509.ParseCertificate(c.Certificate[0])
-}
diff --git a/server/certificates/mock.go b/server/certificates/mock.go
index a28d0f4..0e87e6e 100644
--- a/server/certificates/mock.go
+++ b/server/certificates/mock.go
@@ -13,15 +13,14 @@ import (
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/certificate"
- "github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/database"
)
-func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
+func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB) tls.Certificate {
key, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048)
if err != nil {
- return nil, err
+ panic(err)
}
template := x509.Certificate{
@@ -53,7 +52,7 @@ func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB)
key,
)
if err != nil {
- return nil, err
+ panic(err)
}
out := &bytes.Buffer{}
@@ -62,7 +61,7 @@ func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB)
Type: "CERTIFICATE",
})
if err != nil {
- return nil, err
+ panic(err)
}
outBytes := out.Bytes()
res := &certificate.Resource{
@@ -76,12 +75,12 @@ func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB)
databaseName = mainDomainSuffix
}
if err := keyDatabase.Put(databaseName, res); err != nil {
- log.Error().Err(err)
+ panic(err)
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
- return nil, err
+ panic(err)
}
- return &tlsCertificate, nil
+ return tlsCertificate
}
diff --git a/server/certificates/mock_test.go b/server/certificates/mock_test.go
index 644e8a9..1cbd1f6 100644
--- a/server/certificates/mock_test.go
+++ b/server/certificates/mock_test.go
@@ -3,18 +3,14 @@ package certificates
import (
"testing"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/mock"
-
"codeberg.org/codeberg/pages/server/database"
+ "github.com/stretchr/testify/assert"
)
func TestMockCert(t *testing.T) {
- db := database.NewMockCertDB(t)
- db.Mock.On("Put", mock.Anything, mock.Anything).Return(nil)
-
- cert, err := mockCert("example.com", "some error msg", "codeberg.page", db)
+ db, err := database.NewTmpDB()
assert.NoError(t, err)
+ cert := mockCert("example.com", "some error msg", "codeberg.page", db)
if assert.NotEmpty(t, cert) {
assert.NotEmpty(t, cert.Certificate)
}
diff --git a/server/context/context.go b/server/context/context.go
deleted file mode 100644
index 6650164..0000000
--- a/server/context/context.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package context
-
-import (
- stdContext "context"
- "net/http"
-
- "codeberg.org/codeberg/pages/server/utils"
-)
-
-type Context struct {
- RespWriter http.ResponseWriter
- Req *http.Request
- StatusCode int
-}
-
-func New(w http.ResponseWriter, r *http.Request) *Context {
- return &Context{
- RespWriter: w,
- Req: r,
- StatusCode: http.StatusOK,
- }
-}
-
-func (c *Context) Context() stdContext.Context {
- if c.Req != nil {
- return c.Req.Context()
- }
- return stdContext.Background()
-}
-
-func (c *Context) Response() *http.Response {
- if c.Req != nil && c.Req.Response != nil {
- return c.Req.Response
- }
- return nil
-}
-
-func (c *Context) String(raw string, status ...int) {
- code := http.StatusOK
- if len(status) != 0 {
- code = status[0]
- }
- c.RespWriter.WriteHeader(code)
- _, _ = c.RespWriter.Write([]byte(raw))
-}
-
-func (c *Context) Redirect(uri string, statusCode int) {
- http.Redirect(c.RespWriter, c.Req, uri, statusCode)
-}
-
-// Path returns the cleaned requested path.
-func (c *Context) Path() string {
- return utils.CleanPath(c.Req.URL.Path)
-}
-
-func (c *Context) Host() string {
- return c.Req.URL.Host
-}
-
-func (c *Context) TrimHostPort() string {
- return utils.TrimHostPort(c.Req.Host)
-}
diff --git a/server/database/interface.go b/server/database/interface.go
index 7fdbae7..3ba3efc 100644
--- a/server/database/interface.go
+++ b/server/database/interface.go
@@ -1,78 +1,15 @@
package database
import (
- "fmt"
-
- "github.com/go-acme/lego/v4/certcrypto"
+ "github.com/akrylysov/pogreb"
"github.com/go-acme/lego/v4/certificate"
- "github.com/rs/zerolog/log"
)
-//go:generate go install github.com/vektra/mockery/v2@latest
-//go:generate mockery --name CertDB --output . --filename mock.go --inpackage --case underscore
-
type CertDB interface {
Close() error
Put(name string, cert *certificate.Resource) error
Get(name string) (*certificate.Resource, error)
Delete(key string) error
- Items(page, pageSize int) ([]*Cert, error)
-}
-
-type Cert struct {
- Domain string `xorm:"pk NOT NULL UNIQUE 'domain'"`
- Created int64 `xorm:"created NOT NULL DEFAULT 0 'created'"`
- Updated int64 `xorm:"updated NOT NULL DEFAULT 0 'updated'"`
- ValidTill int64 `xorm:" NOT NULL DEFAULT 0 'valid_till'"`
- // certificate.Resource
- CertURL string `xorm:"'cert_url'"`
- CertStableURL string `xorm:"'cert_stable_url'"`
- PrivateKey []byte `xorm:"'private_key'"`
- Certificate []byte `xorm:"'certificate'"`
- IssuerCertificate []byte `xorm:"'issuer_certificate'"`
-}
-
-func (c Cert) Raw() *certificate.Resource {
- return &certificate.Resource{
- Domain: c.Domain,
- CertURL: c.CertURL,
- CertStableURL: c.CertStableURL,
- PrivateKey: c.PrivateKey,
- Certificate: c.Certificate,
- IssuerCertificate: c.IssuerCertificate,
- }
-}
-
-func toCert(name string, c *certificate.Resource) (*Cert, error) {
- tlsCertificates, err := certcrypto.ParsePEMBundle(c.Certificate)
- if err != nil {
- return nil, err
- }
- if len(tlsCertificates) == 0 || tlsCertificates[0] == nil {
- err := fmt.Errorf("parsed cert resource has no cert")
- log.Error().Err(err).Str("domain", c.Domain).Msgf("cert: %v", c)
- return nil, err
- }
- validTill := tlsCertificates[0].NotAfter.Unix()
-
- // handle wildcard certs
- if name[:1] == "." {
- name = "*" + name
- }
- if name != c.Domain {
- err := fmt.Errorf("domain key '%s' and cert domain '%s' not equal", name, c.Domain)
- log.Error().Err(err).Msg("toCert conversion did discover mismatch")
- // TODO: fail hard: return nil, err
- }
-
- return &Cert{
- Domain: c.Domain,
- ValidTill: validTill,
-
- CertURL: c.CertURL,
- CertStableURL: c.CertStableURL,
- PrivateKey: c.PrivateKey,
- Certificate: c.Certificate,
- IssuerCertificate: c.IssuerCertificate,
- }, nil
+ Compact() (string, error)
+ Items() *pogreb.ItemIterator
}
diff --git a/server/database/mock.go b/server/database/mock.go
index e7e2c38..e6c1b5a 100644
--- a/server/database/mock.go
+++ b/server/database/mock.go
@@ -1,122 +1,55 @@
-// Code generated by mockery v2.20.0. DO NOT EDIT.
-
package database
import (
- certificate "github.com/go-acme/lego/v4/certificate"
- mock "github.com/stretchr/testify/mock"
+ "fmt"
+ "time"
+
+ "github.com/OrlovEvgeny/go-mcache"
+ "github.com/akrylysov/pogreb"
+ "github.com/go-acme/lego/v4/certificate"
)
-// MockCertDB is an autogenerated mock type for the CertDB type
-type MockCertDB struct {
- mock.Mock
+var _ CertDB = tmpDB{}
+
+type tmpDB struct {
+ intern *mcache.CacheDriver
+ ttl time.Duration
}
-// Close provides a mock function with given fields:
-func (_m *MockCertDB) Close() error {
- ret := _m.Called()
-
- var r0 error
- if rf, ok := ret.Get(0).(func() error); ok {
- r0 = rf()
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (p tmpDB) Close() error {
+ _ = p.intern.Close()
+ return nil
}
-// Delete provides a mock function with given fields: key
-func (_m *MockCertDB) Delete(key string) error {
- ret := _m.Called(key)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(string) error); ok {
- r0 = rf(key)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (p tmpDB) Put(name string, cert *certificate.Resource) error {
+ return p.intern.Set(name, cert, p.ttl)
}
-// Get provides a mock function with given fields: name
-func (_m *MockCertDB) Get(name string) (*certificate.Resource, error) {
- ret := _m.Called(name)
-
- var r0 *certificate.Resource
- var r1 error
- if rf, ok := ret.Get(0).(func(string) (*certificate.Resource, error)); ok {
- return rf(name)
+func (p tmpDB) Get(name string) (*certificate.Resource, error) {
+ cert, has := p.intern.Get(name)
+ if !has {
+ return nil, fmt.Errorf("cert for '%s' not found", name)
}
- if rf, ok := ret.Get(0).(func(string) *certificate.Resource); ok {
- r0 = rf(name)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*certificate.Resource)
- }
- }
-
- if rf, ok := ret.Get(1).(func(string) error); ok {
- r1 = rf(name)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+ return cert.(*certificate.Resource), nil
}
-// Items provides a mock function with given fields: page, pageSize
-func (_m *MockCertDB) Items(page int, pageSize int) ([]*Cert, error) {
- ret := _m.Called(page, pageSize)
-
- var r0 []*Cert
- var r1 error
- if rf, ok := ret.Get(0).(func(int, int) ([]*Cert, error)); ok {
- return rf(page, pageSize)
- }
- if rf, ok := ret.Get(0).(func(int, int) []*Cert); ok {
- r0 = rf(page, pageSize)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]*Cert)
- }
- }
-
- if rf, ok := ret.Get(1).(func(int, int) error); ok {
- r1 = rf(page, pageSize)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+func (p tmpDB) Delete(key string) error {
+ p.intern.Remove(key)
+ return nil
}
-// Put provides a mock function with given fields: name, cert
-func (_m *MockCertDB) Put(name string, cert *certificate.Resource) error {
- ret := _m.Called(name, cert)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(string, *certificate.Resource) error); ok {
- r0 = rf(name, cert)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+func (p tmpDB) Compact() (string, error) {
+ p.intern.Truncate()
+ return "Truncate done", nil
}
-type mockConstructorTestingTNewMockCertDB interface {
- mock.TestingT
- Cleanup(func())
+func (p tmpDB) Items() *pogreb.ItemIterator {
+ panic("ItemIterator not implemented for tmpDB")
}
-// NewMockCertDB creates a new instance of MockCertDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewMockCertDB(t mockConstructorTestingTNewMockCertDB) *MockCertDB {
- mock := &MockCertDB{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
+func NewTmpDB() (CertDB, error) {
+ return &tmpDB{
+ intern: mcache.New(),
+ ttl: time.Minute,
+ }, nil
}
diff --git a/server/database/setup.go b/server/database/setup.go
new file mode 100644
index 0000000..1c5a0af
--- /dev/null
+++ b/server/database/setup.go
@@ -0,0 +1,109 @@
+package database
+
+import (
+ "bytes"
+ "context"
+ "encoding/gob"
+ "fmt"
+ "time"
+
+ "github.com/akrylysov/pogreb"
+ "github.com/akrylysov/pogreb/fs"
+ "github.com/go-acme/lego/v4/certificate"
+ "github.com/rs/zerolog/log"
+)
+
+var _ CertDB = aDB{}
+
+type aDB struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ intern *pogreb.DB
+ syncInterval time.Duration
+}
+
+func (p aDB) Close() error {
+ p.cancel()
+ return p.intern.Sync()
+}
+
+func (p aDB) Put(name string, cert *certificate.Resource) error {
+ var resGob bytes.Buffer
+ if err := gob.NewEncoder(&resGob).Encode(cert); err != nil {
+ return err
+ }
+ return p.intern.Put([]byte(name), resGob.Bytes())
+}
+
+func (p aDB) Get(name string) (*certificate.Resource, error) {
+ cert := &certificate.Resource{}
+ resBytes, err := p.intern.Get([]byte(name))
+ if err != nil {
+ return nil, err
+ }
+ if resBytes == nil {
+ return nil, nil
+ }
+ if err = gob.NewDecoder(bytes.NewBuffer(resBytes)).Decode(cert); err != nil {
+ return nil, err
+ }
+ return cert, nil
+}
+
+func (p aDB) Delete(key string) error {
+ return p.intern.Delete([]byte(key))
+}
+
+func (p aDB) Compact() (string, error) {
+ result, err := p.intern.Compact()
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%+v", result), nil
+}
+
+func (p aDB) Items() *pogreb.ItemIterator {
+ return p.intern.Items()
+}
+
+var _ CertDB = &aDB{}
+
+func (p aDB) sync() {
+ for {
+ err := p.intern.Sync()
+ if err != nil {
+ log.Error().Err(err).Msg("Syncing cert database failed")
+ }
+ select {
+ case <-p.ctx.Done():
+ return
+ case <-time.After(p.syncInterval):
+ }
+ }
+}
+
+func New(path string) (CertDB, error) {
+ if path == "" {
+ return nil, fmt.Errorf("path not set")
+ }
+ db, err := pogreb.Open(path, &pogreb.Options{
+ BackgroundSyncInterval: 30 * time.Second,
+ BackgroundCompactionInterval: 6 * time.Hour,
+ FileSystem: fs.OSMMap,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ result := &aDB{
+ ctx: ctx,
+ cancel: cancel,
+ intern: db,
+ syncInterval: 5 * time.Minute,
+ }
+
+ go result.sync()
+
+ return result, nil
+}
diff --git a/server/database/xorm.go b/server/database/xorm.go
deleted file mode 100644
index 217b6d1..0000000
--- a/server/database/xorm.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package database
-
-import (
- "errors"
- "fmt"
-
- "github.com/rs/zerolog/log"
-
- "github.com/go-acme/lego/v4/certificate"
- "xorm.io/xorm"
-
- // register sql driver
- _ "github.com/go-sql-driver/mysql"
- _ "github.com/lib/pq"
- _ "github.com/mattn/go-sqlite3"
-)
-
-var _ CertDB = xDB{}
-
-var ErrNotFound = errors.New("entry not found")
-
-type xDB struct {
- engine *xorm.Engine
-}
-
-func NewXormDB(dbType, dbConn string) (CertDB, error) {
- if !supportedDriver(dbType) {
- return nil, fmt.Errorf("not supported db type '%s'", dbType)
- }
- if dbConn == "" {
- return nil, fmt.Errorf("no db connection provided")
- }
-
- e, err := xorm.NewEngine(dbType, dbConn)
- if err != nil {
- return nil, err
- }
-
- if err := e.Sync2(new(Cert)); err != nil {
- return nil, fmt.Errorf("could not sync db model :%w", err)
- }
-
- return &xDB{
- engine: e,
- }, nil
-}
-
-func (x xDB) Close() error {
- return x.engine.Close()
-}
-
-func (x xDB) Put(domain string, cert *certificate.Resource) error {
- log.Trace().Str("domain", cert.Domain).Msg("inserting cert to db")
-
- domain = integrationTestReplacements(domain)
- c, err := toCert(domain, cert)
- if err != nil {
- return err
- }
-
- sess := x.engine.NewSession()
- if err := sess.Begin(); err != nil {
- return err
- }
- defer sess.Close()
-
- if exist, _ := sess.ID(c.Domain).Exist(new(Cert)); exist {
- if _, err := sess.ID(c.Domain).Update(c); err != nil {
- return err
- }
- } else {
- if _, err = sess.Insert(c); err != nil {
- return err
- }
- }
-
- return sess.Commit()
-}
-
-func (x xDB) Get(domain string) (*certificate.Resource, error) {
- // handle wildcard certs
- if domain[:1] == "." {
- domain = "*" + domain
- }
- domain = integrationTestReplacements(domain)
-
- cert := new(Cert)
- log.Trace().Str("domain", domain).Msg("get cert from db")
- if found, err := x.engine.ID(domain).Get(cert); err != nil {
- return nil, err
- } else if !found {
- return nil, fmt.Errorf("%w: name='%s'", ErrNotFound, domain)
- }
- return cert.Raw(), nil
-}
-
-func (x xDB) Delete(domain string) error {
- // handle wildcard certs
- if domain[:1] == "." {
- domain = "*" + domain
- }
- domain = integrationTestReplacements(domain)
-
- log.Trace().Str("domain", domain).Msg("delete cert from db")
- _, err := x.engine.ID(domain).Delete(new(Cert))
- return err
-}
-
-// Items return al certs from db, if pageSize is 0 it does not use limit
-func (x xDB) Items(page, pageSize int) ([]*Cert, error) {
- // paginated return
- if pageSize > 0 {
- certs := make([]*Cert, 0, pageSize)
- if page >= 0 {
- page = 1
- }
- err := x.engine.Limit(pageSize, (page-1)*pageSize).Find(&certs)
- return certs, err
- }
-
- // return all
- certs := make([]*Cert, 0, 64)
- err := x.engine.Find(&certs)
- return certs, err
-}
-
-// Supported database drivers
-const (
- DriverSqlite = "sqlite3"
- DriverMysql = "mysql"
- DriverPostgres = "postgres"
-)
-
-func supportedDriver(driver string) bool {
- switch driver {
- case DriverMysql, DriverPostgres, DriverSqlite:
- return true
- default:
- return false
- }
-}
-
-// integrationTestReplacements is needed because integration tests use a single domain cert,
-// while production use a wildcard cert
-// TODO: find a better way to handle this
-func integrationTestReplacements(domainKey string) string {
- if domainKey == "*.localhost.mock.directory" {
- return "localhost.mock.directory"
- }
- return domainKey
-}
diff --git a/server/database/xorm_test.go b/server/database/xorm_test.go
deleted file mode 100644
index 50d8a7f..0000000
--- a/server/database/xorm_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package database
-
-import (
- "errors"
- "testing"
-
- "github.com/go-acme/lego/v4/certificate"
- "github.com/stretchr/testify/assert"
- "xorm.io/xorm"
-)
-
-func newTestDB(t *testing.T) *xDB {
- e, err := xorm.NewEngine("sqlite3", ":memory:")
- assert.NoError(t, err)
- assert.NoError(t, e.Sync2(new(Cert)))
- return &xDB{engine: e}
-}
-
-func TestSanitizeWildcardCerts(t *testing.T) {
- certDB := newTestDB(t)
-
- _, err := certDB.Get(".not.found")
- assert.True(t, errors.Is(err, ErrNotFound))
-
- // TODO: cert key and domain mismatch are don not fail hard jet
- // https://codeberg.org/Codeberg/pages-server/src/commit/d8595cee882e53d7f44f1ddc4ef8a1f7b8f31d8d/server/database/interface.go#L64
- //
- // assert.Error(t, certDB.Put(".wildcard.de", &certificate.Resource{
- // Domain: "*.localhost.mock.directory",
- // Certificate: localhost_mock_directory_certificate,
- // }))
-
- // insert new wildcard cert
- assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
- Domain: "*.wildcard.de",
- Certificate: localhost_mock_directory_certificate,
- }))
-
- // update existing cert
- assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
- Domain: "*.wildcard.de",
- Certificate: localhost_mock_directory_certificate,
- }))
-
- c1, err := certDB.Get(".wildcard.de")
- assert.NoError(t, err)
- c2, err := certDB.Get("*.wildcard.de")
- assert.NoError(t, err)
- assert.EqualValues(t, c1, c2)
-}
-
-var localhost_mock_directory_certificate = []byte(`-----BEGIN CERTIFICATE-----
-MIIDczCCAlugAwIBAgIIJyBaXHmLk6gwDQYJKoZIhvcNAQELBQAwKDEmMCQGA1UE
-AxMdUGViYmxlIEludGVybWVkaWF0ZSBDQSA0OWE0ZmIwHhcNMjMwMjEwMDEwOTA2
-WhcNMjgwMjEwMDEwOTA2WjAjMSEwHwYDVQQDExhsb2NhbGhvc3QubW9jay5kaXJl
-Y3RvcnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIU/CjzS7t62Gj
-neEMqvP7sn99ULT7AEUzEfWL05fWG2z714qcUg1hXkZLgdVDgmsCpplyddip7+2t
-ZH/9rLPLMqJphzvOL4CF6jDLbeifETtKyjnt9vUZFnnNWcP3tu8lo8iYSl08qsUI
-Pp/hiEriAQzCDjTbR5m9xUPNPYqxzcS4ALzmmCX9Qfc4CuuhMkdv2G4TT7rylWrA
-SCSRPnGjeA7pCByfNrO/uXbxmzl3sMO3k5sqgMkx1QIHEN412V8+vtx88mt2sM6k
-xjzGZWWKXlRq+oufIKX9KPplhsCjMH6E3VNAzgOPYDqXagtUcGmLWghURltO8Mt2
-zwM6OgjjAgMBAAGjgaUwgaIwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG
-AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBSMQvlJ1755
-sarf8i1KNqj7s5o/aDAfBgNVHSMEGDAWgBTcZcxJMhWdP7MecHCCpNkFURC/YzAj
-BgNVHREEHDAaghhsb2NhbGhvc3QubW9jay5kaXJlY3RvcnkwDQYJKoZIhvcNAQEL
-BQADggEBACcd7TT28OWwzQN2PcH0aG38JX5Wp2iOS/unDCfWjNAztXHW7nBDMxza
-VtyebkJfccexpuVuOsjOX+bww0vtEYIvKX3/GbkhogksBrNkE0sJZtMnZWMR33wa
-YxAy/kJBTmLi02r8fX9ZhwjldStHKBav4USuP7DXZjrgX7LFQhR4LIDrPaYqQRZ8
-ltC3mM9LDQ9rQyIFP5cSBMO3RUAm4I8JyLoOdb/9G2uxjHr7r6eG1g8DmLYSKBsQ
-mWGQDOYgR3cGltDe2yMxM++yHY+b1uhxGOWMrDA1+1k7yI19LL8Ifi2FMovDfu/X
-JxYk1NNNtdctwaYJFenmGQvDaIq1KgE=
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIDUDCCAjigAwIBAgIIKBJ7IIA6W1swDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
-AxMVUGViYmxlIFJvb3QgQ0EgNTdmZjE2MCAXDTIzMDIwOTA1MzMxMloYDzIwNTMw
-MjA5MDUzMzEyWjAoMSYwJAYDVQQDEx1QZWJibGUgSW50ZXJtZWRpYXRlIENBIDQ5
-YTRmYjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOvlqRx8SXQFWo2
-gFCiXxls53eENcyr8+meFyjgnS853eEvplaPxoa2MREKd+ZYxM8EMMfj2XGvR3UI
-aqR5QyLQ9ihuRqvQo4fG91usBHgH+vDbGPdMX8gDmm9HgnmtOVhSKJU+M2jfE1SW
-UuWB9xOa3LMreTXbTNfZEMoXf+GcWZMbx5WPgEga3DvfmV+RsfNvB55eD7YAyZgF
-ZnQ3Dskmnxxlkz0EGgd7rqhFHHNB9jARlL22gITADwoWZidlr3ciM9DISymRKQ0c
-mRN15fQjNWdtuREgJlpXecbYQMGhdTOmFrqdHkveD1o63rGSC4z+s/APV6xIbcRp
-aNpO7L8CAwEAAaOBgzCBgDAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYB
-BQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNxlzEky
-FZ0/sx5wcIKk2QVREL9jMB8GA1UdIwQYMBaAFOqfkm9rebIz4z0SDIKW5edLg5JM
-MA0GCSqGSIb3DQEBCwUAA4IBAQBRG9AHEnyj2fKzVDDbQaKHjAF5jh0gwyHoIeRK
-FkP9mQNSWxhvPWI0tK/E49LopzmVuzSbDd5kZsaii73rAs6f6Rf9W5veo3AFSEad
-stM+Zv0f2vWB38nuvkoCRLXMX+QUeuL65rKxdEpyArBju4L3/PqAZRgMLcrH+ak8
-nvw5RdAq+Km/ZWyJgGikK6cfMmh91YALCDFnoWUWrCjkBaBFKrG59ONV9f0IQX07
-aNfFXFCF5l466xw9dHjw5iaFib10cpY3iq4kyPYIMs6uaewkCtxWKKjiozM4g4w3
-HqwyUyZ52WUJOJ/6G9DJLDtN3fgGR+IAp8BhYd5CqOscnt3h
------END CERTIFICATE-----`)
diff --git a/server/dns/const.go b/server/dns/const.go
new file mode 100644
index 0000000..bb2413b
--- /dev/null
+++ b/server/dns/const.go
@@ -0,0 +1,6 @@
+package dns
+
+import "time"
+
+// lookupCacheTimeout specifies the timeout for the DNS lookup cache.
+var lookupCacheTimeout = 15 * time.Minute
diff --git a/server/dns/dns.go b/server/dns/dns.go
index c11b278..d30f1e0 100644
--- a/server/dns/dns.go
+++ b/server/dns/dns.go
@@ -3,16 +3,10 @@ package dns
import (
"net"
"strings"
- "time"
"codeberg.org/codeberg/pages/server/cache"
)
-// lookupCacheTimeout specifies the timeout for the DNS lookup cache.
-var lookupCacheTimeout = 15 * time.Minute
-
-var defaultPagesRepo = "pages"
-
// GetTargetFromDNS searches for CNAME or TXT entries on the request domain ending with MainDomainSuffix.
// If everything is fine, it returns the target data.
func GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch string, dnsLookupCache cache.SetGetKey) (targetOwner, targetRepo, targetBranch string) {
@@ -30,7 +24,7 @@ func GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch string, dnsLo
names, err := net.LookupTXT(domain)
if err == nil {
for _, name := range names {
- name = strings.TrimSuffix(strings.TrimSpace(name), ".")
+			name = strings.TrimSuffix(strings.TrimSpace(name), ".")
if strings.HasSuffix(name, mainDomainSuffix) {
cname = name
break
@@ -52,9 +46,9 @@ func GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch string, dnsLo
targetBranch = cnameParts[len(cnameParts)-3]
}
if targetRepo == "" {
-		targetRepo = defaultPagesRepo
+		targetRepo = "pages"
	}
-	if targetBranch == "" && targetRepo != defaultPagesRepo {
+	if targetBranch == "" && targetRepo != "pages" {
targetBranch = firstDefaultBranch
}
// if targetBranch is still empty, the caller must find the default branch
diff --git a/server/gitea/cache.go b/server/gitea/cache.go
index af61edf..932ff3c 100644
--- a/server/gitea/cache.go
+++ b/server/gitea/cache.go
@@ -1,115 +1,12 @@
package gitea
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "time"
-
- "github.com/rs/zerolog/log"
-
- "codeberg.org/codeberg/pages/server/cache"
-)
-
-const (
- // defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
- defaultBranchCacheTimeout = 15 * time.Minute
-
- // branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
- // than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
- // picked up faster, while still allowing the content to be cached longer if nothing changes.
- branchExistenceCacheTimeout = 5 * time.Minute
-
- // fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
- // on your available memory.
- // TODO: move as option into cache interface
- fileCacheTimeout = 5 * time.Minute
-
- // fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
- fileCacheSizeLimit = int64(1000 * 1000)
-)
-
type FileResponse struct {
- Exists bool
- IsSymlink bool
- ETag string
- MimeType string
- Body []byte
+ Exists bool
+ ETag []byte
+ MimeType string
+ Body []byte
}
func (f FileResponse) IsEmpty() bool {
return len(f.Body) != 0
}
-
-func (f FileResponse) createHttpResponse(cacheKey string) (header http.Header, statusCode int) {
- header = make(http.Header)
-
- if f.Exists {
- statusCode = http.StatusOK
- } else {
- statusCode = http.StatusNotFound
- }
-
- if f.IsSymlink {
- header.Set(giteaObjectTypeHeader, objTypeSymlink)
- }
- header.Set(ETagHeader, f.ETag)
- header.Set(ContentTypeHeader, f.MimeType)
- header.Set(ContentLengthHeader, fmt.Sprintf("%d", len(f.Body)))
- header.Set(PagesCacheIndicatorHeader, "true")
-
- log.Trace().Msgf("fileCache for %q used", cacheKey)
- return header, statusCode
-}
-
-type BranchTimestamp struct {
- Branch string
- Timestamp time.Time
- notFound bool
-}
-
-type writeCacheReader struct {
- originalReader io.ReadCloser
- buffer *bytes.Buffer
- rileResponse *FileResponse
- cacheKey string
- cache cache.SetGetKey
- hasError bool
-}
-
-func (t *writeCacheReader) Read(p []byte) (n int, err error) {
- n, err = t.originalReader.Read(p)
- if err != nil && err != io.EOF {
- log.Trace().Err(err).Msgf("[cache] original reader for %q has returned an error", t.cacheKey)
- t.hasError = true
- } else if n > 0 {
- _, _ = t.buffer.Write(p[:n])
- }
- return
-}
-
-func (t *writeCacheReader) Close() error {
- if !t.hasError {
- fc := *t.rileResponse
- fc.Body = t.buffer.Bytes()
- _ = t.cache.Set(t.cacheKey, fc, fileCacheTimeout)
- }
- log.Trace().Msgf("cacheReader for %q saved=%t closed", t.cacheKey, !t.hasError)
- return t.originalReader.Close()
-}
-
-func (f FileResponse) CreateCacheReader(r io.ReadCloser, cache cache.SetGetKey, cacheKey string) io.ReadCloser {
- if r == nil || cache == nil || cacheKey == "" {
- log.Error().Msg("could not create CacheReader")
- return nil
- }
-
- return &writeCacheReader{
- originalReader: r,
- buffer: bytes.NewBuffer(make([]byte, 0)),
- rileResponse: &f,
- cache: cache,
- cacheKey: cacheKey,
- }
-}
diff --git a/server/gitea/client.go b/server/gitea/client.go
index f3bda54..16cba84 100644
--- a/server/gitea/client.go
+++ b/server/gitea/client.go
@@ -1,295 +1,142 @@
package gitea
import (
- "bytes"
"errors"
"fmt"
- "io"
- "mime"
- "net/http"
"net/url"
- "path"
- "strconv"
"strings"
"time"
- "code.gitea.io/sdk/gitea"
"github.com/rs/zerolog/log"
+ "github.com/valyala/fasthttp"
+ "github.com/valyala/fastjson"
+)
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/version"
+const (
+ giteaAPIRepos = "/api/v1/repos/"
+ giteaObjectTypeHeader = "X-Gitea-Object-Type"
)
var ErrorNotFound = errors.New("not found")
-const (
- // cache key prefixes
- branchTimestampCacheKeyPrefix = "branchTime"
- defaultBranchCacheKeyPrefix = "defaultBranch"
- rawContentCacheKeyPrefix = "rawContent"
-
- // pages server
- PagesCacheIndicatorHeader = "X-Pages-Cache"
- symlinkReadLimit = 10000
-
- // gitea
- giteaObjectTypeHeader = "X-Gitea-Object-Type"
- objTypeSymlink = "symlink"
-
- // std
- ETagHeader = "ETag"
- ContentTypeHeader = "Content-Type"
- ContentLengthHeader = "Content-Length"
-)
-
type Client struct {
- sdkClient *gitea.Client
- responseCache cache.SetGetKey
-
- giteaRoot string
+ giteaRoot string
+ giteaAPIToken string
+ fastClient *fasthttp.Client
+ infoTimeout time.Duration
+ contentTimeout time.Duration
followSymlinks bool
supportLFS bool
-
- forbiddenMimeTypes map[string]bool
- defaultMimeType string
}
-func NewClient(giteaRoot, giteaAPIToken string, respCache cache.SetGetKey, followSymlinks, supportLFS bool) (*Client, error) {
- rootURL, err := url.Parse(giteaRoot)
- if err != nil {
- return nil, err
+// TODO: once golang v1.19 is min requirement, we can switch to 'JoinPath()' of 'net/url' package
+func joinURL(baseURL string, paths ...string) string {
+ p := make([]string, 0, len(paths))
+ for i := range paths {
+ path := strings.TrimSpace(paths[i])
+ path = strings.Trim(path, "/")
+ if len(path) != 0 {
+ p = append(p, path)
+ }
}
+
+ return baseURL + "/" + strings.Join(p, "/")
+}
+
+func NewClient(giteaRoot, giteaAPIToken string, followSymlinks, supportLFS bool) (*Client, error) {
+ rootURL, err := url.Parse(giteaRoot)
	if err == nil {
		giteaRoot = strings.Trim(rootURL.String(), "/")
	}
- stdClient := http.Client{Timeout: 10 * time.Second}
-
- // TODO: pass down
- var (
- forbiddenMimeTypes map[string]bool
- defaultMimeType string
- )
-
- if forbiddenMimeTypes == nil {
- forbiddenMimeTypes = make(map[string]bool)
- }
- if defaultMimeType == "" {
- defaultMimeType = "application/octet-stream"
- }
-
- sdk, err := gitea.NewClient(
- giteaRoot,
- gitea.SetHTTPClient(&stdClient),
- gitea.SetToken(giteaAPIToken),
- gitea.SetUserAgent("pages-server/"+version.Version),
- )
-
return &Client{
- sdkClient: sdk,
- responseCache: respCache,
-
- giteaRoot: giteaRoot,
+ giteaRoot: giteaRoot,
+ giteaAPIToken: giteaAPIToken,
+ infoTimeout: 5 * time.Second,
+ contentTimeout: 10 * time.Second,
+ fastClient: getFastHTTPClient(),
followSymlinks: followSymlinks,
supportLFS: supportLFS,
-
- forbiddenMimeTypes: forbiddenMimeTypes,
- defaultMimeType: defaultMimeType,
}, err
}
-func (client *Client) ContentWebLink(targetOwner, targetRepo, branch, resource string) string {
- return path.Join(client.giteaRoot, targetOwner, targetRepo, "src/branch", branch, resource)
-}
-
func (client *Client) GiteaRawContent(targetOwner, targetRepo, ref, resource string) ([]byte, error) {
- reader, _, _, err := client.ServeRawContent(targetOwner, targetRepo, ref, resource)
+ resp, err := client.ServeRawContent(targetOwner, targetRepo, ref, resource)
if err != nil {
return nil, err
}
- defer reader.Close()
- return io.ReadAll(reader)
+ return resp.Body(), nil
}
-func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource string) (io.ReadCloser, http.Header, int, error) {
- cacheKey := fmt.Sprintf("%s/%s/%s|%s|%s", rawContentCacheKeyPrefix, targetOwner, targetRepo, ref, resource)
- log := log.With().Str("cache_key", cacheKey).Logger()
-
- // handle if cache entry exist
- if cache, ok := client.responseCache.Get(cacheKey); ok {
- cache := cache.(FileResponse)
- cachedHeader, cachedStatusCode := cache.createHttpResponse(cacheKey)
- // TODO: check against some timestamp mismatch?!?
- if cache.Exists {
- if cache.IsSymlink {
- linkDest := string(cache.Body)
- log.Debug().Msgf("[cache] follow symlink from %q to %q", resource, linkDest)
- return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
- } else {
- log.Debug().Msg("[cache] return bytes")
- return io.NopCloser(bytes.NewReader(cache.Body)), cachedHeader, cachedStatusCode, nil
- }
- } else {
- return nil, cachedHeader, cachedStatusCode, ErrorNotFound
- }
+func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource string) (*fasthttp.Response, error) {
+ var apiURL string
+ if client.supportLFS {
+ apiURL = joinURL(client.giteaRoot, giteaAPIRepos, targetOwner, targetRepo, "media", resource+"?ref="+url.QueryEscape(ref))
+ } else {
+ apiURL = joinURL(client.giteaRoot, giteaAPIRepos, targetOwner, targetRepo, "raw", resource+"?ref="+url.QueryEscape(ref))
}
-
- // not in cache, open reader via gitea api
- reader, resp, err := client.sdkClient.GetFileReader(targetOwner, targetRepo, ref, resource, client.supportLFS)
- if resp != nil {
- switch resp.StatusCode {
- case http.StatusOK:
- // first handle symlinks
- {
- objType := resp.Header.Get(giteaObjectTypeHeader)
- log.Trace().Msgf("server raw content object %q", objType)
- if client.followSymlinks && objType == objTypeSymlink {
- defer reader.Close()
- // read limited chars for symlink
- linkDestBytes, err := io.ReadAll(io.LimitReader(reader, symlinkReadLimit))
- if err != nil {
- return nil, nil, http.StatusInternalServerError, err
- }
- linkDest := strings.TrimSpace(string(linkDestBytes))
-
- // handle relative links
- // we first remove the link from the path, and make a relative join (resolve parent paths like "/../" too)
- linkDest = path.Join(path.Dir(resource), linkDest)
-
- // we store symlink not content to reduce duplicates in cache
- if err := client.responseCache.Set(cacheKey, FileResponse{
- Exists: true,
- IsSymlink: true,
- Body: []byte(linkDest),
- ETag: resp.Header.Get(ETagHeader),
- }, fileCacheTimeout); err != nil {
- log.Error().Err(err).Msg("[cache] error on cache write")
- }
-
- log.Debug().Msgf("follow symlink from %q to %q", resource, linkDest)
- return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
- }
- }
-
- // now we are sure it's content so set the MIME type
- mimeType := client.getMimeTypeByExtension(resource)
- resp.Response.Header.Set(ContentTypeHeader, mimeType)
-
- if !shouldRespBeSavedToCache(resp.Response) {
- return reader, resp.Response.Header, resp.StatusCode, err
- }
-
- // now we write to cache and respond at the same time
- fileResp := FileResponse{
- Exists: true,
- ETag: resp.Header.Get(ETagHeader),
- MimeType: mimeType,
- }
- return fileResp.CreateCacheReader(reader, client.responseCache, cacheKey), resp.Response.Header, resp.StatusCode, nil
-
- case http.StatusNotFound:
- if err := client.responseCache.Set(cacheKey, FileResponse{
- Exists: false,
- ETag: resp.Header.Get(ETagHeader),
- }, fileCacheTimeout); err != nil {
- log.Error().Err(err).Msg("[cache] error on cache write")
- }
-
- return nil, resp.Response.Header, http.StatusNotFound, ErrorNotFound
- default:
- return nil, resp.Response.Header, resp.StatusCode, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
- }
- }
- return nil, nil, http.StatusInternalServerError, err
-}
-
-func (client *Client) GiteaGetRepoBranchTimestamp(repoOwner, repoName, branchName string) (*BranchTimestamp, error) {
- cacheKey := fmt.Sprintf("%s/%s/%s/%s", branchTimestampCacheKeyPrefix, repoOwner, repoName, branchName)
-
- if stamp, ok := client.responseCache.Get(cacheKey); ok && stamp != nil {
- branchTimeStamp := stamp.(*BranchTimestamp)
- if branchTimeStamp.notFound {
- log.Trace().Msgf("[cache] use branch %q not found", branchName)
- return &BranchTimestamp{}, ErrorNotFound
- }
- log.Trace().Msgf("[cache] use branch %q exist", branchName)
- return branchTimeStamp, nil
- }
-
- branch, resp, err := client.sdkClient.GetRepoBranch(repoOwner, repoName, branchName)
+ resp, err := client.do(client.contentTimeout, apiURL)
if err != nil {
- if resp != nil && resp.StatusCode == http.StatusNotFound {
- log.Trace().Msgf("[cache] set cache branch %q not found", branchName)
- if err := client.responseCache.Set(cacheKey, &BranchTimestamp{Branch: branchName, notFound: true}, branchExistenceCacheTimeout); err != nil {
- log.Error().Err(err).Msg("[cache] error on cache write")
- }
- return &BranchTimestamp{}, ErrorNotFound
+ return nil, err
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode() {
+ case fasthttp.StatusOK:
+ objType := string(resp.Header.Peek(giteaObjectTypeHeader))
+ log.Trace().Msgf("server raw content object: %s", objType)
+ if client.followSymlinks && objType == "symlink" {
+ // TODO: limit to 1000 chars if we switched to std
+ linkDest := strings.TrimSpace(string(resp.Body()))
+ log.Debug().Msgf("follow symlink from '%s' to '%s'", resource, linkDest)
+ return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
}
- return &BranchTimestamp{}, err
- }
- if resp.StatusCode != http.StatusOK {
- return &BranchTimestamp{}, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
- }
- stamp := &BranchTimestamp{
- Branch: branch.Name,
- Timestamp: branch.Commit.Timestamp,
- }
+ return resp, nil
- log.Trace().Msgf("set cache branch [%s] exist", branchName)
- if err := client.responseCache.Set(cacheKey, stamp, branchExistenceCacheTimeout); err != nil {
- log.Error().Err(err).Msg("[cache] error on cache write")
+ case fasthttp.StatusNotFound:
+ return nil, ErrorNotFound
+
+ default:
+ return nil, fmt.Errorf("unexpected status code '%d'", resp.StatusCode())
}
- return stamp, nil
+}
+
+func (client *Client) GiteaGetRepoBranchTimestamp(repoOwner, repoName, branchName string) (time.Time, error) {
+ url := joinURL(client.giteaRoot, giteaAPIRepos, repoOwner, repoName, "branches", branchName)
+ res, err := client.do(client.infoTimeout, url)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if res.StatusCode() != fasthttp.StatusOK {
+ return time.Time{}, fmt.Errorf("unexpected status code '%d'", res.StatusCode())
+ }
+ return time.Parse(time.RFC3339, fastjson.GetString(res.Body(), "commit", "timestamp"))
}
func (client *Client) GiteaGetRepoDefaultBranch(repoOwner, repoName string) (string, error) {
- cacheKey := fmt.Sprintf("%s/%s/%s", defaultBranchCacheKeyPrefix, repoOwner, repoName)
-
- if branch, ok := client.responseCache.Get(cacheKey); ok && branch != nil {
- return branch.(string), nil
- }
-
- repo, resp, err := client.sdkClient.GetRepo(repoOwner, repoName)
+ url := joinURL(client.giteaRoot, giteaAPIRepos, repoOwner, repoName)
+ res, err := client.do(client.infoTimeout, url)
if err != nil {
return "", err
}
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
+ if res.StatusCode() != fasthttp.StatusOK {
+ return "", fmt.Errorf("unexpected status code '%d'", res.StatusCode())
}
-
- branch := repo.DefaultBranch
- if err := client.responseCache.Set(cacheKey, branch, defaultBranchCacheTimeout); err != nil {
- log.Error().Err(err).Msg("[cache] error on cache write")
- }
- return branch, nil
+ return fastjson.GetString(res.Body(), "default_branch"), nil
}
-func (client *Client) getMimeTypeByExtension(resource string) string {
- mimeType := mime.TypeByExtension(path.Ext(resource))
- mimeTypeSplit := strings.SplitN(mimeType, ";", 2)
- if client.forbiddenMimeTypes[mimeTypeSplit[0]] || mimeType == "" {
- mimeType = client.defaultMimeType
- }
- log.Trace().Msgf("probe mime of %q is %q", resource, mimeType)
- return mimeType
-}
-
-func shouldRespBeSavedToCache(resp *http.Response) bool {
- if resp == nil {
- return false
- }
-
- contentLengthRaw := resp.Header.Get(ContentLengthHeader)
- if contentLengthRaw == "" {
- return false
- }
-
- contentLength, err := strconv.ParseInt(contentLengthRaw, 10, 64)
- if err != nil {
- log.Error().Err(err).Msg("could not parse content length")
- }
-
- // if content to big or could not be determined we not cache it
- return contentLength > 0 && contentLength < fileCacheSizeLimit
+func (client *Client) do(timeout time.Duration, url string) (*fasthttp.Response, error) {
+ req := fasthttp.AcquireRequest()
+
+ req.SetRequestURI(url)
+ req.Header.Set(fasthttp.HeaderAuthorization, "token "+client.giteaAPIToken)
+ res := fasthttp.AcquireResponse()
+
+ err := client.fastClient.DoTimeout(req, res, timeout)
+
+ return res, err
}
diff --git a/server/gitea/client_test.go b/server/gitea/client_test.go
new file mode 100644
index 0000000..7dbad68
--- /dev/null
+++ b/server/gitea/client_test.go
@@ -0,0 +1,23 @@
+package gitea
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestJoinURL(t *testing.T) {
+ baseURL := ""
+ assert.EqualValues(t, "/", joinURL(baseURL))
+ assert.EqualValues(t, "/", joinURL(baseURL, "", ""))
+
+ baseURL = "http://wwow.url.com"
+ assert.EqualValues(t, "http://wwow.url.com/a/b/c/d", joinURL(baseURL, "a", "b/c/", "d"))
+
+ baseURL = "http://wow.url.com/subpath/2"
+ assert.EqualValues(t, "http://wow.url.com/subpath/2/content.pdf", joinURL(baseURL, "/content.pdf"))
+ assert.EqualValues(t, "http://wow.url.com/subpath/2/wonderful.jpg", joinURL(baseURL, "wonderful.jpg"))
+ assert.EqualValues(t, "http://wow.url.com/subpath/2/raw/wonderful.jpg?ref=main", joinURL(baseURL, "raw", "wonderful.jpg"+"?ref="+url.QueryEscape("main")))
+ assert.EqualValues(t, "http://wow.url.com/subpath/2/raw/wonderful.jpg%3Fref=main", joinURL(baseURL, "raw", "wonderful.jpg%3Fref=main"))
+}
diff --git a/server/gitea/fasthttp.go b/server/gitea/fasthttp.go
new file mode 100644
index 0000000..4ff0f4a
--- /dev/null
+++ b/server/gitea/fasthttp.go
@@ -0,0 +1,15 @@
+package gitea
+
+import (
+ "time"
+
+ "github.com/valyala/fasthttp"
+)
+
+func getFastHTTPClient() *fasthttp.Client {
+ return &fasthttp.Client{
+ MaxConnDuration: 60 * time.Second,
+ MaxConnWaitTimeout: 1000 * time.Millisecond,
+ MaxConnsPerHost: 128 * 16, // TODO: adjust bottlenecks for best performance with Gitea!
+ }
+}
diff --git a/server/handler.go b/server/handler.go
new file mode 100644
index 0000000..eaa2feb
--- /dev/null
+++ b/server/handler.go
@@ -0,0 +1,314 @@
+package server
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "github.com/valyala/fasthttp"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/dns"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+ "codeberg.org/codeberg/pages/server/utils"
+ "codeberg.org/codeberg/pages/server/version"
+)
+
+// Handler handles a single HTTP request to the web server.
+func Handler(mainDomainSuffix, rawDomain []byte,
+ giteaClient *gitea.Client,
+ giteaRoot, rawInfoPage string,
+ blacklistedPaths, allowedCorsDomains [][]byte,
+ dnsLookupCache, canonicalDomainCache, branchTimestampCache, fileResponseCache cache.SetGetKey,
+ defaultBranches []string,
+) func(ctx *fasthttp.RequestCtx) {
+ return func(ctx *fasthttp.RequestCtx) {
+ log := log.With().Strs("Handler", []string{string(ctx.Request.Host()), string(ctx.Request.Header.RequestURI())}).Logger()
+
+ ctx.Response.Header.Set("Server", "CodebergPages/"+version.Version)
+
+ // Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
+ ctx.Response.Header.Set("Referrer-Policy", "strict-origin-when-cross-origin")
+
+ // Enable browser caching for up to 10 minutes
+ ctx.Response.Header.Set("Cache-Control", "public, max-age=600")
+
+ trimmedHost := utils.TrimHostPort(ctx.Request.Host())
+
+ // Add HSTS for RawDomain and MainDomainSuffix
+ if hsts := GetHSTSHeader(trimmedHost, mainDomainSuffix, rawDomain); hsts != "" {
+ ctx.Response.Header.Set("Strict-Transport-Security", hsts)
+ }
+
+ // Block all methods not required for static pages
+ if !ctx.IsGet() && !ctx.IsHead() && !ctx.IsOptions() {
+ ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
+ ctx.Error("Method not allowed", fasthttp.StatusMethodNotAllowed)
+ return
+ }
+
+ // Block blacklisted paths (like ACME challenges)
+ for _, blacklistedPath := range blacklistedPaths {
+ if bytes.HasPrefix(ctx.Path(), blacklistedPath) {
+ html.ReturnErrorPage(ctx, fasthttp.StatusForbidden)
+ return
+ }
+ }
+
+ // Allow CORS for specified domains
+ allowCors := false
+ for _, allowedCorsDomain := range allowedCorsDomains {
+ if bytes.Equal(trimmedHost, allowedCorsDomain) {
+ allowCors = true
+ break
+ }
+ }
+ if allowCors {
+ ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
+ ctx.Response.Header.Set("Access-Control-Allow-Methods", "GET, HEAD")
+ }
+ ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
+ if ctx.IsOptions() {
+ ctx.Response.Header.SetStatusCode(fasthttp.StatusNoContent)
+ return
+ }
+
+ // Prepare request information to Gitea
+ var targetOwner, targetRepo, targetBranch, targetPath string
+ targetOptions := &upstream.Options{
+ TryIndexPages: true,
+ }
+
+ // tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty, it will
+ // also disallow search indexing and add a Link header to the canonical URL.
+ tryBranch := func(log zerolog.Logger, repo, branch string, path []string, canonicalLink string) bool {
+ if repo == "" {
+ log.Debug().Msg("tryBranch: repo is empty")
+ return false
+ }
+
+ // Replace "~" to "/" so we can access branch that contains slash character
+ // Branch name cannot contain "~" so doing this is okay
+ branch = strings.ReplaceAll(branch, "~", "/")
+
+ // Check if the branch exists, otherwise treat it as a file path
+ branchTimestampResult := upstream.GetBranchTimestamp(giteaClient, targetOwner, repo, branch, branchTimestampCache)
+ if branchTimestampResult == nil {
+ log.Debug().Msg("tryBranch: branch doesn't exist")
+ return false
+ }
+
+ // Branch exists, use it
+ targetRepo = repo
+ targetPath = strings.Trim(strings.Join(path, "/"), "/")
+ targetBranch = branchTimestampResult.Branch
+
+ targetOptions.BranchTimestamp = branchTimestampResult.Timestamp
+
+ if canonicalLink != "" {
+ // Hide from search machines & add canonical link
+ ctx.Response.Header.Set("X-Robots-Tag", "noarchive, noindex")
+ ctx.Response.Header.Set("Link",
+ strings.NewReplacer("%b", targetBranch, "%p", targetPath).Replace(canonicalLink)+
+ "; rel=\"canonical\"",
+ )
+ }
+
+ log.Debug().Msg("tryBranch: true")
+ return true
+ }
+
+ log.Debug().Msg("Preparing")
+ if rawDomain != nil && bytes.Equal(trimmedHost, rawDomain) {
+ // Serve raw content from RawDomain
+ log.Debug().Msg("Serving raw domain")
+
+ targetOptions.TryIndexPages = false
+ if targetOptions.ForbiddenMimeTypes == nil {
+ targetOptions.ForbiddenMimeTypes = make(map[string]bool)
+ }
+ targetOptions.ForbiddenMimeTypes["text/html"] = true
+ targetOptions.DefaultMimeType = "text/plain; charset=utf-8"
+
+ pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
+ if len(pathElements) < 2 {
+ // https://{RawDomain}/{owner}/{repo}[/@{branch}]/{path} is required
+ ctx.Redirect(rawInfoPage, fasthttp.StatusTemporaryRedirect)
+ return
+ }
+ targetOwner = pathElements[0]
+ targetRepo = pathElements[1]
+
+ // raw.codeberg.org/example/myrepo/@main/index.html
+ if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
+ log.Debug().Msg("Preparing raw domain, now trying with specified branch")
+ if tryBranch(log,
+ targetRepo, pathElements[2][1:], pathElements[3:],
+ giteaRoot+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
+ ) {
+ log.Info().Msg("tryBranch, now trying upstream 1")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ return
+ }
+ log.Warn().Msg("Path missed a branch")
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ return
+ }
+
+ log.Debug().Msg("Preparing raw domain, now trying with default branch")
+ tryBranch(log,
+ targetRepo, "", pathElements[2:],
+ giteaRoot+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
+ )
+ log.Info().Msg("tryBranch, now trying upstream 2")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ return
+
+ } else if bytes.HasSuffix(trimmedHost, mainDomainSuffix) {
+ // Serve pages from subdomains of MainDomainSuffix
+ log.Info().Msg("Serve pages from main domain suffix")
+
+ pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
+ targetOwner = string(bytes.TrimSuffix(trimmedHost, mainDomainSuffix))
+ targetRepo = pathElements[0]
+ targetPath = strings.Trim(strings.Join(pathElements[1:], "/"), "/")
+
+ if targetOwner == "www" {
+ // www.codeberg.page redirects to codeberg.page // TODO: rm hardcoded - use cname?
+ ctx.Redirect("https://"+string(mainDomainSuffix[1:])+string(ctx.Path()), fasthttp.StatusPermanentRedirect)
+ return
+ }
+
+ // Check if the first directory is a repo with the second directory as a branch
+ // example.codeberg.page/myrepo/@main/index.html
+ if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
+ if targetRepo == "pages" {
+ // example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
+ ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
+ return
+ }
+
+ log.Debug().Msg("Preparing main domain, now trying with specified repo & branch")
+ if tryBranch(log,
+ pathElements[0], pathElements[1][1:], pathElements[2:],
+ "/"+pathElements[0]+"/%p",
+ ) {
+ log.Info().Msg("tryBranch, now trying upstream 3")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ } else {
+ log.Warn().Msg("tryBranch: upstream 3 failed")
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ }
+ return
+ }
+
+ // Check if the first directory is a branch for the "pages" repo
+ // example.codeberg.page/@main/index.html
+ if strings.HasPrefix(pathElements[0], "@") {
+ log.Debug().Msg("Preparing main domain, now trying with specified branch")
+ if tryBranch(log,
+ "pages", pathElements[0][1:], pathElements[1:], "/%p") {
+ log.Info().Msg("tryBranch, now trying upstream 4")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ } else {
+ log.Warn().Msg("tryBranch: upstream 4 failed")
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ }
+ return
+ }
+
+ for _, branch := range defaultBranches {
+ // Check if the first directory is a repo with a default branch
+ // example.codeberg.page/myrepo/index.html
+			// example.codeberg.page/{PAGES_BRANCH}/... is not allowed here.
+ log.Debug().Msg("main domain preparations, now trying with specified repo")
+ if pathElements[0] != branch && tryBranch(log,
+ pathElements[0], branch, pathElements[1:], "") {
+ log.Info().Msg("tryBranch, now trying upstream 5")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ return
+ }
+
+ // Try to use the "pages" repo on its default branch
+ // example.codeberg.page/index.html
+ log.Debug().Msg("main domain preparations, now trying with default repo/branch")
+ if tryBranch(log,
+ branch, "", pathElements, "") {
+ log.Info().Msg("tryBranch, now trying upstream 6")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ return
+ }
+ }
+
+ // Couldn't find a valid repo/branch
+
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ return
+ } else {
+ trimmedHostStr := string(trimmedHost)
+
+ // Serve pages from external domains
+ targetOwner, targetRepo, targetBranch = dns.GetTargetFromDNS(trimmedHostStr, string(mainDomainSuffix), defaultBranches[0], dnsLookupCache)
+ if targetOwner == "" {
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ return
+ }
+
+ pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
+ canonicalLink := ""
+ if strings.HasPrefix(pathElements[0], "@") {
+ targetBranch = pathElements[0][1:]
+ pathElements = pathElements[1:]
+ canonicalLink = "/%p"
+ }
+
+ // Try to use the given repo on the given branch or the default branch
+ log.Debug().Msg("Preparing custom domain, now trying with details from DNS")
+ if tryBranch(log,
+ targetRepo, targetBranch, pathElements, canonicalLink) {
+ canonicalDomain, valid := upstream.CheckCanonicalDomain(giteaClient, targetOwner, targetRepo, targetBranch, trimmedHostStr, string(mainDomainSuffix), canonicalDomainCache)
+ if !valid {
+ log.Warn().Msg("Custom domains, domain from DNS isn't valid/canonical")
+ html.ReturnErrorPage(ctx, fasthttp.StatusMisdirectedRequest)
+ return
+ } else if canonicalDomain != trimmedHostStr {
+ // only redirect if the target is also a codeberg page!
+ targetOwner, _, _ = dns.GetTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0], string(mainDomainSuffix), defaultBranches[0], dnsLookupCache)
+ if targetOwner != "" {
+ ctx.Redirect("https://"+canonicalDomain+string(ctx.RequestURI()), fasthttp.StatusTemporaryRedirect)
+ return
+ }
+
+ log.Warn().Msg("Custom domains, targetOwner from DNS is empty")
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ return
+ }
+
+			log.Info().Msg("tryBranch, now trying upstream 7")
+ tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost,
+ targetOptions, targetOwner, targetRepo, targetBranch, targetPath,
+ canonicalDomainCache, branchTimestampCache, fileResponseCache)
+ return
+ }
+
+ log.Warn().Msg("Couldn't handle request, none of the options succeed")
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
+ return
+ }
+ }
+}
diff --git a/server/handler/handler.go b/server/handler/handler.go
deleted file mode 100644
index 7da5d39..0000000
--- a/server/handler/handler.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package handler
-
-import (
- "net/http"
- "strings"
-
- "github.com/rs/zerolog/log"
-
- "codeberg.org/codeberg/pages/html"
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
-)
-
-const (
- headerAccessControlAllowOrigin = "Access-Control-Allow-Origin"
- headerAccessControlAllowMethods = "Access-Control-Allow-Methods"
- defaultPagesRepo = "pages"
-)
-
-// Handler handles a single HTTP request to the web server.
-func Handler(mainDomainSuffix, rawDomain string,
- giteaClient *gitea.Client,
- blacklistedPaths, allowedCorsDomains []string,
- defaultPagesBranches []string,
- dnsLookupCache, canonicalDomainCache, redirectsCache cache.SetGetKey,
-) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- log := log.With().Strs("Handler", []string{req.Host, req.RequestURI}).Logger()
- ctx := context.New(w, req)
-
- ctx.RespWriter.Header().Set("Server", "pages-server")
-
- // Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
- ctx.RespWriter.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
-
- // Enable browser caching for up to 10 minutes
- ctx.RespWriter.Header().Set("Cache-Control", "public, max-age=600")
-
- trimmedHost := ctx.TrimHostPort()
-
- // Add HSTS for RawDomain and MainDomainSuffix
- if hsts := getHSTSHeader(trimmedHost, mainDomainSuffix, rawDomain); hsts != "" {
- ctx.RespWriter.Header().Set("Strict-Transport-Security", hsts)
- }
-
- // Handle all http methods
- ctx.RespWriter.Header().Set("Allow", http.MethodGet+", "+http.MethodHead+", "+http.MethodOptions)
- switch ctx.Req.Method {
- case http.MethodOptions:
- // return Allow header
- ctx.RespWriter.WriteHeader(http.StatusNoContent)
- return
- case http.MethodGet,
- http.MethodHead:
- // end switch case and handle allowed requests
- break
- default:
- // Block all methods not required for static pages
- ctx.String("Method not allowed", http.StatusMethodNotAllowed)
- return
- }
-
- // Block blacklisted paths (like ACME challenges)
- for _, blacklistedPath := range blacklistedPaths {
- if strings.HasPrefix(ctx.Path(), blacklistedPath) {
- html.ReturnErrorPage(ctx, "requested path is blacklisted", http.StatusForbidden)
- return
- }
- }
-
- // Allow CORS for specified domains
- allowCors := false
- for _, allowedCorsDomain := range allowedCorsDomains {
- if strings.EqualFold(trimmedHost, allowedCorsDomain) {
- allowCors = true
- break
- }
- }
- if allowCors {
- ctx.RespWriter.Header().Set(headerAccessControlAllowOrigin, "*")
- ctx.RespWriter.Header().Set(headerAccessControlAllowMethods, http.MethodGet+", "+http.MethodHead)
- }
-
- // Prepare request information to Gitea
- pathElements := strings.Split(strings.Trim(ctx.Path(), "/"), "/")
-
- if rawDomain != "" && strings.EqualFold(trimmedHost, rawDomain) {
- log.Debug().Msg("raw domain request detected")
- handleRaw(log, ctx, giteaClient,
- mainDomainSuffix,
- trimmedHost,
- pathElements,
- canonicalDomainCache, redirectsCache)
- } else if strings.HasSuffix(trimmedHost, mainDomainSuffix) {
- log.Debug().Msg("subdomain request detected")
- handleSubDomain(log, ctx, giteaClient,
- mainDomainSuffix,
- defaultPagesBranches,
- trimmedHost,
- pathElements,
- canonicalDomainCache, redirectsCache)
- } else {
- log.Debug().Msg("custom domain request detected")
- handleCustomDomain(log, ctx, giteaClient,
- mainDomainSuffix,
- trimmedHost,
- pathElements,
- defaultPagesBranches[0],
- dnsLookupCache, canonicalDomainCache, redirectsCache)
- }
- }
-}
diff --git a/server/handler/handler_custom_domain.go b/server/handler/handler_custom_domain.go
deleted file mode 100644
index 8742be4..0000000
--- a/server/handler/handler_custom_domain.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package handler
-
-import (
- "net/http"
- "path"
- "strings"
-
- "codeberg.org/codeberg/pages/html"
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/dns"
- "codeberg.org/codeberg/pages/server/gitea"
- "codeberg.org/codeberg/pages/server/upstream"
- "github.com/rs/zerolog"
-)
-
-func handleCustomDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
- mainDomainSuffix string,
- trimmedHost string,
- pathElements []string,
- firstDefaultBranch string,
- dnsLookupCache, canonicalDomainCache, redirectsCache cache.SetGetKey,
-) {
- // Serve pages from custom domains
- targetOwner, targetRepo, targetBranch := dns.GetTargetFromDNS(trimmedHost, mainDomainSuffix, firstDefaultBranch, dnsLookupCache)
- if targetOwner == "" {
- html.ReturnErrorPage(ctx,
- "could not obtain repo owner from custom domain",
- http.StatusFailedDependency)
- return
- }
-
- pathParts := pathElements
- canonicalLink := false
- if strings.HasPrefix(pathElements[0], "@") {
- targetBranch = pathElements[0][1:]
- pathParts = pathElements[1:]
- canonicalLink = true
- }
-
- // Try to use the given repo on the given branch or the default branch
- log.Debug().Msg("custom domain preparations, now trying with details from DNS")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: targetRepo,
- TargetBranch: targetBranch,
- TargetPath: path.Join(pathParts...),
- }, canonicalLink); works {
- canonicalDomain, valid := targetOpt.CheckCanonicalDomain(giteaClient, trimmedHost, mainDomainSuffix, canonicalDomainCache)
- if !valid {
- html.ReturnErrorPage(ctx, "domain not specified in .domains file", http.StatusMisdirectedRequest)
- return
- } else if canonicalDomain != trimmedHost {
- // only redirect if the target is also a codeberg page!
- targetOwner, _, _ = dns.GetTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix, firstDefaultBranch, dnsLookupCache)
- if targetOwner != "" {
- ctx.Redirect("https://"+canonicalDomain+"/"+targetOpt.TargetPath, http.StatusTemporaryRedirect)
- return
- }
-
- html.ReturnErrorPage(ctx, "target is no codeberg page", http.StatusFailedDependency)
- return
- }
-
- log.Debug().Msg("tryBranch, now trying upstream 7")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- return
- }
-
- html.ReturnErrorPage(ctx, "could not find target for custom domain", http.StatusFailedDependency)
-}
diff --git a/server/handler/handler_raw_domain.go b/server/handler/handler_raw_domain.go
deleted file mode 100644
index caa8209..0000000
--- a/server/handler/handler_raw_domain.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package handler
-
-import (
- "fmt"
- "net/http"
- "path"
- "strings"
-
- "github.com/rs/zerolog"
-
- "codeberg.org/codeberg/pages/html"
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
- "codeberg.org/codeberg/pages/server/upstream"
-)
-
-func handleRaw(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
- mainDomainSuffix string,
- trimmedHost string,
- pathElements []string,
- canonicalDomainCache, redirectsCache cache.SetGetKey,
-) {
- // Serve raw content from RawDomain
- log.Debug().Msg("raw domain")
-
- if len(pathElements) < 2 {
- html.ReturnErrorPage(
- ctx,
- "a url in the form of https://{domain}/{owner}/{repo}[/@{branch}]/{path} is required",
- http.StatusBadRequest,
- )
-
- return
- }
-
- // raw.codeberg.org/example/myrepo/@main/index.html
- if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
- log.Debug().Msg("raw domain preparations, now trying with specified branch")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- ServeRaw: true,
- TargetOwner: pathElements[0],
- TargetRepo: pathElements[1],
- TargetBranch: pathElements[2][1:],
- TargetPath: path.Join(pathElements[3:]...),
- }, true); works {
- log.Trace().Msg("tryUpstream: serve raw domain with specified branch")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- return
- }
- log.Debug().Msg("missing branch info")
- html.ReturnErrorPage(ctx, "missing branch info", http.StatusFailedDependency)
- return
- }
-
- log.Debug().Msg("raw domain preparations, now trying with default branch")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: false,
- ServeRaw: true,
- TargetOwner: pathElements[0],
- TargetRepo: pathElements[1],
- TargetPath: path.Join(pathElements[2:]...),
- }, true); works {
- log.Trace().Msg("tryUpstream: serve raw domain with default branch")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- } else {
- html.ReturnErrorPage(ctx,
- fmt.Sprintf("raw domain could not find repo %s/%s or repo is empty", targetOpt.TargetOwner, targetOpt.TargetRepo),
- http.StatusNotFound)
- }
-}
diff --git a/server/handler/handler_sub_domain.go b/server/handler/handler_sub_domain.go
deleted file mode 100644
index 6c14393..0000000
--- a/server/handler/handler_sub_domain.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package handler
-
-import (
- "fmt"
- "net/http"
- "path"
- "strings"
-
- "github.com/rs/zerolog"
- "golang.org/x/exp/slices"
-
- "codeberg.org/codeberg/pages/html"
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
- "codeberg.org/codeberg/pages/server/upstream"
-)
-
-func handleSubDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
- mainDomainSuffix string,
- defaultPagesBranches []string,
- trimmedHost string,
- pathElements []string,
- canonicalDomainCache, redirectsCache cache.SetGetKey,
-) {
- // Serve pages from subdomains of MainDomainSuffix
- log.Debug().Msg("main domain suffix")
-
- targetOwner := strings.TrimSuffix(trimmedHost, mainDomainSuffix)
- targetRepo := pathElements[0]
-
- if targetOwner == "www" {
- // www.codeberg.page redirects to codeberg.page // TODO: rm hardcoded - use cname?
- ctx.Redirect("https://"+mainDomainSuffix[1:]+ctx.Path(), http.StatusPermanentRedirect)
- return
- }
-
- // Check if the first directory is a repo with the second directory as a branch
- // example.codeberg.page/myrepo/@main/index.html
- if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
- if targetRepo == defaultPagesRepo {
- // example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
- ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
- return
- }
-
- log.Debug().Msg("main domain preparations, now trying with specified repo & branch")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: pathElements[0],
- TargetBranch: pathElements[1][1:],
- TargetPath: path.Join(pathElements[2:]...),
- }, true); works {
- log.Trace().Msg("tryUpstream: serve with specified repo and branch")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- } else {
- html.ReturnErrorPage(
- ctx,
- formatSetBranchNotFoundMessage(pathElements[1][1:], targetOwner, pathElements[0]),
- http.StatusFailedDependency,
- )
- }
- return
- }
-
- // Check if the first directory is a branch for the defaultPagesRepo
- // example.codeberg.page/@main/index.html
- if strings.HasPrefix(pathElements[0], "@") {
- targetBranch := pathElements[0][1:]
-
- // if the default pages branch can be determined exactly, it does not need to be set
- if len(defaultPagesBranches) == 1 && slices.Contains(defaultPagesBranches, targetBranch) {
- // example.codeberg.org/@pages/... redirects to example.codeberg.org/...
- ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
- return
- }
-
- log.Debug().Msg("main domain preparations, now trying with specified branch")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: defaultPagesRepo,
- TargetBranch: targetBranch,
- TargetPath: path.Join(pathElements[1:]...),
- }, true); works {
- log.Trace().Msg("tryUpstream: serve default pages repo with specified branch")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- } else {
- html.ReturnErrorPage(
- ctx,
- formatSetBranchNotFoundMessage(targetBranch, targetOwner, defaultPagesRepo),
- http.StatusFailedDependency,
- )
- }
- return
- }
-
- for _, defaultPagesBranch := range defaultPagesBranches {
- // Check if the first directory is a repo with a default pages branch
- // example.codeberg.page/myrepo/index.html
- // example.codeberg.page/{PAGES_BRANCHE}/... is not allowed here.
- log.Debug().Msg("main domain preparations, now trying with specified repo")
- if pathElements[0] != defaultPagesBranch {
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: pathElements[0],
- TargetBranch: defaultPagesBranch,
- TargetPath: path.Join(pathElements[1:]...),
- }, false); works {
- log.Debug().Msg("tryBranch, now trying upstream 5")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- return
- }
- }
-
- // Try to use the defaultPagesRepo on an default pages branch
- // example.codeberg.page/index.html
- log.Debug().Msg("main domain preparations, now trying with default repo")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: defaultPagesRepo,
- TargetBranch: defaultPagesBranch,
- TargetPath: path.Join(pathElements...),
- }, false); works {
- log.Debug().Msg("tryBranch, now trying upstream 6")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- return
- }
- }
-
- // Try to use the defaultPagesRepo on its default branch
- // example.codeberg.page/index.html
- log.Debug().Msg("main domain preparations, now trying with default repo/branch")
- if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
- TryIndexPages: true,
- TargetOwner: targetOwner,
- TargetRepo: defaultPagesRepo,
- TargetPath: path.Join(pathElements...),
- }, false); works {
- log.Debug().Msg("tryBranch, now trying upstream 6")
- tryUpstream(ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
- return
- }
-
- // Couldn't find a valid repo/branch
- html.ReturnErrorPage(ctx,
- fmt.Sprintf("could not find a valid repository or branch for repository: %s", targetRepo),
- http.StatusNotFound)
-}
-
-func formatSetBranchNotFoundMessage(branch, owner, repo string) string {
- return fmt.Sprintf("explicitly set branch %q does not exist at %s/%s", branch, owner, repo)
-}
diff --git a/server/handler/handler_test.go b/server/handler/handler_test.go
deleted file mode 100644
index d04ebda..0000000
--- a/server/handler/handler_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package handler
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/gitea"
- "github.com/rs/zerolog/log"
-)
-
-func TestHandlerPerformance(t *testing.T) {
- giteaClient, _ := gitea.NewClient("https://codeberg.org", "", cache.NewKeyValueCache(), false, false)
- testHandler := Handler(
- "codeberg.page", "raw.codeberg.org",
- giteaClient,
- []string{"/.well-known/acme-challenge/"},
- []string{"raw.codeberg.org", "fonts.codeberg.org", "design.codeberg.org"},
- []string{"pages"},
- cache.NewKeyValueCache(),
- cache.NewKeyValueCache(),
- cache.NewKeyValueCache(),
- )
-
- testCase := func(uri string, status int) {
- t.Run(uri, func(t *testing.T) {
- req := httptest.NewRequest("GET", uri, http.NoBody)
- w := httptest.NewRecorder()
-
- log.Printf("Start: %v\n", time.Now())
- start := time.Now()
- testHandler(w, req)
- end := time.Now()
- log.Printf("Done: %v\n", time.Now())
-
- resp := w.Result()
-
- if resp.StatusCode != status {
- t.Errorf("request failed with status code %d", resp.StatusCode)
- } else {
- t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
- }
- })
- }
-
- testCase("https://mondstern.codeberg.page/", 404) // TODO: expect 200
- testCase("https://codeberg.page/", 404) // TODO: expect 200
- testCase("https://example.momar.xyz/", 424)
-}
diff --git a/server/handler/hsts.go b/server/handler/hsts.go
deleted file mode 100644
index 1ab73ae..0000000
--- a/server/handler/hsts.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package handler
-
-import (
- "strings"
-)
-
-// getHSTSHeader returns a HSTS header with includeSubdomains & preload for MainDomainSuffix and RawDomain, or an empty
-// string for custom domains.
-func getHSTSHeader(host, mainDomainSuffix, rawDomain string) string {
- if strings.HasSuffix(host, mainDomainSuffix) || strings.EqualFold(host, rawDomain) {
- return "max-age=63072000; includeSubdomains; preload"
- } else {
- return ""
- }
-}
diff --git a/server/handler/try.go b/server/handler/try.go
deleted file mode 100644
index 838ae27..0000000
--- a/server/handler/try.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package handler
-
-import (
- "net/http"
- "strings"
-
- "github.com/rs/zerolog"
-
- "codeberg.org/codeberg/pages/html"
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
- "codeberg.org/codeberg/pages/server/upstream"
-)
-
-// tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
-func tryUpstream(ctx *context.Context, giteaClient *gitea.Client,
- mainDomainSuffix, trimmedHost string,
- options *upstream.Options,
- canonicalDomainCache cache.SetGetKey,
- redirectsCache cache.SetGetKey,
-) {
- // check if a canonical domain exists on a request on MainDomain
- if strings.HasSuffix(trimmedHost, mainDomainSuffix) && !options.ServeRaw {
- canonicalDomain, _ := options.CheckCanonicalDomain(giteaClient, "", mainDomainSuffix, canonicalDomainCache)
- if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix) {
- canonicalPath := ctx.Req.RequestURI
- if options.TargetRepo != defaultPagesRepo {
- path := strings.SplitN(canonicalPath, "/", 3)
- if len(path) >= 3 {
- canonicalPath = "/" + path[2]
- }
- }
- ctx.Redirect("https://"+canonicalDomain+canonicalPath, http.StatusTemporaryRedirect)
- return
- }
- }
-
- // Add host for debugging.
- options.Host = trimmedHost
-
- // Try to request the file from the Gitea API
- if !options.Upstream(ctx, giteaClient, redirectsCache) {
- html.ReturnErrorPage(ctx, "gitea client failed", ctx.StatusCode)
- }
-}
-
-// tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty,
-// it will also disallow search indexing and add a Link header to the canonical URL.
-func tryBranch(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
- targetOptions *upstream.Options, canonicalLink bool,
-) (*upstream.Options, bool) {
- if targetOptions.TargetOwner == "" || targetOptions.TargetRepo == "" {
- log.Debug().Msg("tryBranch: owner or repo is empty")
- return nil, false
- }
-
- // Replace "~" to "/" so we can access branch that contains slash character
- // Branch name cannot contain "~" so doing this is okay
- targetOptions.TargetBranch = strings.ReplaceAll(targetOptions.TargetBranch, "~", "/")
-
- // Check if the branch exists, otherwise treat it as a file path
- branchExist, _ := targetOptions.GetBranchTimestamp(giteaClient)
- if !branchExist {
- log.Debug().Msg("tryBranch: branch doesn't exist")
- return nil, false
- }
-
- if canonicalLink {
- // Hide from search machines & add canonical link
- ctx.RespWriter.Header().Set("X-Robots-Tag", "noarchive, noindex")
- ctx.RespWriter.Header().Set("Link", targetOptions.ContentWebLink(giteaClient)+"; rel=\"canonical\"")
- }
-
- log.Debug().Msg("tryBranch: true")
- return targetOptions, true
-}
diff --git a/server/handler_test.go b/server/handler_test.go
new file mode 100644
index 0000000..a731ebf
--- /dev/null
+++ b/server/handler_test.go
@@ -0,0 +1,52 @@
+package server
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/valyala/fasthttp"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+func TestHandlerPerformance(t *testing.T) {
+ giteaRoot := "https://codeberg.org"
+ giteaClient, _ := gitea.NewClient(giteaRoot, "", false, false)
+ testHandler := Handler(
+ []byte("codeberg.page"), []byte("raw.codeberg.org"),
+ giteaClient,
+ giteaRoot, "https://docs.codeberg.org/pages/raw-content/",
+ [][]byte{[]byte("/.well-known/acme-challenge/")},
+ [][]byte{[]byte("raw.codeberg.org"), []byte("fonts.codeberg.org"), []byte("design.codeberg.org")},
+ cache.NewKeyValueCache(),
+ cache.NewKeyValueCache(),
+ cache.NewKeyValueCache(),
+ cache.NewKeyValueCache(),
+ []string{"pages"},
+ )
+
+ testCase := func(uri string, status int) {
+ ctx := &fasthttp.RequestCtx{
+ Request: *fasthttp.AcquireRequest(),
+ Response: *fasthttp.AcquireResponse(),
+ }
+ ctx.Request.SetRequestURI(uri)
+ fmt.Printf("Start: %v\n", time.Now())
+ start := time.Now()
+ testHandler(ctx)
+ end := time.Now()
+ fmt.Printf("Done: %v\n", end)
+ if ctx.Response.StatusCode() != status {
+ t.Errorf("request failed with status code %d", ctx.Response.StatusCode())
+ } else {
+ t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
+ }
+ }
+
+ testCase("https://mondstern.codeberg.page/", 424) // TODO: expect 200
+ testCase("https://mondstern.codeberg.page/", 424) // TODO: expect 200
+ testCase("https://example.momar.xyz/", 424) // TODO: expect 200
+ testCase("https://codeberg.page/", 424) // TODO: expect 200
+}
diff --git a/server/helpers.go b/server/helpers.go
new file mode 100644
index 0000000..6d55ddf
--- /dev/null
+++ b/server/helpers.go
@@ -0,0 +1,15 @@
+package server
+
+import (
+ "bytes"
+)
+
+// GetHSTSHeader returns an HSTS header with includeSubdomains & preload for MainDomainSuffix and RawDomain, or an empty
+// string for custom domains.
+func GetHSTSHeader(host, mainDomainSuffix, rawDomain []byte) string {
+ if bytes.HasSuffix(host, mainDomainSuffix) || bytes.Equal(host, rawDomain) {
+ return "max-age=63072000; includeSubdomains; preload"
+ } else {
+ return ""
+ }
+}
diff --git a/server/setup.go b/server/setup.go
new file mode 100644
index 0000000..176bb42
--- /dev/null
+++ b/server/setup.go
@@ -0,0 +1,53 @@
+package server
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/rs/zerolog/log"
+ "github.com/valyala/fasthttp"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/utils"
+)
+
+type fasthttpLogger struct{}
+
+func (fasthttpLogger) Printf(format string, args ...interface{}) {
+ log.Printf("FastHTTP: %s", fmt.Sprintf(format, args...))
+}
+
+func SetupServer(handler fasthttp.RequestHandler) *fasthttp.Server {
+ // Enable compression by wrapping the handler with the compression function provided by FastHTTP
+ compressedHandler := fasthttp.CompressHandlerBrotliLevel(handler, fasthttp.CompressBrotliBestSpeed, fasthttp.CompressBestSpeed)
+
+ return &fasthttp.Server{
+ Handler: compressedHandler,
+ DisablePreParseMultipartForm: true,
+ NoDefaultServerHeader: true,
+ NoDefaultDate: true,
+ ReadTimeout: 30 * time.Second, // needs to be this high for ACME certificates with ZeroSSL & HTTP-01 challenge
+ Logger: fasthttpLogger{},
+ }
+}
+
+func SetupHTTPACMEChallengeServer(challengeCache cache.SetGetKey) *fasthttp.Server {
+ challengePath := []byte("/.well-known/acme-challenge/")
+
+ return &fasthttp.Server{
+ Handler: func(ctx *fasthttp.RequestCtx) {
+ if bytes.HasPrefix(ctx.Path(), challengePath) {
+ challenge, ok := challengeCache.Get(string(utils.TrimHostPort(ctx.Host())) + "/" + string(bytes.TrimPrefix(ctx.Path(), challengePath)))
+ if !ok || challenge == nil {
+ ctx.SetStatusCode(http.StatusNotFound)
+ ctx.SetBodyString("no challenge for this token"); return
+ }
+ ctx.SetBodyString(challenge.(string))
+ } else {
+ ctx.Redirect("https://"+string(ctx.Host())+string(ctx.RequestURI()), http.StatusMovedPermanently)
+ }
+ },
+ }
+}
diff --git a/server/try.go b/server/try.go
new file mode 100644
index 0000000..24831c4
--- /dev/null
+++ b/server/try.go
@@ -0,0 +1,50 @@
+package server
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/valyala/fasthttp"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+)
+
+// tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
+func tryUpstream(ctx *fasthttp.RequestCtx, giteaClient *gitea.Client,
+ mainDomainSuffix, trimmedHost []byte,
+
+ targetOptions *upstream.Options,
+ targetOwner, targetRepo, targetBranch, targetPath string,
+
+ canonicalDomainCache, branchTimestampCache, fileResponseCache cache.SetGetKey,
+) {
+ // check if a canonical domain exists on a request on MainDomain
+ if bytes.HasSuffix(trimmedHost, mainDomainSuffix) {
+ canonicalDomain, _ := upstream.CheckCanonicalDomain(giteaClient, targetOwner, targetRepo, targetBranch, "", string(mainDomainSuffix), canonicalDomainCache)
+ if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], string(mainDomainSuffix)) {
+ canonicalPath := string(ctx.RequestURI())
+ if targetRepo != "pages" {
+ path := strings.SplitN(canonicalPath, "/", 3)
+ if len(path) >= 3 {
+ canonicalPath = "/" + path[2]
+ }
+ }
+ ctx.Redirect("https://"+canonicalDomain+canonicalPath, fasthttp.StatusTemporaryRedirect)
+ return
+ }
+ }
+
+ targetOptions.TargetOwner = targetOwner
+ targetOptions.TargetRepo = targetRepo
+ targetOptions.TargetBranch = targetBranch
+ targetOptions.TargetPath = targetPath
+ targetOptions.Host = string(trimmedHost)
+
+ // Try to request the file from the Gitea API
+ if !targetOptions.Upstream(ctx, giteaClient, branchTimestampCache, fileResponseCache) {
+ html.ReturnErrorPage(ctx, ctx.Response.StatusCode())
+ }
+}
diff --git a/server/upstream/const.go b/server/upstream/const.go
new file mode 100644
index 0000000..247e1d1
--- /dev/null
+++ b/server/upstream/const.go
@@ -0,0 +1,24 @@
+package upstream
+
+import "time"
+
+// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
+var defaultBranchCacheTimeout = 15 * time.Minute
+
+// branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be no longer
+// than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
+// picked up faster, while still allowing the content to be cached longer if nothing changes.
+var branchExistenceCacheTimeout = 5 * time.Minute
+
+// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
+// on your available memory.
+// TODO: move as option into cache interface
+var fileCacheTimeout = 5 * time.Minute
+
+// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
+var fileCacheSizeLimit = 1024 * 1024
+
+// canonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
+var canonicalDomainCacheTimeout = 15 * time.Minute
+
+const canonicalDomainConfig = ".domains"
diff --git a/server/upstream/domains.go b/server/upstream/domains.go
index eb30394..553c148 100644
--- a/server/upstream/domains.go
+++ b/server/upstream/domains.go
@@ -1,70 +1,50 @@
package upstream
import (
- "errors"
"strings"
- "time"
-
- "github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/gitea"
)
-// canonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
-var canonicalDomainCacheTimeout = 15 * time.Minute
-
-const canonicalDomainConfig = ".domains"
-
// CheckCanonicalDomain returns the canonical domain specified in the repo (using the `.domains` file).
-func (o *Options) CheckCanonicalDomain(giteaClient *gitea.Client, actualDomain, mainDomainSuffix string, canonicalDomainCache cache.SetGetKey) (domain string, valid bool) {
- // Check if this request is cached.
- if cachedValue, ok := canonicalDomainCache.Get(o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch); ok {
- domains := cachedValue.([]string)
+func CheckCanonicalDomain(giteaClient *gitea.Client, targetOwner, targetRepo, targetBranch, actualDomain, mainDomainSuffix string, canonicalDomainCache cache.SetGetKey) (string, bool) {
+ var (
+ domains []string
+ valid bool
+ )
+ if cachedValue, ok := canonicalDomainCache.Get(targetOwner + "/" + targetRepo + "/" + targetBranch); ok {
+ domains = cachedValue.([]string)
for _, domain := range domains {
if domain == actualDomain {
valid = true
break
}
}
- return domains[0], valid
- }
-
- body, err := giteaClient.GiteaRawContent(o.TargetOwner, o.TargetRepo, o.TargetBranch, canonicalDomainConfig)
- if err != nil && !errors.Is(err, gitea.ErrorNotFound) {
- log.Error().Err(err).Msgf("could not read %s of %s/%s", canonicalDomainConfig, o.TargetOwner, o.TargetRepo)
- }
-
- var domains []string
- for _, domain := range strings.Split(string(body), "\n") {
- domain = strings.ToLower(domain)
- domain = strings.TrimSpace(domain)
- domain = strings.TrimPrefix(domain, "http://")
- domain = strings.TrimPrefix(domain, "https://")
- if len(domain) > 0 && !strings.HasPrefix(domain, "#") && !strings.ContainsAny(domain, "\t /") && strings.ContainsRune(domain, '.') {
- domains = append(domains, domain)
+ } else {
+ body, err := giteaClient.GiteaRawContent(targetOwner, targetRepo, targetBranch, canonicalDomainConfig)
+ if err == nil {
+ for _, domain := range strings.Split(string(body), "\n") {
+ domain = strings.ToLower(domain)
+ domain = strings.TrimSpace(domain)
+ domain = strings.TrimPrefix(domain, "http://")
+ domain = strings.TrimPrefix(domain, "https://")
+ if len(domain) > 0 && !strings.HasPrefix(domain, "#") && !strings.ContainsAny(domain, "\t /") && strings.ContainsRune(domain, '.') {
+ domains = append(domains, domain)
+ }
+ if domain == actualDomain {
+ valid = true
+ }
+ }
}
- if domain == actualDomain {
+ domains = append(domains, targetOwner+mainDomainSuffix)
+ if domains[len(domains)-1] == actualDomain {
valid = true
}
+ if targetRepo != "" && targetRepo != "pages" {
+ domains[len(domains)-1] += "/" + targetRepo
+ }
+ _ = canonicalDomainCache.Set(targetOwner+"/"+targetRepo+"/"+targetBranch, domains, canonicalDomainCacheTimeout)
}
-
- // Add [owner].[pages-domain] as valid domain.
- domains = append(domains, o.TargetOwner+mainDomainSuffix)
- if domains[len(domains)-1] == actualDomain {
- valid = true
- }
-
- // If the target repository isn't called pages, add `/[repository]` to the
- // previous valid domain.
- if o.TargetRepo != "" && o.TargetRepo != "pages" {
- domains[len(domains)-1] += "/" + o.TargetRepo
- }
-
- // Add result to cache.
- _ = canonicalDomainCache.Set(o.TargetOwner+"/"+o.TargetRepo+"/"+o.TargetBranch, domains, canonicalDomainCacheTimeout)
-
- // Return the first domain from the list and return if any of the domains
- // matched the requested domain.
return domains[0], valid
}
diff --git a/server/upstream/header.go b/server/upstream/header.go
deleted file mode 100644
index 9575a3f..0000000
--- a/server/upstream/header.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package upstream
-
-import (
- "net/http"
- "time"
-
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
-)
-
-// setHeader set values to response header
-func (o *Options) setHeader(ctx *context.Context, header http.Header) {
- if eTag := header.Get(gitea.ETagHeader); eTag != "" {
- ctx.RespWriter.Header().Set(gitea.ETagHeader, eTag)
- }
- if cacheIndicator := header.Get(gitea.PagesCacheIndicatorHeader); cacheIndicator != "" {
- ctx.RespWriter.Header().Set(gitea.PagesCacheIndicatorHeader, cacheIndicator)
- }
- if length := header.Get(gitea.ContentLengthHeader); length != "" {
- ctx.RespWriter.Header().Set(gitea.ContentLengthHeader, length)
- }
- if mime := header.Get(gitea.ContentTypeHeader); mime == "" || o.ServeRaw {
- ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, rawMime)
- } else {
- ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, mime)
- }
- ctx.RespWriter.Header().Set(headerLastModified, o.BranchTimestamp.In(time.UTC).Format(time.RFC1123))
-}
diff --git a/server/upstream/helper.go b/server/upstream/helper.go
index ac0ab3f..0714dcd 100644
--- a/server/upstream/helper.go
+++ b/server/upstream/helper.go
@@ -1,47 +1,76 @@
package upstream
import (
- "errors"
- "fmt"
-
- "github.com/rs/zerolog/log"
+ "mime"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+ "codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/gitea"
)
-// GetBranchTimestamp finds the default branch (if branch is "") and save branch and it's last modification time to Options
-func (o *Options) GetBranchTimestamp(giteaClient *gitea.Client) (bool, error) {
- log := log.With().Strs("BranchInfo", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch}).Logger()
+type branchTimestamp struct {
+ Branch string
+ Timestamp time.Time
+}
- if o.TargetBranch == "" {
+// GetBranchTimestamp finds the default branch (if branch is "") and returns the last modification time of the branch
+// (or nil if the branch doesn't exist)
+func GetBranchTimestamp(giteaClient *gitea.Client, owner, repo, branch string, branchTimestampCache cache.SetGetKey) *branchTimestamp {
+ if result, ok := branchTimestampCache.Get(owner + "/" + repo + "/" + branch); ok {
+ if result == nil {
+ return nil
+ }
+ return result.(*branchTimestamp)
+ }
+ result := &branchTimestamp{
+ Branch: branch,
+ }
+ if len(branch) == 0 {
// Get default branch
- defaultBranch, err := giteaClient.GiteaGetRepoDefaultBranch(o.TargetOwner, o.TargetRepo)
+ defaultBranch, err := giteaClient.GiteaGetRepoDefaultBranch(owner, repo)
if err != nil {
- log.Err(err).Msg("Couldn't fetch default branch from repository")
- return false, err
+ _ = branchTimestampCache.Set(owner+"/"+repo+"/", nil, defaultBranchCacheTimeout)
+ return nil
}
- log.Debug().Msgf("Successfully fetched default branch %q from Gitea", defaultBranch)
- o.TargetBranch = defaultBranch
+ result.Branch = defaultBranch
}
- timestamp, err := giteaClient.GiteaGetRepoBranchTimestamp(o.TargetOwner, o.TargetRepo, o.TargetBranch)
+ timestamp, err := giteaClient.GiteaGetRepoBranchTimestamp(owner, repo, result.Branch)
if err != nil {
- if !errors.Is(err, gitea.ErrorNotFound) {
- log.Error().Err(err).Msg("Could not get latest commit timestamp from branch")
+ return nil
+ }
+ result.Timestamp = timestamp
+ _ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, result, branchExistenceCacheTimeout)
+ return result
+}
+
+func (o *Options) getMimeTypeByExtension() string {
+ if o.ForbiddenMimeTypes == nil {
+ o.ForbiddenMimeTypes = make(map[string]bool)
+ }
+ mimeType := mime.TypeByExtension(path.Ext(o.TargetPath))
+ mimeTypeSplit := strings.SplitN(mimeType, ";", 2)
+ if o.ForbiddenMimeTypes[mimeTypeSplit[0]] || mimeType == "" {
+ if o.DefaultMimeType != "" {
+ mimeType = o.DefaultMimeType
+ } else {
+ mimeType = "application/octet-stream"
}
- return false, err
}
-
- if timestamp == nil || timestamp.Branch == "" {
- return false, fmt.Errorf("empty response")
- }
-
- log.Debug().Msgf("Successfully fetched latest commit timestamp from branch: %#v", timestamp)
- o.BranchTimestamp = timestamp.Timestamp
- o.TargetBranch = timestamp.Branch
- return true, nil
+ return mimeType
}
-func (o *Options) ContentWebLink(giteaClient *gitea.Client) string {
- return giteaClient.ContentWebLink(o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath) + "; rel=\"canonical\""
+func (o *Options) generateUri() string {
+ return path.Join(o.TargetOwner, o.TargetRepo, "raw", o.TargetBranch, o.TargetPath)
+}
+
+func (o *Options) generateUriClientArgs() (targetOwner, targetRepo, ref, resource string) {
+ return o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath
+}
+
+func (o *Options) timestamp() string {
+ return strconv.FormatInt(o.BranchTimestamp.Unix(), 10)
}
diff --git a/server/upstream/redirects.go b/server/upstream/redirects.go
deleted file mode 100644
index ab6c971..0000000
--- a/server/upstream/redirects.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package upstream
-
-import (
- "strconv"
- "strings"
- "time"
-
- "codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
- "codeberg.org/codeberg/pages/server/gitea"
- "github.com/rs/zerolog/log"
-)
-
-type Redirect struct {
- From string
- To string
- StatusCode int
-}
-
-// redirectsCacheTimeout specifies the timeout for the redirects cache.
-var redirectsCacheTimeout = 10 * time.Minute
-
-const redirectsConfig = "_redirects"
-
-// getRedirects returns redirects specified in the _redirects file.
-func (o *Options) getRedirects(giteaClient *gitea.Client, redirectsCache cache.SetGetKey) []Redirect {
- var redirects []Redirect
- cacheKey := o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch
-
- // Check for cached redirects
- if cachedValue, ok := redirectsCache.Get(cacheKey); ok {
- redirects = cachedValue.([]Redirect)
- } else {
- // Get _redirects file and parse
- body, err := giteaClient.GiteaRawContent(o.TargetOwner, o.TargetRepo, o.TargetBranch, redirectsConfig)
- if err == nil {
- for _, line := range strings.Split(string(body), "\n") {
- redirectArr := strings.Fields(line)
-
- // Ignore comments and invalid lines
- if strings.HasPrefix(line, "#") || len(redirectArr) < 2 {
- continue
- }
-
- // Get redirect status code
- statusCode := 301
- if len(redirectArr) == 3 {
- statusCode, err = strconv.Atoi(redirectArr[2])
- if err != nil {
- log.Info().Err(err).Msgf("could not read %s of %s/%s", redirectsConfig, o.TargetOwner, o.TargetRepo)
- }
- }
-
- redirects = append(redirects, Redirect{
- From: redirectArr[0],
- To: redirectArr[1],
- StatusCode: statusCode,
- })
- }
- }
- _ = redirectsCache.Set(cacheKey, redirects, redirectsCacheTimeout)
- }
- return redirects
-}
-
-func (o *Options) matchRedirects(ctx *context.Context, giteaClient *gitea.Client, redirects []Redirect, redirectsCache cache.SetGetKey) (final bool) {
- if len(redirects) > 0 {
- for _, redirect := range redirects {
- reqUrl := ctx.Req.RequestURI
- // remove repo and branch from request url
- reqUrl = strings.TrimPrefix(reqUrl, "/"+o.TargetRepo)
- reqUrl = strings.TrimPrefix(reqUrl, "/@"+o.TargetBranch)
-
- // check if from url matches request url
- if strings.TrimSuffix(redirect.From, "/") == strings.TrimSuffix(reqUrl, "/") {
- // do rewrite if status code is 200
- if redirect.StatusCode == 200 {
- o.TargetPath = redirect.To
- o.Upstream(ctx, giteaClient, redirectsCache)
- return true
- } else {
- ctx.Redirect(redirect.To, redirect.StatusCode)
- return true
- }
- }
-
- // handle wildcard redirects
- trimmedFromUrl := strings.TrimSuffix(redirect.From, "/*")
- if strings.HasSuffix(redirect.From, "/*") && strings.HasPrefix(reqUrl, trimmedFromUrl) {
- if strings.Contains(redirect.To, ":splat") {
- splatUrl := strings.ReplaceAll(redirect.To, ":splat", strings.TrimPrefix(reqUrl, trimmedFromUrl))
- // do rewrite if status code is 200
- if redirect.StatusCode == 200 {
- o.TargetPath = splatUrl
- o.Upstream(ctx, giteaClient, redirectsCache)
- return true
- } else {
- ctx.Redirect(splatUrl, redirect.StatusCode)
- return true
- }
- } else {
- // do rewrite if status code is 200
- if redirect.StatusCode == 200 {
- o.TargetPath = redirect.To
- o.Upstream(ctx, giteaClient, redirectsCache)
- return true
- } else {
- ctx.Redirect(redirect.To, redirect.StatusCode)
- return true
- }
- }
- }
- }
- }
-
- return false
-}
diff --git a/server/upstream/upstream.go b/server/upstream/upstream.go
index 1a444e4..0e27727 100644
--- a/server/upstream/upstream.go
+++ b/server/upstream/upstream.go
@@ -1,28 +1,20 @@
package upstream
import (
+ "bytes"
"errors"
- "fmt"
"io"
- "net/http"
"strings"
"time"
"github.com/rs/zerolog/log"
+ "github.com/valyala/fasthttp"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
- "codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
)
-const (
- headerLastModified = "Last-Modified"
- headerIfModifiedSince = "If-Modified-Since"
-
- rawMime = "text/plain; charset=utf-8"
-)
-
// upstreamIndexPages lists pages that may be considered as index pages for directories.
var upstreamIndexPages = []string{
"index.html",
@@ -35,81 +27,68 @@ var upstreamNotFoundPages = []string{
// Options provides various options for the upstream request.
type Options struct {
- TargetOwner string
- TargetRepo string
- TargetBranch string
- TargetPath string
+ TargetOwner,
+ TargetRepo,
+ TargetBranch,
+ TargetPath,
// Used for debugging purposes.
Host string
- TryIndexPages bool
- BranchTimestamp time.Time
+ DefaultMimeType string
+ ForbiddenMimeTypes map[string]bool
+ TryIndexPages bool
+ BranchTimestamp time.Time
// internal
appendTrailingSlash bool
redirectIfExists string
-
- ServeRaw bool
}
// Upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
-func (o *Options) Upstream(ctx *context.Context, giteaClient *gitea.Client, redirectsCache cache.SetGetKey) bool {
- log := log.With().Strs("upstream", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath}).Logger()
-
- if o.TargetOwner == "" || o.TargetRepo == "" {
- html.ReturnErrorPage(ctx, "gitea client: either repo owner or name info is missing", http.StatusBadRequest)
- return true
- }
+func (o *Options) Upstream(ctx *fasthttp.RequestCtx, giteaClient *gitea.Client, branchTimestampCache, fileResponseCache cache.SetGetKey) (final bool) {
+ log := log.With().Strs("upstream", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath, o.Host}).Logger()
// Check if the branch exists and when it was modified
if o.BranchTimestamp.IsZero() {
- branchExist, err := o.GetBranchTimestamp(giteaClient)
- // handle 404
- if err != nil && errors.Is(err, gitea.ErrorNotFound) || !branchExist {
- html.ReturnErrorPage(ctx,
- fmt.Sprintf("branch %q for %s/%s not found", o.TargetBranch, o.TargetOwner, o.TargetRepo),
- http.StatusNotFound)
- return true
- }
+ branch := GetBranchTimestamp(giteaClient, o.TargetOwner, o.TargetRepo, o.TargetBranch, branchTimestampCache)
- // handle unexpected errors
- if err != nil {
- html.ReturnErrorPage(ctx,
- fmt.Sprintf("could not get timestamp of branch %q: '%v'", o.TargetBranch, err),
- http.StatusFailedDependency)
+ if branch == nil {
+ html.ReturnErrorPage(ctx, fasthttp.StatusFailedDependency)
return true
}
+ o.TargetBranch = branch.Branch
+ o.BranchTimestamp = branch.Timestamp
+ }
+
+ if o.TargetOwner == "" || o.TargetRepo == "" || o.TargetBranch == "" {
+ html.ReturnErrorPage(ctx, fasthttp.StatusBadRequest)
+ return true
}
// Check if the browser has a cached version
- if ctx.Response() != nil {
- if ifModifiedSince, err := time.Parse(time.RFC1123, ctx.Response().Header.Get(headerIfModifiedSince)); err == nil {
- if ifModifiedSince.After(o.BranchTimestamp) {
- ctx.RespWriter.WriteHeader(http.StatusNotModified)
- log.Trace().Msg("check response against last modified: valid")
- return true
- }
+ if ifModifiedSince, err := time.Parse(time.RFC1123, string(ctx.Request.Header.Peek("If-Modified-Since"))); err == nil {
+ if !ifModifiedSince.Before(o.BranchTimestamp) {
+ ctx.Response.SetStatusCode(fasthttp.StatusNotModified)
+ return true
}
- log.Trace().Msg("check response against last modified: outdated")
}
log.Debug().Msg("Preparing")
- reader, header, statusCode, err := giteaClient.ServeRawContent(o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath)
- if reader != nil {
- defer reader.Close()
+ // Make a GET request to the upstream URL
+ uri := o.generateUri()
+ var res *fasthttp.Response
+ var cachedResponse gitea.FileResponse
+ var err error
+ if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + o.timestamp()); ok && !cachedValue.(gitea.FileResponse).IsEmpty() {
+ cachedResponse = cachedValue.(gitea.FileResponse)
+ } else {
+ res, err = giteaClient.ServeRawContent(o.generateUriClientArgs())
}
-
log.Debug().Msg("Aquisting")
- // Handle not found error
- if err != nil && errors.Is(err, gitea.ErrorNotFound) {
- // Get and match redirects
- redirects := o.getRedirects(giteaClient, redirectsCache)
- if o.matchRedirects(ctx, giteaClient, redirects, redirectsCache) {
- return true
- }
-
+ // Handle errors
+ if (err != nil && errors.Is(err, gitea.ErrorNotFound)) || (res == nil && !cachedResponse.Exists) {
if o.TryIndexPages {
// copy the o struct & try if an index page exists
optionsForIndexPages := *o
@@ -117,20 +96,25 @@ func (o *Options) Upstream(ctx *context.Context, giteaClient *gitea.Client, redi
optionsForIndexPages.appendTrailingSlash = true
for _, indexPage := range upstreamIndexPages {
optionsForIndexPages.TargetPath = strings.TrimSuffix(o.TargetPath, "/") + "/" + indexPage
- if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
+ if optionsForIndexPages.Upstream(ctx, giteaClient, branchTimestampCache, fileResponseCache) {
+ _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), gitea.FileResponse{
+ Exists: false,
+ }, fileCacheTimeout)
return true
}
}
// compatibility fix for GitHub Pages (/example → /example.html)
optionsForIndexPages.appendTrailingSlash = false
- optionsForIndexPages.redirectIfExists = strings.TrimSuffix(ctx.Path(), "/") + ".html"
+ optionsForIndexPages.redirectIfExists = strings.TrimSuffix(string(ctx.Request.URI().Path()), "/") + ".html"
optionsForIndexPages.TargetPath = o.TargetPath + ".html"
- if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
+ if optionsForIndexPages.Upstream(ctx, giteaClient, branchTimestampCache, fileResponseCache) {
+ _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), gitea.FileResponse{
+ Exists: false,
+ }, fileCacheTimeout)
return true
}
}
-
- ctx.StatusCode = http.StatusNotFound
+ ctx.Response.SetStatusCode(fasthttp.StatusNotFound)
if o.TryIndexPages {
// copy the o struct & try if a not found page exists
optionsForNotFoundPages := *o
@@ -138,71 +122,92 @@ func (o *Options) Upstream(ctx *context.Context, giteaClient *gitea.Client, redi
optionsForNotFoundPages.appendTrailingSlash = false
for _, notFoundPage := range upstreamNotFoundPages {
optionsForNotFoundPages.TargetPath = "/" + notFoundPage
- if optionsForNotFoundPages.Upstream(ctx, giteaClient, redirectsCache) {
+ if optionsForNotFoundPages.Upstream(ctx, giteaClient, branchTimestampCache, fileResponseCache) {
+ _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), gitea.FileResponse{
+ Exists: false,
+ }, fileCacheTimeout)
return true
}
}
}
-
+ if res != nil {
+ // Update cache if the request is fresh
+ _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), gitea.FileResponse{
+ Exists: false,
+ }, fileCacheTimeout)
+ }
return false
}
-
- // handle unexpected client errors
- if err != nil || reader == nil || statusCode != http.StatusOK {
- log.Debug().Msg("Handling error")
- var msg string
-
- if err != nil {
- msg = "gitea client: returned unexpected error"
- log.Error().Err(err).Msg(msg)
- msg = fmt.Sprintf("%s: '%v'", msg, err)
- }
- if reader == nil {
- msg = "gitea client: returned no reader"
- log.Error().Msg(msg)
- }
- if statusCode != http.StatusOK {
- msg = fmt.Sprintf("gitea client: couldn't fetch contents: %d - %s", statusCode, http.StatusText(statusCode))
- log.Error().Msg(msg)
- }
-
- html.ReturnErrorPage(ctx, msg, http.StatusInternalServerError)
+ if res != nil && (err != nil || res.StatusCode() != fasthttp.StatusOK) {
+ log.Warn().Msgf("Couldn't fetch contents from %q: %v (status code %d)", uri, err, res.StatusCode())
+ html.ReturnErrorPage(ctx, fasthttp.StatusInternalServerError)
return true
}
// Append trailing slash if missing (for index files), and redirect to fix filenames in general
// o.appendTrailingSlash is only true when looking for index pages
- if o.appendTrailingSlash && !strings.HasSuffix(ctx.Path(), "/") {
- ctx.Redirect(ctx.Path()+"/", http.StatusTemporaryRedirect)
+ if o.appendTrailingSlash && !bytes.HasSuffix(ctx.Request.URI().Path(), []byte{'/'}) {
+ ctx.Redirect(string(ctx.Request.URI().Path())+"/", fasthttp.StatusTemporaryRedirect)
return true
}
- if strings.HasSuffix(ctx.Path(), "/index.html") && !o.ServeRaw {
- ctx.Redirect(strings.TrimSuffix(ctx.Path(), "index.html"), http.StatusTemporaryRedirect)
+ if bytes.HasSuffix(ctx.Request.URI().Path(), []byte("/index.html")) {
+ ctx.Redirect(strings.TrimSuffix(string(ctx.Request.URI().Path()), "index.html"), fasthttp.StatusTemporaryRedirect)
return true
}
if o.redirectIfExists != "" {
- ctx.Redirect(o.redirectIfExists, http.StatusTemporaryRedirect)
+ ctx.Redirect(o.redirectIfExists, fasthttp.StatusTemporaryRedirect)
return true
}
+ log.Debug().Msg("Handling response")
- // Set ETag & MIME
- o.setHeader(ctx, header)
+ // Set the MIME type
+ mimeType := o.getMimeTypeByExtension()
+ ctx.Response.Header.SetContentType(mimeType)
+
+ // Set ETag
+ if cachedResponse.Exists {
+ ctx.Response.Header.SetBytesV(fasthttp.HeaderETag, cachedResponse.ETag)
+ } else if res != nil {
+ cachedResponse.ETag = res.Header.Peek(fasthttp.HeaderETag)
+ ctx.Response.Header.SetBytesV(fasthttp.HeaderETag, cachedResponse.ETag)
+ }
+
+ if ctx.Response.StatusCode() != fasthttp.StatusNotFound {
+ // Everything's okay so far
+ ctx.Response.SetStatusCode(fasthttp.StatusOK)
+ }
+ ctx.Response.Header.SetLastModified(o.BranchTimestamp)
log.Debug().Msg("Prepare response")
- ctx.RespWriter.WriteHeader(ctx.StatusCode)
-
// Write the response body to the original request
- if reader != nil {
- _, err := io.Copy(ctx.RespWriter, reader)
- if err != nil {
- log.Error().Err(err).Msgf("Couldn't write body for %q", o.TargetPath)
- html.ReturnErrorPage(ctx, "", http.StatusInternalServerError)
- return true
- }
- }
+ var cacheBodyWriter bytes.Buffer
+ if res != nil {
+ if res.Header.ContentLength() > fileCacheSizeLimit {
+ // fasthttp else will set "Content-Length: 0"
+ ctx.Response.SetBodyStream(&strings.Reader{}, -1)
+ err = res.BodyWriteTo(ctx.Response.BodyWriter())
+ } else {
+ // TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
+ err = res.BodyWriteTo(io.MultiWriter(ctx.Response.BodyWriter(), &cacheBodyWriter))
+ }
+ } else {
+ _, err = ctx.Write(cachedResponse.Body)
+ }
+ if err != nil {
+ log.Error().Err(err).Msgf("Couldn't write body for %q", uri)
+ html.ReturnErrorPage(ctx, fasthttp.StatusInternalServerError)
+ return true
+ }
log.Debug().Msg("Sending response")
+ if res != nil && res.Header.ContentLength() <= fileCacheSizeLimit && ctx.Err() == nil {
+ cachedResponse.Exists = true
+ cachedResponse.MimeType = mimeType
+ cachedResponse.Body = cacheBodyWriter.Bytes()
+ _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), cachedResponse, fileCacheTimeout)
+ }
+
return true
}
diff --git a/server/utils/utils.go b/server/utils/utils.go
index 91ed359..7be330f 100644
--- a/server/utils/utils.go
+++ b/server/utils/utils.go
@@ -1,27 +1,11 @@
package utils
-import (
- "net/url"
- "path"
- "strings"
-)
+import "bytes"
-func TrimHostPort(host string) string {
- i := strings.IndexByte(host, ':')
+func TrimHostPort(host []byte) []byte {
+ i := bytes.IndexByte(host, ':')
if i >= 0 {
return host[:i]
}
return host
}
-
-func CleanPath(uriPath string) string {
- unescapedPath, _ := url.PathUnescape(uriPath)
- cleanedPath := path.Join("/", unescapedPath)
-
- // If the path refers to a directory, add a trailing slash.
- if !strings.HasSuffix(cleanedPath, "/") && (strings.HasSuffix(unescapedPath, "/") || strings.HasSuffix(unescapedPath, "/.") || strings.HasSuffix(unescapedPath, "/..")) {
- cleanedPath += "/"
- }
-
- return cleanedPath
-}
diff --git a/server/utils/utils_test.go b/server/utils/utils_test.go
index b8fcea9..3dc0632 100644
--- a/server/utils/utils_test.go
+++ b/server/utils/utils_test.go
@@ -7,63 +7,7 @@ import (
)
func TestTrimHostPort(t *testing.T) {
- assert.EqualValues(t, "aa", TrimHostPort("aa"))
- assert.EqualValues(t, "", TrimHostPort(":"))
- assert.EqualValues(t, "example.com", TrimHostPort("example.com:80"))
-}
-
-// TestCleanPath is mostly copied from fasthttp, to keep the behaviour we had before migrating away from it.
-// Source (MIT licensed): https://github.com/valyala/fasthttp/blob/v1.48.0/uri_test.go#L154
-// Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia, Kirill Danshin, Erik Dubbelboer, FastHTTP Authors
-func TestCleanPath(t *testing.T) {
- // double slash
- testURIPathNormalize(t, "/aa//bb", "/aa/bb")
-
- // triple slash
- testURIPathNormalize(t, "/x///y/", "/x/y/")
-
- // multi slashes
- testURIPathNormalize(t, "/abc//de///fg////", "/abc/de/fg/")
-
- // encoded slashes
- testURIPathNormalize(t, "/xxxx%2fyyy%2f%2F%2F", "/xxxx/yyy/")
-
- // dotdot
- testURIPathNormalize(t, "/aaa/..", "/")
-
- // dotdot with trailing slash
- testURIPathNormalize(t, "/xxx/yyy/../", "/xxx/")
-
- // multi dotdots
- testURIPathNormalize(t, "/aaa/bbb/ccc/../../ddd", "/aaa/ddd")
-
- // dotdots separated by other data
- testURIPathNormalize(t, "/a/b/../c/d/../e/..", "/a/c/")
-
- // too many dotdots
- testURIPathNormalize(t, "/aaa/../../../../xxx", "/xxx")
- testURIPathNormalize(t, "/../../../../../..", "/")
- testURIPathNormalize(t, "/../../../../../../", "/")
-
- // encoded dotdots
- testURIPathNormalize(t, "/aaa%2Fbbb%2F%2E.%2Fxxx", "/aaa/xxx")
-
- // double slash with dotdots
- testURIPathNormalize(t, "/aaa////..//b", "/b")
-
- // fake dotdot
- testURIPathNormalize(t, "/aaa/..bbb/ccc/..", "/aaa/..bbb/")
-
- // single dot
- testURIPathNormalize(t, "/a/./b/././c/./d.html", "/a/b/c/d.html")
- testURIPathNormalize(t, "./foo/", "/foo/")
- testURIPathNormalize(t, "./../.././../../aaa/bbb/../../../././../", "/")
- testURIPathNormalize(t, "./a/./.././../b/./foo.html", "/b/foo.html")
-}
-
-func testURIPathNormalize(t *testing.T, requestURI, expectedPath string) {
- cleanedPath := CleanPath(requestURI)
- if cleanedPath != expectedPath {
- t.Fatalf("Unexpected path %q. Expected %q. requestURI=%q", cleanedPath, expectedPath, requestURI)
- }
+ assert.EqualValues(t, "aa", TrimHostPort([]byte("aa")))
+ assert.EqualValues(t, "", TrimHostPort([]byte(":")))
+ assert.EqualValues(t, "example.com", TrimHostPort([]byte("example.com:80")))
}