13 Commits

Author SHA1 Message Date
x 2bcf339408 refactor: db location
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-19 00:44:03 +01:00
x 2df37e9002 fix: relax chunk limits, support proxies, optimize reads
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-19 00:33:09 +01:00
x 722dbaa6aa feat: implement encrypted chunked storage and convergent encryption
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 23:39:53 +01:00
x 2d6a3ab216 fix(web): use web crypto for upload id's
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 22:30:20 +01:00
x d18ef48bd4 perf(storage)!: optimize cleanup with secondary index
BREAKING CHANGE: This change requires a fresh database. Existing
databases will lack the index, and the cleanup routine will not function
correctly for pre-existing files.

Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 22:10:07 +01:00
x e18be18029 fix(download): enforce integrity check using db metadata
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 21:54:08 +01:00
x a69e5a52a3 perf: implement zero-copy merge for chunked uploads
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 21:45:41 +01:00
x 8b638275b8 fix: unhandled errors
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 21:19:42 +01:00
x 73ee7a9a14 refactor: embed web files
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 20:53:56 +01:00
x 954aec6d8e feat: replace fs scans with bbolt for fast, persistent metadata management
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 20:27:33 +01:00
x 5a3846266e feat: unit tests
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 19:53:29 +01:00
x a115c49195 fix: add blank favicon
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 19:47:21 +01:00
x 00e5c95fe3 refactor: split handlers.go and centralize config
Signed-off-by: skidoodle <contact@albert.lol>
2026-01-18 19:25:35 +01:00
27 changed files with 1650 additions and 496 deletions
-1
View File
@@ -1,3 +1,2 @@
storage/* storage/*
# Added by goreleaser init:
dist/ dist/
-5
View File
@@ -26,7 +26,6 @@ archives:
{{- else }}{{ .Arch }}{{ end }} {{- else }}{{ .Arch }}{{ end }}
formats: ["tar.gz"] formats: ["tar.gz"]
files: files:
- web/**/*
- README.md - README.md
dockers: dockers:
@@ -37,8 +36,6 @@ dockers:
goos: linux goos: linux
goarch: amd64 goarch: amd64
dockerfile: Dockerfile.release dockerfile: Dockerfile.release
extra_files:
- web
build_flag_templates: build_flag_templates:
- "--platform=linux/amd64" - "--platform=linux/amd64"
- "--label=org.opencontainers.image.title={{ .ProjectName }}" - "--label=org.opencontainers.image.title={{ .ProjectName }}"
@@ -51,8 +48,6 @@ dockers:
goos: linux goos: linux
goarch: arm64 goarch: arm64
dockerfile: Dockerfile.release dockerfile: Dockerfile.release
extra_files:
- web
build_flag_templates: build_flag_templates:
- "--platform=linux/arm64" - "--platform=linux/arm64"
- "--label=org.opencontainers.image.title={{ .ProjectName }}" - "--label=org.opencontainers.image.title={{ .ProjectName }}"
+3 -1
View File
@@ -2,6 +2,9 @@ FROM --platform=$BUILDPLATFORM golang:1.25.6 AS builder
WORKDIR /app WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . . COPY . .
ARG TARGETOS ARG TARGETOS
@@ -28,7 +31,6 @@ RUN useradd -m -u 10001 -s /bin/bash appuser
WORKDIR /app WORKDIR /app
COPY --from=builder /app/safebin . COPY --from=builder /app/safebin .
COPY --from=builder /app/web ./web
RUN mkdir -p /app/storage && chown 10001:10001 /app/storage RUN mkdir -p /app/storage && chown 10001:10001 /app/storage
VOLUME ["/app/storage"] VOLUME ["/app/storage"]
-1
View File
@@ -9,7 +9,6 @@ RUN useradd -m -u 10001 -s /bin/bash appuser
WORKDIR /app WORKDIR /app
COPY safebin . COPY safebin .
COPY web ./web
RUN mkdir -p /app/storage && chown 10001:10001 /app/storage RUN mkdir -p /app/storage && chown 10001:10001 /app/storage
VOLUME ["/app/storage"] VOLUME ["/app/storage"]
+4
View File
@@ -1,3 +1,7 @@
module github.com/skidoodle/safebin module github.com/skidoodle/safebin
go 1.25.6 go 1.25.6
require go.etcd.io/bbolt v1.4.3
require golang.org/x/sys v0.29.0 // indirect
+14
View File
@@ -0,0 +1,14 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+42 -15
View File
@@ -4,9 +4,43 @@ import (
"flag" "flag"
"fmt" "fmt"
"html/template" "html/template"
"io/fs"
"log/slog" "log/slog"
"os" "os"
"strconv" "strconv"
"time"
"go.etcd.io/bbolt"
)
const (
DefaultHost = "0.0.0.0"
DefaultPort = 8080
DefaultStorage = "./storage"
DefaultMaxMB = 512
ServerTimeout = 10 * time.Minute
ShutdownTimeout = 10 * time.Second
UploadChunkSize = 8 << 20
MinChunkSize = 1 << 20
MaxRequestOverhead = 10 << 20
PermUserRWX = 0o700
MegaByte = 1 << 20
ChunkSafetyMargin = 2
SlugLength = 22
KeyLength = 16
CleanupInterval = 1 * time.Hour
TempExpiry = 4 * time.Hour
MinRetention = 24 * time.Hour
MaxRetention = 365 * 24 * time.Hour
DBDirName = "db"
DBFileName = "safebin.db"
DBBucketName = "files"
DBBucketIndexName = "expiry_index"
TempDirName = "tmp"
) )
type Config struct { type Config struct {
@@ -19,20 +53,15 @@ type App struct {
Conf Config Conf Config
Tmpl *template.Template Tmpl *template.Template
Logger *slog.Logger Logger *slog.Logger
DB *bbolt.DB
Assets fs.FS
} }
const (
defaultHost = "0.0.0.0"
defaultPort = 8080
defaultStorage = "./storage"
defaultMaxMB = 512
)
func LoadConfig() Config { func LoadConfig() Config {
hostEnv := getEnv("SAFEBIN_HOST", defaultHost) hostEnv := getEnv("SAFEBIN_HOST", DefaultHost)
portEnv := getEnvInt("SAFEBIN_PORT", defaultPort) portEnv := getEnvInt("SAFEBIN_PORT", DefaultPort)
storageEnv := getEnv("SAFEBIN_STORAGE", defaultStorage) storageEnv := getEnv("SAFEBIN_STORAGE", DefaultStorage)
maxMBEnv := int64(getEnvInt("SAFEBIN_MAX_MB", defaultMaxMB)) maxMBEnv := int64(getEnvInt("SAFEBIN_MAX_MB", DefaultMaxMB))
var host string var host string
var port int var port int
@@ -56,7 +85,6 @@ func getEnv(key, fallback string) string {
if value, ok := os.LookupEnv(key); ok { if value, ok := os.LookupEnv(key); ok {
return value return value
} }
return fallback return fallback
} }
@@ -67,10 +95,9 @@ func getEnvInt(key string, fallback int) int {
return i return i
} }
} }
return fallback return fallback
} }
func ParseTemplates() *template.Template { func ParseTemplates(fsys fs.FS) *template.Template {
return template.Must(template.ParseGlob("./web/templates/*.html")) return template.Must(template.ParseFS(fsys, "*.html"))
} }
+37
View File
@@ -0,0 +1,37 @@
package app
import (
"testing"
)
func TestGetEnv(t *testing.T) {
key := "SAFEBIN_TEST_KEY"
val := "somevalue"
if got := getEnv(key, "default"); got != "default" {
t.Errorf("Expected default, got %s", got)
}
t.Setenv(key, val)
if got := getEnv(key, "default"); got != val {
t.Errorf("Expected %s, got %s", val, got)
}
}
func TestGetEnvInt(t *testing.T) {
key := "SAFEBIN_TEST_INT"
if got := getEnvInt(key, 8080); got != 8080 {
t.Errorf("Expected default 8080, got %d", got)
}
t.Setenv(key, "9090")
if got := getEnvInt(key, 8080); got != 9090 {
t.Errorf("Expected 9090, got %d", got)
}
t.Setenv(key, "notanumber")
if got := getEnvInt(key, 8080); got != 8080 {
t.Errorf("Expected fallback on invalid input, got %d", got)
}
}
+46
View File
@@ -0,0 +1,46 @@
package app
import (
"os"
"path/filepath"
"time"
"go.etcd.io/bbolt"
)
type FileMeta struct {
ID string `json:"id"`
Size int64 `json:"size"`
CreatedAt time.Time `json:"created_at"`
ExpiresAt time.Time `json:"expires_at"`
}
func InitDB(storageDir string) (*bbolt.DB, error) {
dbDir := filepath.Join(storageDir, DBDirName)
if err := os.MkdirAll(dbDir, PermUserRWX); err != nil {
return nil, err
}
path := filepath.Join(dbDir, DBFileName)
db, err := bbolt.Open(path, 0600, &bbolt.Options{Timeout: 1 * time.Second})
if err != nil {
return nil, err
}
err = db.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(DBBucketName)); err != nil {
return err
}
if _, err := tx.CreateBucketIfNotExists([]byte(DBBucketIndexName)); err != nil {
return err
}
return nil
})
if err != nil {
_ = db.Close()
return nil, err
}
return db, nil
}
+104
View File
@@ -0,0 +1,104 @@
package app
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"go.etcd.io/bbolt"
)
func TestInitDB(t *testing.T) {
tmpDir := t.TempDir()
db, err := InitDB(tmpDir)
if err != nil {
t.Fatalf("InitDB failed: %v", err)
}
defer func() {
if err := db.Close(); err != nil {
t.Errorf("Failed to close DB: %v", err)
}
}()
dbPath := filepath.Join(tmpDir, DBDirName, DBFileName)
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
t.Error("Database file was not created")
}
err = db.View(func(tx *bbolt.Tx) error {
if b := tx.Bucket([]byte(DBBucketName)); b == nil {
t.Errorf("Bucket '%s' was not created", DBBucketName)
}
if b := tx.Bucket([]byte(DBBucketIndexName)); b == nil {
t.Errorf("Bucket '%s' was not created", DBBucketIndexName)
}
return nil
})
if err != nil {
t.Errorf("View failed: %v", err)
}
}
func TestDB_MetadataLifecycle(t *testing.T) {
tmpDir := t.TempDir()
db, err := InitDB(tmpDir)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := db.Close(); err != nil {
t.Errorf("Failed to close DB: %v", err)
}
}()
app := &App{
Conf: Config{StorageDir: tmpDir, MaxMB: 100},
DB: db,
}
fileID := "test-file-id"
fileSize := int64(1024)
if err := app.RegisterFile(fileID, fileSize); err != nil {
t.Fatalf("RegisterFile failed: %v", err)
}
err = db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(DBBucketName))
data := b.Get([]byte(fileID))
if data == nil {
t.Fatal("Metadata not found in DB")
}
var meta FileMeta
if err := json.Unmarshal(data, &meta); err != nil {
t.Fatalf("Failed to unmarshal meta: %v", err)
}
if meta.ID != fileID {
t.Errorf("Want ID %s, got %s", fileID, meta.ID)
}
if meta.Size != fileSize {
t.Errorf("Want Size %d, got %d", fileSize, meta.Size)
}
if meta.ExpiresAt.Before(time.Now()) {
t.Error("Expiration time is in the past")
}
bIndex := tx.Bucket([]byte(DBBucketIndexName))
indexKey := []byte(meta.ExpiresAt.Format(time.RFC3339) + "_" + fileID)
if val := bIndex.Get(indexKey); val == nil {
t.Error("Index entry not found")
} else if string(val) != fileID {
t.Errorf("Index value mismatch: want %s, got %s", fileID, string(val))
}
return nil
})
if err != nil {
t.Error(err)
}
}
+107
View File
@@ -0,0 +1,107 @@
package app
import (
"encoding/base64"
"encoding/json"
"fmt"
"mime"
"net/http"
"os"
"path/filepath"
"github.com/skidoodle/safebin/internal/crypto"
"go.etcd.io/bbolt"
)
func (app *App) HandleGetFile(writer http.ResponseWriter, request *http.Request) {
slug := request.PathValue("slug")
if len(slug) < SlugLength {
app.SendError(writer, request, http.StatusBadRequest)
return
}
keyBase64 := slug[:SlugLength]
ext := slug[SlugLength:]
key, err := base64.RawURLEncoding.DecodeString(keyBase64)
if err != nil || len(key) != KeyLength {
app.SendError(writer, request, http.StatusUnauthorized)
return
}
id := crypto.GetID(key, ext)
var meta FileMeta
err = app.DB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(DBBucketName))
if b == nil {
return fmt.Errorf("bucket not found")
}
data := b.Get([]byte(id))
if data == nil {
return fmt.Errorf("file not found")
}
return json.Unmarshal(data, &meta)
})
if err != nil {
app.SendError(writer, request, http.StatusNotFound)
return
}
path := filepath.Join(app.Conf.StorageDir, id)
info, err := os.Stat(path)
if err != nil {
app.SendError(writer, request, http.StatusNotFound)
return
}
if info.Size() != meta.Size {
app.Logger.Error("Integrity check failed: disk size mismatch",
"id", id,
"disk_bytes", info.Size(),
"expected_bytes", meta.Size,
)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
file, err := os.Open(path)
if err != nil {
app.Logger.Error("Failed to open file", "path", path, "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
app.Logger.Error("Failed to close file", "err", closeErr)
}
}()
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
app.Logger.Error("Failed to create crypto streamer", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
decryptor := crypto.NewDecryptor(file, streamer.AEAD, info.Size())
contentType := mime.TypeByExtension(ext)
if contentType == "" {
contentType = "application/octet-stream"
}
csp := "default-src 'none'; img-src 'self' data:; media-src 'self' data:; " +
"style-src 'unsafe-inline'; sandbox allow-forms allow-scripts allow-downloads allow-same-origin"
writer.Header().Set("Content-Type", contentType)
writer.Header().Set("Content-Security-Policy", csp)
writer.Header().Set("X-Content-Type-Options", "nosniff")
writer.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=%q", slug))
http.ServeContent(writer, request, slug, info.ModTime(), decryptor)
}
-417
View File
@@ -1,417 +0,0 @@
package app
import (
"encoding/base64"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"github.com/skidoodle/safebin/internal/crypto"
)
const (
uploadChunkSize = 8 << 20
maxRequestOverhead = 10 << 20
permUserRWX = 0o700
slugLength = 22
keyLength = 16
megaByte = 1 << 20
chunkSafetyMargin = 2
)
var reUploadID = regexp.MustCompile(`^[a-zA-Z0-9]{10,50}$`)
func (app *App) HandleHome(writer http.ResponseWriter, request *http.Request) {
err := app.Tmpl.ExecuteTemplate(writer, "base", map[string]any{
"MaxMB": app.Conf.MaxMB,
"Host": request.Host,
})
if err != nil {
app.Logger.Error("Template error", "err", err)
}
}
func (app *App) HandleUpload(writer http.ResponseWriter, request *http.Request) {
limit := (app.Conf.MaxMB * megaByte) + megaByte
request.Body = http.MaxBytesReader(writer, request.Body, limit)
file, header, err := request.FormFile("file")
if err != nil {
if err.Error() == "http: request body too large" {
app.SendError(writer, request, http.StatusRequestEntityTooLarge)
return
}
app.SendError(writer, request, http.StatusBadRequest)
return
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
app.Logger.Error("Failed to close upload file", "err", closeErr)
}
}()
tmp, err := os.CreateTemp(filepath.Join(app.Conf.StorageDir, "tmp"), "up_*")
if err != nil {
app.Logger.Error("Failed to create temp file", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
tmpPath := tmp.Name()
defer func() {
if removeErr := os.Remove(tmpPath); removeErr != nil && !os.IsNotExist(removeErr) {
app.Logger.Error("Failed to remove temp file", "err", removeErr)
}
}()
defer func() {
if closeErr := tmp.Close(); closeErr != nil {
app.Logger.Error("Failed to close temp file", "err", closeErr)
}
}()
if _, err := io.Copy(tmp, file); err != nil {
app.Logger.Error("Failed to write temp file", "err", err)
app.SendError(writer, request, http.StatusRequestEntityTooLarge)
return
}
app.FinalizeFile(writer, request, tmp, header.Filename)
}
func (app *App) HandleChunk(writer http.ResponseWriter, request *http.Request) {
request.Body = http.MaxBytesReader(writer, request.Body, maxRequestOverhead)
uid := request.FormValue("upload_id")
idx, err := strconv.Atoi(request.FormValue("index"))
if err != nil {
app.SendError(writer, request, http.StatusBadRequest)
return
}
maxChunks := int((app.Conf.MaxMB*megaByte)/uploadChunkSize) + chunkSafetyMargin
if !reUploadID.MatchString(uid) || idx > maxChunks || idx < 0 {
app.SendError(writer, request, http.StatusBadRequest)
return
}
file, _, err := request.FormFile("chunk")
if err != nil {
if err.Error() == "http: request body too large" {
app.SendError(writer, request, http.StatusRequestEntityTooLarge)
return
}
app.SendError(writer, request, http.StatusBadRequest)
return
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
app.Logger.Error("Failed to close chunk file", "err", closeErr)
}
}()
if err := app.saveChunk(uid, idx, file); err != nil {
app.Logger.Error("Failed to save chunk", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
}
}
func (app *App) saveChunk(uid string, idx int, src io.Reader) error {
dir := filepath.Join(app.Conf.StorageDir, "tmp", uid)
if err := os.MkdirAll(dir, permUserRWX); err != nil {
return fmt.Errorf("create chunk dir: %w", err)
}
dest, err := os.Create(filepath.Join(dir, strconv.Itoa(idx)))
if err != nil {
return fmt.Errorf("create chunk file: %w", err)
}
defer func() {
if closeErr := dest.Close(); closeErr != nil {
app.Logger.Error("Failed to close chunk dest", "err", closeErr)
}
}()
if _, err := io.Copy(dest, src); err != nil {
return fmt.Errorf("copy chunk: %w", err)
}
return nil
}
func (app *App) HandleFinish(writer http.ResponseWriter, request *http.Request) {
uid := request.FormValue("upload_id")
total, err := strconv.Atoi(request.FormValue("total"))
if err != nil {
app.SendError(writer, request, http.StatusBadRequest)
return
}
maxChunks := int((app.Conf.MaxMB*megaByte)/uploadChunkSize) + chunkSafetyMargin
if !reUploadID.MatchString(uid) || total > maxChunks || total <= 0 {
app.SendError(writer, request, http.StatusBadRequest)
return
}
mergedPath, err := app.mergeChunks(uid, total)
if err != nil {
app.Logger.Error("Merge failed", "err", err)
if errors.Is(err, io.ErrShortWrite) {
app.SendError(writer, request, http.StatusRequestEntityTooLarge)
} else {
app.SendError(writer, request, http.StatusInternalServerError)
}
return
}
defer func() {
if removeErr := os.Remove(mergedPath); removeErr != nil && !os.IsNotExist(removeErr) {
app.Logger.Error("Failed to remove merged file", "err", removeErr)
}
}()
mergedRead, err := os.Open(mergedPath)
if err != nil {
app.Logger.Error("Failed to open merged file", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
defer func() {
if closeErr := mergedRead.Close(); closeErr != nil {
app.Logger.Error("Failed to close merged reader", "err", closeErr)
}
}()
app.FinalizeFile(writer, request, mergedRead, request.FormValue("filename"))
if err := os.RemoveAll(filepath.Join(app.Conf.StorageDir, "tmp", uid)); err != nil {
app.Logger.Error("Failed to remove chunk dir", "err", err)
}
}
func (app *App) mergeChunks(uid string, total int) (string, error) {
tmpPath := filepath.Join(app.Conf.StorageDir, "tmp", "m_"+uid)
merged, err := os.Create(tmpPath)
if err != nil {
return "", fmt.Errorf("create merge file: %w", err)
}
defer func() {
if closeErr := merged.Close(); closeErr != nil {
app.Logger.Error("Failed to close merged file", "err", closeErr)
}
}()
limit := app.Conf.MaxMB * megaByte
var written int64
for i := range total {
partPath := filepath.Join(app.Conf.StorageDir, "tmp", uid, strconv.Itoa(i))
part, err := os.Open(partPath)
if err != nil {
return "", fmt.Errorf("open chunk %d: %w", i, err)
}
n, err := io.Copy(merged, part)
if closeErr := part.Close(); closeErr != nil {
app.Logger.Error("Failed to close chunk part", "err", closeErr)
}
if err != nil {
return "", fmt.Errorf("append chunk %d: %w", i, err)
}
written += n
if written > limit {
return "", io.ErrShortWrite
}
}
return tmpPath, nil
}
func (app *App) HandleGetFile(writer http.ResponseWriter, request *http.Request) {
slug := request.PathValue("slug")
if len(slug) < slugLength {
app.SendError(writer, request, http.StatusBadRequest)
return
}
keyBase64 := slug[:slugLength]
ext := slug[slugLength:]
key, err := base64.RawURLEncoding.DecodeString(keyBase64)
if err != nil || len(key) != keyLength {
app.SendError(writer, request, http.StatusUnauthorized)
return
}
id := crypto.GetID(key, ext)
path := filepath.Join(app.Conf.StorageDir, id)
info, err := os.Stat(path)
if err != nil {
app.SendError(writer, request, http.StatusNotFound)
return
}
file, err := os.Open(path)
if err != nil {
app.Logger.Error("Failed to open file", "path", path, "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
app.Logger.Error("Failed to close file", "err", closeErr)
}
}()
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
app.Logger.Error("Failed to create crypto streamer", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
decryptor := crypto.NewDecryptor(file, streamer.AEAD, info.Size())
contentType := mime.TypeByExtension(ext)
if contentType == "" {
contentType = "application/octet-stream"
}
csp := "default-src 'none'; img-src 'self' data:; media-src 'self' data:; " +
"style-src 'unsafe-inline'; sandbox allow-forms allow-scripts allow-downloads allow-same-origin"
writer.Header().Set("Content-Type", contentType)
writer.Header().Set("Content-Security-Policy", csp)
writer.Header().Set("X-Content-Type-Options", "nosniff")
writer.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=%q", slug))
http.ServeContent(writer, request, slug, info.ModTime(), decryptor)
}
func (app *App) FinalizeFile(writer http.ResponseWriter, request *http.Request, src *os.File, filename string) {
if _, err := src.Seek(0, 0); err != nil {
app.Logger.Error("Seek failed", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
key, err := crypto.DeriveKey(src)
if err != nil {
app.Logger.Error("Key derivation failed", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
ext := filepath.Ext(filename)
id := crypto.GetID(key, ext)
finalPath := filepath.Join(app.Conf.StorageDir, id)
if _, err := os.Stat(finalPath); err == nil {
app.RespondWithLink(writer, request, key, filename)
return
}
if _, err := src.Seek(0, 0); err != nil {
app.Logger.Error("Seek failed", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
if err := app.encryptAndSave(src, key, finalPath); err != nil {
app.Logger.Error("Encryption failed", "err", err)
app.SendError(writer, request, http.StatusInternalServerError)
return
}
app.RespondWithLink(writer, request, key, filename)
}
func (app *App) encryptAndSave(src io.Reader, key []byte, finalPath string) error {
out, err := os.Create(finalPath + ".tmp")
if err != nil {
return fmt.Errorf("create final file: %w", err)
}
var closed bool
defer func() {
if !closed {
if closeErr := out.Close(); closeErr != nil {
app.Logger.Error("Failed to close final file", "err", closeErr)
}
}
if removeErr := os.Remove(finalPath + ".tmp"); removeErr != nil && !os.IsNotExist(removeErr) {
app.Logger.Error("Failed to remove temp final file", "err", removeErr)
}
}()
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
return fmt.Errorf("create streamer: %w", err)
}
if err := streamer.EncryptStream(out, src); err != nil {
return fmt.Errorf("encrypt stream: %w", err)
}
if err := out.Close(); err != nil {
return fmt.Errorf("close final file: %w", err)
}
closed = true
if err := os.Rename(finalPath+".tmp", finalPath); err != nil {
return fmt.Errorf("rename final file: %w", err)
}
return nil
}
+52
View File
@@ -0,0 +1,52 @@
package app
import (
"testing"
"time"
)
func TestCalculateRetention(t *testing.T) {
maxMB := int64(100)
tests := []struct {
name string
fileSize int64
wantMin time.Duration
wantMax time.Duration
}{
{
name: "Tiny file (Max retention)",
fileSize: 1024,
wantMin: MaxRetention - time.Hour,
wantMax: MaxRetention,
},
{
name: "Max size file (Min retention)",
fileSize: 100 * MegaByte,
wantMin: MinRetention,
wantMax: MinRetention + time.Minute,
},
{
name: "Half size file (Somewhere in between)",
fileSize: 50 * MegaByte,
wantMin: 24 * time.Hour,
wantMax: MaxRetention,
},
{
name: "Oversized file (Min retention)",
fileSize: 200 * MegaByte,
wantMin: MinRetention,
wantMax: MinRetention + time.Minute,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := CalculateRetention(tc.fileSize, maxMB)
if got < tc.wantMin || got > tc.wantMax {
t.Errorf("Retention for size %d: got %v, want between %v and %v",
tc.fileSize, got, tc.wantMin, tc.wantMax)
}
})
}
}
+35 -8
View File
@@ -5,13 +5,29 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"path/filepath" "path/filepath"
"strings"
) )
func (app *App) Routes() *http.ServeMux { func (app *App) Routes() *http.ServeMux {
mux := http.NewServeMux() mux := http.NewServeMux()
fileServer := http.FileServer(http.Dir("./web/static"))
mux.Handle("GET /static/", http.StripPrefix("/static/", fileServer)) fileServer := http.FileServer(http.FS(app.Assets))
staticHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "" || strings.HasSuffix(r.URL.Path, "/") {
http.NotFound(w, r)
return
}
if strings.HasSuffix(r.URL.Path, ".html") {
http.NotFound(w, r)
return
}
fileServer.ServeHTTP(w, r)
})
mux.Handle("GET /static/", http.StripPrefix("/static/", staticHandler))
mux.HandleFunc("GET /{$}", app.HandleHome) mux.HandleFunc("GET /{$}", app.HandleHome)
mux.HandleFunc("POST /{$}", app.HandleUpload) mux.HandleFunc("POST /{$}", app.HandleUpload)
mux.HandleFunc("POST /upload/chunk", app.HandleChunk) mux.HandleFunc("POST /upload/chunk", app.HandleChunk)
@@ -21,6 +37,17 @@ func (app *App) Routes() *http.ServeMux {
return mux return mux
} }
func (app *App) HandleHome(writer http.ResponseWriter, request *http.Request) {
err := app.Tmpl.ExecuteTemplate(writer, "layout", map[string]any{
"MaxMB": app.Conf.MaxMB,
"Host": request.Host,
})
if err != nil {
app.Logger.Error("Template error", "err", err)
}
}
func (app *App) RespondWithLink(writer http.ResponseWriter, request *http.Request, key []byte, originalName string) { func (app *App) RespondWithLink(writer http.ResponseWriter, request *http.Request, key []byte, originalName string) {
keySlug := base64.RawURLEncoding.EncodeToString(key) keySlug := base64.RawURLEncoding.EncodeToString(key)
ext := filepath.Ext(originalName) ext := filepath.Ext(originalName)
@@ -42,14 +69,15 @@ func (app *App) RespondWithLink(writer http.ResponseWriter, request *http.Reques
if _, err := fmt.Fprintf(writer, html, link); err != nil { if _, err := fmt.Fprintf(writer, html, link); err != nil {
app.Logger.Error("Failed to write response", "err", err) app.Logger.Error("Failed to write response", "err", err)
} }
return return
} }
scheme := "https" scheme := request.Header.Get("X-Forwarded-Proto")
if scheme == "" {
if request.TLS == nil { scheme = "https"
scheme = "http" if request.TLS == nil {
scheme = "http"
}
} }
if _, err := fmt.Fprintf(writer, "%s://%s\n", scheme, link); err != nil { if _, err := fmt.Fprintf(writer, "%s://%s\n", scheme, link); err != nil {
@@ -72,7 +100,6 @@ func (app *App) SendError(writer http.ResponseWriter, request *http.Request, cod
if _, err := fmt.Fprintf(writer, html, code); err != nil { if _, err := fmt.Fprintf(writer, html, code); err != nil {
app.Logger.Error("Failed to write error response", "err", err) app.Logger.Error("Failed to write error response", "err", err)
} }
return return
} }
+339
View File
@@ -0,0 +1,339 @@
package app
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"log/slog"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/skidoodle/safebin/internal/crypto"
)
func setupTestApp(t *testing.T) (*App, string) {
storageDir := t.TempDir()
if err := os.MkdirAll(filepath.Join(storageDir, TempDirName), 0700); err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
webDir := filepath.Join(storageDir, "web")
if err := os.MkdirAll(webDir, 0700); err != nil {
t.Fatalf("Failed to create web dir: %v", err)
}
if err := os.WriteFile(filepath.Join(webDir, "layout.html"), []byte(`{{define "layout"}}{{template "content" .}}{{end}}`), 0600); err != nil {
t.Fatalf("Failed to write layout.html: %v", err)
}
if err := os.WriteFile(filepath.Join(webDir, "home.html"), []byte(`{{define "content"}}OK{{end}}`), 0600); err != nil {
t.Fatalf("Failed to write home.html: %v", err)
}
testFS := os.DirFS(webDir)
tmpl := ParseTemplates(testFS)
db, err := InitDB(storageDir)
if err != nil {
t.Fatalf("Failed to init db: %v", err)
}
t.Cleanup(func() {
if err := db.Close(); err != nil {
t.Errorf("Failed to close DB: %v", err)
}
})
app := &App{
Conf: Config{
StorageDir: storageDir,
MaxMB: 10,
},
Logger: discardLogger(),
Tmpl: tmpl,
Assets: testFS,
DB: db,
}
return app, storageDir
}
func discardLogger() *slog.Logger {
return slog.New(slog.NewTextHandler(io.Discard, nil))
}
func TestIntegration_StandardUploadAndDownload(t *testing.T) {
app, _ := setupTestApp(t)
server := httptest.NewServer(app.Routes())
defer server.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile("file", "test.txt")
if err != nil {
t.Fatalf("CreateFormFile failed: %v", err)
}
content := []byte("Hello Safebin")
if _, err := part.Write(content); err != nil {
t.Fatalf("Write part failed: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("Writer close failed: %v", err)
}
req, _ := http.NewRequest("POST", server.URL+"/", body)
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Upload request failed: %v", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
t.Errorf("Failed to close response body: %v", err)
}
}()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Upload failed status: %d", resp.StatusCode)
}
respBytes, _ := io.ReadAll(resp.Body)
respStr := string(respBytes)
parts := strings.Split(strings.TrimSpace(respStr), "/")
slugWithExt := parts[len(parts)-1]
downloadURL := fmt.Sprintf("%s/%s", server.URL, slugWithExt)
resp, err = http.Get(downloadURL)
if err != nil {
t.Fatalf("Download request failed: %v", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
t.Errorf("Failed to close download response body: %v", err)
}
}()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Download failed status: %d", resp.StatusCode)
}
downloadedContent, _ := io.ReadAll(resp.Body)
if !bytes.Equal(content, downloadedContent) {
t.Errorf("Content mismatch. Want %s, got %s", content, downloadedContent)
}
}
func TestIntegration_ChunkedUpload(t *testing.T) {
app, _ := setupTestApp(t)
server := httptest.NewServer(app.Routes())
defer server.Close()
uploadID := "testchunkid123"
content := []byte("Chunk1Content-Chunk2Content")
chunk1 := content[:13]
chunk2 := content[13:]
uploadChunk(t, server.URL, uploadID, 0, chunk1)
uploadChunk(t, server.URL, uploadID, 1, chunk2)
finishURL := fmt.Sprintf("%s/upload/finish", server.URL)
form := map[string]string{
"upload_id": uploadID,
"total": "2",
"filename": "chunked.txt",
}
resp := postForm(t, finishURL, form)
defer func() {
if err := resp.Body.Close(); err != nil {
t.Errorf("Failed to close finish response body: %v", err)
}
}()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Finish failed: %d", resp.StatusCode)
}
respBytes, _ := io.ReadAll(resp.Body)
respStr := string(respBytes)
parts := strings.Split(strings.TrimSpace(respStr), "/")
slugWithExt := parts[len(parts)-1]
downloadURL := fmt.Sprintf("%s/%s", server.URL, slugWithExt)
dlResp, err := http.Get(downloadURL)
if err != nil {
t.Fatalf("Download request failed: %v", err)
}
dlBytes, _ := io.ReadAll(dlResp.Body)
if err := dlResp.Body.Close(); err != nil {
t.Errorf("Failed to close download response body: %v", err)
}
if !bytes.Equal(content, dlBytes) {
t.Errorf("Chunked reassembly failed. Want %s, got %s", content, dlBytes)
}
}
// TestIntegration_ChunkedUpload_VerifyEncryption uploads a single chunk and
// inspects the on-disk chunk file to confirm it is encrypted at rest: the
// plaintext must not appear in the file, and the stored [key || ciphertext]
// layout must decrypt back to the original payload.
func TestIntegration_ChunkedUpload_VerifyEncryption(t *testing.T) {
	app, storageDir := setupTestApp(t)
	server := httptest.NewServer(app.Routes())
	defer server.Close()

	const uploadID = "securechunk123"
	plaintext := []byte("This is a secret message that should be encrypted")
	uploadChunk(t, server.URL, uploadID, 0, plaintext)

	raw, err := os.ReadFile(filepath.Join(storageDir, TempDirName, uploadID, "0"))
	if err != nil {
		t.Fatalf("Failed to read chunk file: %v", err)
	}
	if bytes.Contains(raw, plaintext) {
		t.Fatal("Chunk file contains plaintext data!")
	}
	if len(raw) <= crypto.KeySize {
		t.Fatalf("Chunk file too small: %d bytes", len(raw))
	}

	// On-disk layout: the random per-chunk key followed by the ciphertext.
	chunkKey, ciphertext := raw[:crypto.KeySize], raw[crypto.KeySize:]
	streamer, err := crypto.NewGCMStreamer(chunkKey)
	if err != nil {
		t.Fatalf("Failed to create streamer: %v", err)
	}
	dec := crypto.NewDecryptor(bytes.NewReader(ciphertext), streamer.AEAD, int64(len(ciphertext)))
	got, err := io.ReadAll(dec)
	if err != nil {
		t.Fatalf("Failed to decrypt chunk: %v", err)
	}
	if !bytes.Equal(got, plaintext) {
		t.Errorf("Decrypted data mismatch.\nWant: %s\nGot: %s", plaintext, got)
	}
}
// TestIntegration_Upload_VerifyEncryption performs a full (non-chunked)
// upload and verifies the stored file is encrypted: the plaintext must not
// appear on disk, and decrypting with the key embedded in the returned slug
// must reproduce the original content.
func TestIntegration_Upload_VerifyEncryption(t *testing.T) {
	app, storageDir := setupTestApp(t)
	server := httptest.NewServer(app.Routes())
	defer server.Close()
	plaintext := []byte("Sensitive Data For Full Upload")
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", "secret.txt")
	if err != nil {
		t.Fatalf("CreateFormFile failed: %v", err)
	}
	if _, err := part.Write(plaintext); err != nil {
		t.Fatalf("Write failed: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("Writer close failed: %v", err)
	}
	req, err := http.NewRequest("POST", server.URL+"/", body)
	if err != nil {
		t.Fatalf("NewRequest failed: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			t.Errorf("Failed to close response body: %v", err)
		}
	}()
	respBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Failed to read response body: %v", err)
	}
	slug := filepath.Base(strings.TrimSpace(string(respBytes)))
	if len(slug) < SlugLength {
		t.Fatalf("Invalid slug: %s", slug)
	}
	// The first SlugLength characters of the slug encode the convergent key.
	key, err := base64.RawURLEncoding.DecodeString(slug[:SlugLength])
	if err != nil {
		t.Fatalf("Failed to decode key from slug: %v", err)
	}
	ext := filepath.Ext("secret.txt")
	id := crypto.GetID(key, ext)
	finalPath := filepath.Join(storageDir, id)
	finalData, err := os.ReadFile(finalPath)
	if err != nil {
		t.Fatalf("Failed to read final file: %v", err)
	}
	if bytes.Contains(finalData, plaintext) {
		t.Fatal("Final file contains plaintext!")
	}
	streamer, err := crypto.NewGCMStreamer(key)
	if err != nil {
		t.Fatalf("Failed to create streamer: %v", err)
	}
	d := crypto.NewDecryptor(bytes.NewReader(finalData), streamer.AEAD, int64(len(finalData)))
	decrypted, err := io.ReadAll(d)
	if err != nil {
		t.Fatalf("Failed to decrypt final file: %v", err)
	}
	if !bytes.Equal(decrypted, plaintext) {
		t.Error("Final file decryption failed")
	}
}
func uploadChunk(t *testing.T, baseURL, uid string, idx int, data []byte) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
if err := writer.WriteField("upload_id", uid); err != nil {
t.Fatalf("WriteField upload_id failed: %v", err)
}
if err := writer.WriteField("index", fmt.Sprintf("%d", idx)); err != nil {
t.Fatalf("WriteField index failed: %v", err)
}
part, err := writer.CreateFormFile("chunk", "blob")
if err != nil {
t.Fatalf("CreateFormFile failed: %v", err)
}
if _, err := part.Write(data); err != nil {
t.Fatalf("Write part failed: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("Writer close failed: %v", err)
}
req, _ := http.NewRequest("POST", baseURL+"/upload/chunk", body)
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil || resp.StatusCode != http.StatusOK {
t.Fatalf("Chunk %d upload failed: %v", idx, err)
}
if err := resp.Body.Close(); err != nil {
t.Errorf("Failed to close chunk response body: %v", err)
}
}
func postForm(t *testing.T, url string, fields map[string]string) *http.Response {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for k, v := range fields {
if err := writer.WriteField(k, v); err != nil {
t.Fatalf("WriteField %s failed: %v", k, err)
}
}
if err := writer.Close(); err != nil {
t.Fatalf("Writer close failed: %v", err)
}
req, _ := http.NewRequest("POST", url, body)
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Post form failed: %v", err)
}
return resp
}
+222 -27
View File
@@ -2,22 +2,22 @@ package app
import ( import (
"context" "context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"time" "time"
)
const ( "github.com/skidoodle/safebin/internal/crypto"
cleanupInterval = 1 * time.Hour "go.etcd.io/bbolt"
tempExpiry = 4 * time.Hour
minRetention = 24 * time.Hour
maxRetention = 365 * 24 * time.Hour
bytesInMB = 1 << 20
) )
func (app *App) StartCleanupTask(ctx context.Context) { func (app *App) StartCleanupTask(ctx context.Context) {
ticker := time.NewTicker(cleanupInterval) ticker := time.NewTicker(CleanupInterval)
for { for {
select { select {
@@ -25,32 +25,227 @@ func (app *App) StartCleanupTask(ctx context.Context) {
ticker.Stop() ticker.Stop()
return return
case <-ticker.C: case <-ticker.C:
app.CleanStorage(app.Conf.StorageDir) app.CleanStorage()
app.CleanTemp(filepath.Join(app.Conf.StorageDir, "tmp")) app.CleanTemp(filepath.Join(app.Conf.StorageDir, TempDirName))
} }
} }
} }
func (app *App) CleanStorage(path string) { func (app *App) saveChunk(uid string, idx int, src io.Reader) error {
entries, err := os.ReadDir(path) dir := filepath.Join(app.Conf.StorageDir, TempDirName, uid)
if err := os.MkdirAll(dir, PermUserRWX); err != nil {
return fmt.Errorf("create chunk dir: %w", err)
}
dest, err := os.Create(filepath.Join(dir, strconv.Itoa(idx)))
if err != nil { if err != nil {
app.Logger.Error("Failed to read storage dir", "err", err) return fmt.Errorf("create chunk file: %w", err)
}
defer func() {
if closeErr := dest.Close(); closeErr != nil {
app.Logger.Error("Failed to close chunk dest", "err", closeErr)
}
}()
key := make([]byte, crypto.KeySize)
if _, err := rand.Read(key); err != nil {
return fmt.Errorf("generate chunk key: %w", err)
}
if _, err := dest.Write(key); err != nil {
return fmt.Errorf("write chunk key: %w", err)
}
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
return fmt.Errorf("create streamer: %w", err)
}
if err := streamer.EncryptStream(dest, src); err != nil {
return fmt.Errorf("encrypt chunk: %w", err)
}
return nil
}
func (app *App) getChunkDecryptors(uid string, total int) ([]io.ReadSeeker, func(), error) {
files := make([]*os.File, 0, total)
decryptors := make([]io.ReadSeeker, 0, total)
closeAll := func() {
for _, f := range files {
_ = f.Close()
}
}
for i := range total {
partPath := filepath.Join(app.Conf.StorageDir, TempDirName, uid, strconv.Itoa(i))
f, err := os.Open(partPath)
if err != nil {
closeAll()
return nil, nil, fmt.Errorf("open chunk %d: %w", i, err)
}
files = append(files, f)
key := make([]byte, crypto.KeySize)
if _, err := io.ReadFull(f, key); err != nil {
closeAll()
return nil, nil, fmt.Errorf("read chunk key %d: %w", i, err)
}
info, err := f.Stat()
if err != nil {
closeAll()
return nil, nil, fmt.Errorf("stat chunk %d: %w", i, err)
}
bodySize := info.Size() - int64(crypto.KeySize)
if bodySize < 0 {
closeAll()
return nil, nil, fmt.Errorf("invalid chunk size %d", i)
}
bodyReader := io.NewSectionReader(f, int64(crypto.KeySize), bodySize)
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
closeAll()
return nil, nil, fmt.Errorf("create streamer %d: %w", i, err)
}
decryptor := crypto.NewDecryptor(bodyReader, streamer.AEAD, bodySize)
decryptors = append(decryptors, decryptor)
}
return decryptors, closeAll, nil
}
func (app *App) encryptAndSave(src io.Reader, key []byte, finalPath string) error {
out, err := os.Create(finalPath + ".tmp")
if err != nil {
return fmt.Errorf("create final file: %w", err)
}
var closed bool
defer func() {
if !closed {
if closeErr := out.Close(); closeErr != nil {
app.Logger.Error("Failed to close final file", "err", closeErr)
}
}
if removeErr := os.Remove(finalPath + ".tmp"); removeErr != nil && !os.IsNotExist(removeErr) {
app.Logger.Error("Failed to remove temp final file", "err", removeErr)
}
}()
streamer, err := crypto.NewGCMStreamer(key)
if err != nil {
return fmt.Errorf("create streamer: %w", err)
}
if err := streamer.EncryptStream(out, src); err != nil {
return fmt.Errorf("encrypt stream: %w", err)
}
if err := out.Close(); err != nil {
return fmt.Errorf("close final file: %w", err)
}
closed = true
if err := os.Rename(finalPath+".tmp", finalPath); err != nil {
return fmt.Errorf("rename final file: %w", err)
}
return nil
}
func (app *App) RegisterFile(id string, size int64) error {
retention := CalculateRetention(size, app.Conf.MaxMB)
meta := FileMeta{
ID: id,
Size: size,
CreatedAt: time.Now(),
ExpiresAt: time.Now().Add(retention),
}
return app.DB.Update(func(tx *bbolt.Tx) error {
bFiles := tx.Bucket([]byte(DBBucketName))
bIndex := tx.Bucket([]byte(DBBucketIndexName))
data, err := json.Marshal(meta)
if err != nil {
return err
}
if err := bFiles.Put([]byte(id), data); err != nil {
return err
}
indexKey := []byte(meta.ExpiresAt.Format(time.RFC3339) + "_" + id)
return bIndex.Put(indexKey, []byte(id))
})
}
func (app *App) CleanStorage() {
now := time.Now().Format(time.RFC3339)
var toDeleteIDs []string
var toDeleteKeys []string
err := app.DB.View(func(tx *bbolt.Tx) error {
bIndex := tx.Bucket([]byte(DBBucketIndexName))
if bIndex == nil {
return nil
}
c := bIndex.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if string(k) > now {
break
}
toDeleteKeys = append(toDeleteKeys, string(k))
toDeleteIDs = append(toDeleteIDs, string(v))
}
return nil
})
if err != nil {
app.Logger.Error("Failed to view DB for cleanup", "err", err)
return return
} }
for _, entry := range entries { if len(toDeleteIDs) == 0 {
info, err := entry.Info() return
if err != nil { }
continue
}
expiry := CalculateRetention(info.Size(), app.Conf.MaxMB) err = app.DB.Update(func(tx *bbolt.Tx) error {
bFiles := tx.Bucket([]byte(DBBucketName))
bIndex := tx.Bucket([]byte(DBBucketIndexName))
if time.Since(info.ModTime()) > expiry { for i, id := range toDeleteIDs {
if err := os.RemoveAll(filepath.Join(path, entry.Name())); err != nil { path := filepath.Join(app.Conf.StorageDir, id)
app.Logger.Error("Failed to remove expired file", "path", entry.Name(), "err", err) if err := os.RemoveAll(path); err != nil {
app.Logger.Error("Failed to remove expired file", "path", id, "err", err)
}
if err := bFiles.Delete([]byte(id)); err != nil {
app.Logger.Error("Failed to delete metadata", "id", id, "err", err)
}
if err := bIndex.Delete([]byte(toDeleteKeys[i])); err != nil {
app.Logger.Error("Failed to delete index", "key", toDeleteKeys[i], "err", err)
} }
} }
return nil
})
if err != nil {
app.Logger.Error("Failed to update DB during cleanup", "err", err)
} }
} }
@@ -67,7 +262,7 @@ func (app *App) CleanTemp(path string) {
continue continue
} }
if time.Since(info.ModTime()) > tempExpiry { if time.Since(info.ModTime()) > TempExpiry {
if err := os.RemoveAll(filepath.Join(path, entry.Name())); err != nil { if err := os.RemoveAll(filepath.Join(path, entry.Name())); err != nil {
app.Logger.Error("Failed to remove expired temp file", "path", entry.Name(), "err", err) app.Logger.Error("Failed to remove expired temp file", "path", entry.Name(), "err", err)
} }
@@ -76,13 +271,13 @@ func (app *App) CleanTemp(path string) {
} }
func CalculateRetention(fileSize, maxMB int64) time.Duration { func CalculateRetention(fileSize, maxMB int64) time.Duration {
ratio := math.Max(0, math.Min(1, float64(fileSize)/float64(maxMB*bytesInMB))) ratio := math.Max(0, math.Min(1, float64(fileSize)/float64(maxMB*MegaByte)))
invRatio := 1.0 - ratio invRatio := 1.0 - ratio
retention := float64(maxRetention) * (invRatio * invRatio * invRatio) retention := float64(MaxRetention) * (invRatio * invRatio * invRatio)
if retention < float64(minRetention) { if retention < float64(MinRetention) {
return minRetention return MinRetention
} }
return time.Duration(retention) return time.Duration(retention)
+218
View File
@@ -0,0 +1,218 @@
package app
import (
"bytes"
"crypto/rand"
"encoding/json"
"io"
"os"
"path/filepath"
"testing"
"time"
"github.com/skidoodle/safebin/internal/crypto"
"go.etcd.io/bbolt"
)
// TestCleanup_AbandonedChunks verifies that CleanTemp removes chunk upload
// directories whose modification time is older than TempExpiry.
func TestCleanup_AbandonedChunks(t *testing.T) {
	tmpDir := t.TempDir()
	tmpStorage := filepath.Join(tmpDir, TempDirName)
	// Use the shared permission constant so the test mirrors production setup
	// (the rest of the package uses PermUserRWX, not a raw 0700 literal).
	if err := os.MkdirAll(tmpStorage, PermUserRWX); err != nil {
		t.Fatalf("MkdirAll failed: %v", err)
	}
	db, err := InitDB(tmpDir)
	if err != nil {
		t.Fatalf("InitDB failed: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("Failed to close DB: %v", err)
		}
	}()
	app := &App{
		Conf:   Config{StorageDir: tmpDir},
		Logger: discardLogger(),
		DB:     db,
	}
	chunkDir := filepath.Join(tmpStorage, "some_upload_id")
	if err := os.MkdirAll(chunkDir, PermUserRWX); err != nil {
		t.Fatalf("MkdirAll chunkDir failed: %v", err)
	}
	if err := os.WriteFile(filepath.Join(chunkDir, "0"), []byte("chunk data"), 0600); err != nil {
		t.Fatalf("WriteFile chunk failed: %v", err)
	}
	// Backdate the directory so it is older than the temp expiry window.
	oldTime := time.Now().Add(-TempExpiry - time.Hour)
	if err := os.Chtimes(chunkDir, oldTime, oldTime); err != nil {
		t.Fatalf("Chtimes failed: %v", err)
	}
	app.CleanTemp(tmpStorage)
	if _, err := os.Stat(chunkDir); !os.IsNotExist(err) {
		t.Error("Cleanup failed to remove abandoned chunk directory")
	}
}
// TestCleanup_ExpiredStorage verifies that CleanStorage removes a file whose
// expiry timestamp has passed, along with both its metadata record and its
// secondary-index entry.
func TestCleanup_ExpiredStorage(t *testing.T) {
	storageDir := t.TempDir()
	db, err := InitDB(storageDir)
	if err != nil {
		t.Fatalf("InitDB failed: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("Failed to close DB: %v", err)
		}
	}()
	app := &App{
		Conf: Config{
			StorageDir: storageDir,
			MaxMB:      100,
		},
		Logger: discardLogger(),
		DB:     db,
	}
	filename := "large_file_id"
	path := filepath.Join(storageDir, filename)
	f, err := os.Create(path)
	if err != nil {
		t.Fatalf("Create file failed: %v", err)
	}
	// A sparse max-size file gets the minimum retention, so backdating past
	// MinRetention guarantees expiry.
	if err := f.Truncate(100 * MegaByte); err != nil {
		t.Fatalf("Truncate failed: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("Close file failed: %v", err)
	}
	expiredMeta := FileMeta{
		ID:        filename,
		Size:      100 * MegaByte,
		CreatedAt: time.Now().Add(-MinRetention - 2*time.Hour),
		ExpiresAt: time.Now().Add(-time.Hour),
	}
	if err := app.DB.Update(func(tx *bbolt.Tx) error {
		bFiles := tx.Bucket([]byte(DBBucketName))
		bIndex := tx.Bucket([]byte(DBBucketIndexName))
		// The original discarded the Marshal error; fail the transaction
		// instead of writing garbage metadata.
		data, err := json.Marshal(expiredMeta)
		if err != nil {
			return err
		}
		if err := bFiles.Put([]byte(filename), data); err != nil {
			return err
		}
		indexKey := []byte(expiredMeta.ExpiresAt.Format(time.RFC3339) + "_" + filename)
		return bIndex.Put(indexKey, []byte(filename))
	}); err != nil {
		t.Fatalf("DB Update failed: %v", err)
	}
	app.CleanStorage()
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		t.Error("Cleanup failed to remove expired large file")
	}
	if err := app.DB.View(func(tx *bbolt.Tx) error {
		bFiles := tx.Bucket([]byte(DBBucketName))
		if v := bFiles.Get([]byte(filename)); v != nil {
			t.Error("Cleanup failed to remove metadata")
		}
		bIndex := tx.Bucket([]byte(DBBucketIndexName))
		indexKey := []byte(expiredMeta.ExpiresAt.Format(time.RFC3339) + "_" + filename)
		if v := bIndex.Get(indexKey); v != nil {
			t.Error("Cleanup failed to remove index entry")
		}
		return nil
	}); err != nil {
		t.Fatalf("DB View failed: %v", err)
	}
}
// TestSaveChunk_EncryptsData writes a chunk through saveChunk and confirms
// the on-disk file never stores the plaintext and has the expected
// [key || ciphertext || GCM tag] size.
func TestSaveChunk_EncryptsData(t *testing.T) {
	storageRoot := t.TempDir()
	application := &App{
		Conf:   Config{StorageDir: storageRoot},
		Logger: discardLogger(),
	}
	const uploadID = "test-encrypt-chunk"
	payload := make([]byte, 1024)
	if _, err := rand.Read(payload); err != nil {
		t.Fatal(err)
	}
	if err := application.saveChunk(uploadID, 0, bytes.NewReader(payload)); err != nil {
		t.Fatalf("saveChunk failed: %v", err)
	}
	stored, err := os.ReadFile(filepath.Join(storageRoot, TempDirName, uploadID, "0"))
	if err != nil {
		t.Fatalf("ReadFile failed: %v", err)
	}
	if bytes.Equal(stored, payload) {
		t.Fatal("Chunk stored as plaintext!")
	}
	if bytes.Contains(stored, payload) {
		t.Fatal("Chunk contains plaintext!")
	}
	// Per-chunk key prefix + ciphertext + 16-byte GCM authentication tag.
	wantSize := crypto.KeySize + len(payload) + 16
	if len(stored) != wantSize {
		t.Errorf("Unexpected file size. Want %d, got %d", wantSize, len(stored))
	}
}
// TestGetChunkDecryptors_RestoresData round-trips two chunks through
// saveChunk and getChunkDecryptors, verifying each decryptor yields the
// original plaintext in order.
func TestGetChunkDecryptors_RestoresData(t *testing.T) {
	storageRoot := t.TempDir()
	application := &App{
		Conf:   Config{StorageDir: storageRoot},
		Logger: discardLogger(),
	}
	const uploadID = "test-restore"
	chunks := [][]byte{
		[]byte("chunk one data"),
		[]byte("chunk two data"),
	}
	for i, c := range chunks {
		if err := application.saveChunk(uploadID, i, bytes.NewReader(c)); err != nil {
			t.Fatal(err)
		}
	}
	decryptors, closeFn, err := application.getChunkDecryptors(uploadID, len(chunks))
	if err != nil {
		t.Fatalf("getChunkDecryptors failed: %v", err)
	}
	defer closeFn()
	if len(decryptors) != 2 {
		t.Fatalf("Expected 2 decryptors, got %d", len(decryptors))
	}
	for i, want := range chunks {
		got, err := io.ReadAll(decryptors[i])
		if err != nil {
			t.Fatalf("Failed to read decryptor %d: %v", i+1, err)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("Chunk %d mismatch. Want %s, got %s", i+1, want, got)
		}
	}
}
+230
View File
@@ -0,0 +1,230 @@
package app
import (
	"crypto/rand"
	"crypto/sha256"
	"errors"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strconv"

	"github.com/skidoodle/safebin/internal/crypto"
)
var reUploadID = regexp.MustCompile(`^[a-zA-Z0-9]{10,50}$`)
// HandleUpload handles a single-request multipart upload. The file is
// streamed once: it is hashed (to derive the convergent key) while being
// encrypted to a temp file under a random ephemeral key, then the temp file
// is decrypted again as the plaintext source for finalizeUpload. This keeps
// the plaintext off disk at all times.
func (app *App) HandleUpload(writer http.ResponseWriter, request *http.Request) {
	// Allow the configured maximum plus one extra MB of multipart framing.
	limit := (app.Conf.MaxMB * MegaByte) + MegaByte
	request.Body = http.MaxBytesReader(writer, request.Body, limit)
	file, header, err := request.FormFile("file")
	if err != nil {
		// Detect the body-size limit via the typed error rather than the
		// brittle error-string comparison used previously.
		var maxErr *http.MaxBytesError
		if errors.As(err, &maxErr) {
			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
			return
		}
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			app.Logger.Error("Failed to close upload file", "err", closeErr)
		}
	}()
	tmp, err := os.CreateTemp(filepath.Join(app.Conf.StorageDir, TempDirName), "up_*")
	if err != nil {
		app.Logger.Error("Failed to create temp file", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	tmpPath := tmp.Name()
	defer func() {
		_ = tmp.Close()
		if removeErr := os.Remove(tmpPath); removeErr != nil && !os.IsNotExist(removeErr) {
			app.Logger.Error("Failed to remove temp file", "err", removeErr)
		}
	}()
	// The ephemeral key only protects the intermediate temp file; the final
	// file is re-encrypted under the content-derived convergent key.
	ephemeralKey := make([]byte, crypto.KeySize)
	if _, err := rand.Read(ephemeralKey); err != nil {
		app.Logger.Error("Failed to generate ephemeral key", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	// Tee the upload through the hasher while feeding the encryptor via a
	// pipe, so hashing and encryption happen in a single pass over the body.
	pr, pw := io.Pipe()
	hasher := sha256.New()
	errChan := make(chan error, 1)
	go func() {
		_, err := io.Copy(io.MultiWriter(hasher, pw), file)
		_ = pw.CloseWithError(err)
		errChan <- err
	}()
	defer func() {
		if closeErr := pr.Close(); closeErr != nil {
			app.Logger.Error("Failed to close pipe reader", "err", closeErr)
		}
	}()
	streamer, err := crypto.NewGCMStreamer(ephemeralKey)
	if err != nil {
		app.Logger.Error("Failed to create streamer", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	if err := streamer.EncryptStream(tmp, pr); err != nil {
		app.Logger.Error("Failed to encrypt stream", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	if err := <-errChan; err != nil {
		app.Logger.Error("Failed to read/hash upload", "err", err)
		app.SendError(writer, request, http.StatusRequestEntityTooLarge)
		return
	}
	convergentKey := hasher.Sum(nil)[:crypto.KeySize]
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		app.Logger.Error("Seek failed", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	// The previous code ignored the Stat error and risked a nil dereference
	// on info.Size().
	info, err := tmp.Stat()
	if err != nil {
		app.Logger.Error("Failed to stat temp file", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	decryptor := crypto.NewDecryptor(tmp, streamer.AEAD, info.Size())
	app.finalizeUpload(writer, request, decryptor, convergentKey, header.Filename)
}
// HandleChunk validates and stores one chunk of a chunked upload. The chunk
// is written encrypted at rest (random per-chunk key) under
// <storage>/tmp/<upload_id>/<index> by saveChunk.
func (app *App) HandleChunk(writer http.ResponseWriter, request *http.Request) {
	request.Body = http.MaxBytesReader(writer, request.Body, MaxRequestOverhead)
	uid := request.FormValue("upload_id")
	idx, err := strconv.Atoi(request.FormValue("index"))
	if err != nil {
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	// Bound the index by the worst-case count of minimum-size chunks.
	maxChunks := int((app.Conf.MaxMB*MegaByte)/MinChunkSize) + ChunkSafetyMargin
	if !reUploadID.MatchString(uid) || idx > maxChunks || idx < 0 {
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	file, _, err := request.FormFile("chunk")
	if err != nil {
		// Detect the body-size limit via the typed error rather than the
		// brittle error-string comparison used previously.
		var maxErr *http.MaxBytesError
		if errors.As(err, &maxErr) {
			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
			return
		}
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			app.Logger.Error("Failed to close chunk file", "err", closeErr)
		}
	}()
	if err := app.saveChunk(uid, idx, file); err != nil {
		app.Logger.Error("Failed to save chunk", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
	}
}
// HandleFinish assembles a completed chunked upload: it validates the
// request, streams every chunk decryptor once to derive the convergent key,
// rewinds them all, and hands the merged plaintext stream to finalizeUpload.
// The chunk directory is removed once finalization completes.
func (app *App) HandleFinish(writer http.ResponseWriter, request *http.Request) {
	uid := request.FormValue("upload_id")
	total, err := strconv.Atoi(request.FormValue("total"))
	if err != nil {
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	maxChunks := int((app.Conf.MaxMB*MegaByte)/MinChunkSize) + ChunkSafetyMargin
	if !reUploadID.MatchString(uid) || total <= 0 || total > maxChunks {
		app.SendError(writer, request, http.StatusBadRequest)
		return
	}
	decryptors, closeAll, err := app.getChunkDecryptors(uid, total)
	if err != nil {
		app.Logger.Error("Failed to open chunks", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	defer func() {
		closeAll()
		if err := os.RemoveAll(filepath.Join(app.Conf.StorageDir, TempDirName, uid)); err != nil {
			app.Logger.Error("Failed to remove chunk dir", "err", err)
		}
	}()
	readers := make([]io.Reader, 0, len(decryptors))
	for _, dec := range decryptors {
		readers = append(readers, dec)
	}
	// First pass: hash the full plaintext to derive the convergent key.
	hasher := sha256.New()
	if _, err := io.Copy(hasher, io.MultiReader(readers...)); err != nil {
		app.Logger.Error("Failed to hash chunks", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}
	convergentKey := hasher.Sum(nil)[:crypto.KeySize]
	// Rewind every chunk so the second pass re-reads from the start.
	for _, dec := range decryptors {
		if _, err := dec.Seek(0, io.SeekStart); err != nil {
			app.Logger.Error("Failed to reset chunk decryptor", "err", err)
			app.SendError(writer, request, http.StatusInternalServerError)
			return
		}
	}
	app.finalizeUpload(writer, request, io.MultiReader(readers...), convergentKey, request.FormValue("filename"))
}
// finalizeUpload writes the plaintext stream to storage encrypted under the
// convergent key, records metadata in the DB, and responds with the
// shareable link. If a file with the same convergent ID already exists, the
// upload is deduplicated and only the metadata is refreshed.
func (app *App) finalizeUpload(writer http.ResponseWriter, request *http.Request, src io.Reader, key []byte, filename string) {
	id := crypto.GetID(key, filepath.Ext(filename))
	finalPath := filepath.Join(app.Conf.StorageDir, id)

	// Deduplicated hit: identical content was uploaded before.
	if info, err := os.Stat(finalPath); err == nil {
		if err := app.RegisterFile(id, info.Size()); err != nil {
			app.Logger.Error("Failed to update metadata for existing file", "err", err)
		}
		app.RespondWithLink(writer, request, key, filename)
		return
	}

	if err := app.encryptAndSave(src, key, finalPath); err != nil {
		app.Logger.Error("Encryption failed", "err", err)
		app.SendError(writer, request, http.StatusInternalServerError)
		return
	}

	// Metadata failures are logged but do not fail the upload itself.
	if info, err := os.Stat(finalPath); err != nil {
		app.Logger.Error("Failed to stat new file", "err", err)
	} else if err := app.RegisterFile(id, info.Size()); err != nil {
		app.Logger.Error("Failed to save metadata", "err", err)
	}
	app.RespondWithLink(writer, request, key, filename)
}
+152
View File
@@ -0,0 +1,152 @@
package crypto_test
import (
"bytes"
"crypto/rand"
"io"
"testing"
"github.com/skidoodle/safebin/internal/crypto"
)
// TestDeriveKey checks that key derivation yields a 16-byte key and is
// deterministic for identical input.
func TestDeriveKey(t *testing.T) {
	content := []byte("some random file content")
	src := bytes.NewReader(content)
	first, err := crypto.DeriveKey(src)
	if err != nil {
		t.Fatalf("DeriveKey failed: %v", err)
	}
	if got := len(first); got != 16 {
		t.Errorf("Expected key length 16, got %d", got)
	}
	if _, err := src.Seek(0, 0); err != nil {
		t.Fatalf("Seek failed: %v", err)
	}
	second, err := crypto.DeriveKey(src)
	if err != nil {
		t.Fatalf("DeriveKey failed second time: %v", err)
	}
	if !bytes.Equal(first, second) {
		t.Error("DeriveKey is not deterministic")
	}
}
// TestGetID checks that ID generation is deterministic and never empty.
func TestGetID(t *testing.T) {
	zeroKey := make([]byte, 16)
	const ext = ".txt"
	first := crypto.GetID(zeroKey, ext)
	second := crypto.GetID(zeroKey, ext)
	if first != second {
		t.Error("GetID is not deterministic")
	}
	if first == "" {
		t.Error("GetID returned empty string")
	}
}
// TestEncryptDecryptStream round-trips a payload spanning multiple 64KiB
// chunks through EncryptStream and the Decryptor, verifying byte-exact
// recovery.
func TestEncryptDecryptStream(t *testing.T) {
	const payloadSize = (64 * 1024) * 3 // spans several GCM chunks
	payload := make([]byte, payloadSize)
	if _, err := rand.Read(payload); err != nil {
		t.Fatalf("rand.Read payload failed: %v", err)
	}
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		t.Fatalf("rand.Read key failed: %v", err)
	}
	streamer, err := crypto.NewGCMStreamer(key)
	if err != nil {
		t.Fatalf("Failed to create streamer: %v", err)
	}
	var sealed bytes.Buffer
	if err := streamer.EncryptStream(&sealed, bytes.NewReader(payload)); err != nil {
		t.Fatalf("EncryptStream failed: %v", err)
	}
	decryptor := crypto.NewDecryptor(bytes.NewReader(sealed.Bytes()), streamer.AEAD, int64(sealed.Len()))
	recovered := make([]byte, payloadSize)
	n, err := io.ReadFull(decryptor, recovered)
	if err != nil {
		t.Fatalf("ReadFull failed: %v", err)
	}
	if n != payloadSize {
		t.Errorf("Expected %d bytes, got %d", payloadSize, n)
	}
	if !bytes.Equal(payload, recovered) {
		t.Error("Decrypted content does not match original payload")
	}
}
func TestDecryptorSeeking(t *testing.T) {
chunkSize := 64 * 1024
payload := make([]byte, chunkSize*4)
for i := range len(payload) {
payload[i] = byte(i % 255)
}
key := make([]byte, 16)
if _, err := rand.Read(key); err != nil {
t.Fatalf("rand.Read key failed: %v", err)
}
var encryptedBuf bytes.Buffer
streamer, _ := crypto.NewGCMStreamer(key)
if err := streamer.EncryptStream(&encryptedBuf, bytes.NewReader(payload)); err != nil {
t.Fatalf("EncryptStream failed: %v", err)
}
r := bytes.NewReader(encryptedBuf.Bytes())
d := crypto.NewDecryptor(r, streamer.AEAD, int64(encryptedBuf.Len()))
tests := []struct {
name string
offset int64
whence int
read int
}{
{"Start of file", 0, io.SeekStart, 100},
{"Middle of chunk 1", 1000, io.SeekStart, 100},
{"Start of chunk 2", int64(chunkSize), io.SeekStart, 100},
{"Middle of chunk 2", int64(chunkSize) + 50, io.SeekStart, 100},
{"Near end", int64(len(payload)) - 10, io.SeekStart, 10},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
pos, err := d.Seek(tc.offset, tc.whence)
if err != nil {
t.Fatalf("Seek failed: %v", err)
}
if pos != tc.offset {
t.Errorf("Expected pos %d, got %d", tc.offset, pos)
}
buf := make([]byte, tc.read)
n, err := io.ReadFull(d, buf)
if err != nil {
t.Fatalf("Read failed: %v", err)
}
if n != tc.read {
t.Errorf("Expected %d bytes, got %d", tc.read, n)
}
expected := payload[tc.offset : tc.offset+int64(tc.read)]
if !bytes.Equal(buf, expected) {
t.Errorf("Data mismatch at offset %d", tc.offset)
}
})
}
}
+13 -3
View File
@@ -16,6 +16,7 @@ type Decryptor struct {
aead cipher.AEAD aead cipher.AEAD
size int64 size int64
offset int64 offset int64
phyOffset int64
} }
func NewDecryptor(readSeeker io.ReadSeeker, aead cipher.AEAD, encryptedSize int64) *Decryptor { func NewDecryptor(readSeeker io.ReadSeeker, aead cipher.AEAD, encryptedSize int64) *Decryptor {
@@ -35,6 +36,7 @@ func NewDecryptor(readSeeker io.ReadSeeker, aead cipher.AEAD, encryptedSize int6
aead: aead, aead: aead,
size: plainSize, size: plainSize,
offset: 0, offset: 0,
phyOffset: -1,
} }
} }
@@ -49,14 +51,22 @@ func (d *Decryptor) Read(buf []byte) (int, error) {
overhead := int64(d.aead.Overhead()) overhead := int64(d.aead.Overhead())
actualChunkSize := int64(GCMChunkSize) + overhead actualChunkSize := int64(GCMChunkSize) + overhead
_, err := d.readSeeker.Seek(chunkIdx*actualChunkSize, io.SeekStart) targetOffset := chunkIdx * actualChunkSize
if err != nil {
return 0, fmt.Errorf("failed to seek: %w", err) if d.phyOffset != targetOffset {
if _, err := d.readSeeker.Seek(targetOffset, io.SeekStart); err != nil {
return 0, fmt.Errorf("failed to seek: %w", err)
}
d.phyOffset = targetOffset
} }
encrypted := make([]byte, actualChunkSize) encrypted := make([]byte, actualChunkSize)
bytesRead, err := io.ReadFull(d.readSeeker, encrypted) bytesRead, err := io.ReadFull(d.readSeeker, encrypted)
if bytesRead > 0 {
d.phyOffset += int64(bytesRead)
}
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
return 0, fmt.Errorf("failed to read encrypted data: %w", err) return 0, fmt.Errorf("failed to read encrypted data: %w", err)
} }
+21 -14
View File
@@ -10,15 +10,9 @@ import (
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"syscall" "syscall"
"time"
"github.com/skidoodle/safebin/internal/app" "github.com/skidoodle/safebin/internal/app"
) "github.com/skidoodle/safebin/web"
const (
permUserRWX = 0o700
serverTimeout = 10 * time.Minute
shutdownTimeout = 10 * time.Second
) )
func main() { func main() {
@@ -33,16 +27,29 @@ func main() {
"max_file_size", fmt.Sprintf("%dMB", cfg.MaxMB), "max_file_size", fmt.Sprintf("%dMB", cfg.MaxMB),
) )
tmpDir := filepath.Join(cfg.StorageDir, "tmp") tmpDir := filepath.Join(cfg.StorageDir, app.TempDirName)
if err := os.MkdirAll(tmpDir, permUserRWX); err != nil { if err := os.MkdirAll(tmpDir, app.PermUserRWX); err != nil {
logger.Error("Failed to initialize storage directory", "err", err) logger.Error("Failed to initialize storage directory", "err", err)
os.Exit(1) os.Exit(1)
} }
db, err := app.InitDB(cfg.StorageDir)
if err != nil {
logger.Error("Failed to initialize database", "err", err)
os.Exit(1)
}
defer func() {
if err := db.Close(); err != nil {
logger.Error("Failed to close database", "err", err)
}
}()
application := &app.App{ application := &app.App{
Conf: cfg, Conf: cfg,
Logger: logger, Logger: logger,
Tmpl: app.ParseTemplates(), Tmpl: app.ParseTemplates(web.Assets),
Assets: web.Assets,
DB: db,
} }
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
@@ -53,9 +60,9 @@ func main() {
srv := &http.Server{ srv := &http.Server{
Addr: cfg.Addr, Addr: cfg.Addr,
Handler: application.Routes(), Handler: application.Routes(),
ReadTimeout: serverTimeout, ReadTimeout: app.ServerTimeout,
WriteTimeout: serverTimeout, WriteTimeout: app.ServerTimeout,
IdleTimeout: serverTimeout, IdleTimeout: app.ServerTimeout,
} }
go func() { go func() {
@@ -70,7 +77,7 @@ func main() {
<-ctx.Done() <-ctx.Done()
application.Logger.Info("Shutting down gracefully...") application.Logger.Info("Shutting down gracefully...")
shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) shutdownCtx, cancel := context.WithTimeout(context.Background(), app.ShutdownTimeout)
defer cancel() defer cancel()
if err := srv.Shutdown(shutdownCtx); err != nil { if err := srv.Shutdown(shutdownCtx); err != nil {
+1 -1
View File
@@ -50,7 +50,7 @@ async function handleUpload(file) {
$("busy-state").classList.remove("hidden"); $("busy-state").classList.remove("hidden");
$("p-bar-container").classList.add("visible"); $("p-bar-container").classList.add("visible");
const uploadID = Math.random().toString(36).substring(2, 15); const uploadID = Array.from(window.crypto.getRandomValues(new Uint8Array(16)), (b) => b.toString(16).padStart(2, "0")).join("");
const chunkSize = 1024 * 1024 * 8; const chunkSize = 1024 * 1024 * 8;
const total = Math.ceil(file.size / chunkSize); const total = Math.ceil(file.size / chunkSize);
+6
View File
@@ -0,0 +1,6 @@
package web
import "embed"
// Assets embeds the static web UI files (HTML templates, stylesheet, script,
// and favicon) so the server ships as a single self-contained binary.
//
//go:embed *.html *.css *.js *.ico
var Assets embed.FS
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

+4 -3
View File
@@ -1,11 +1,12 @@
{{define "base"}} {{define "layout"}}
<!doctype html> <!doctype html>
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="UTF-8" /> <meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="icon" type="image/vnd.microsoft.icon" href="/static/favicon.ico" />
<title>safebin</title> <title>safebin</title>
<link rel="stylesheet" href="/static/css/style.css" /> <link rel="stylesheet" href="/static/style.css" />
</head> </head>
<body> <body>
<div class="container"> <div class="container">
@@ -30,7 +31,7 @@
</section> </section>
</div> </div>
<input type="file" id="file-input" class="hidden" /> <input type="file" id="file-input" class="hidden" />
<script src="/static/js/app.js"></script> <script src="/static/app.js"></script>
</body> </body>
</html> </html>
{{end}} {{end}}