13 Commits

180f32902b  fix: patch flaws and refactor routes
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-22 05:55:24 +01:00

89b4d3f4e6  chore: use scratch
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-22 04:37:33 +01:00

577c4b67f6  feat: implement sequential chunk reading and decryption
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-22 04:37:20 +01:00

5c13d24736  chore: update deps
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-22 04:10:46 +01:00

297db0effa  chore: update readme
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-22 04:10:35 +01:00

f0336b21b8  feat: show version on website
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-19 01:31:28 +01:00

2bcf339408  refactor: db location
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-19 00:44:03 +01:00

2df37e9002  fix: relax chunk limits, support proxies, optimize reads
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-19 00:33:09 +01:00

722dbaa6aa  feat: implement encrypted chunked storage and convergent encryption
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-18 23:39:53 +01:00

2d6a3ab216  fix(web): use web crypto for upload id's
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-18 22:30:20 +01:00

d18ef48bd4  perf(storage)!: optimize cleanup with secondary index
            BREAKING CHANGE: This change requires a fresh database. Existing
            databases will lack the index, and the cleanup routine will not
            function correctly for pre-existing files.
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-18 22:10:07 +01:00

e18be18029  fix(download): enforce integrity check using db metadata
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-18 21:54:08 +01:00

a69e5a52a3  perf: implement zero-copy merge for chunked uploads
            Signed-off-by: skidoodle <contact@albert.lol>
            2026-01-18 21:45:41 +01:00
20 changed files with 726 additions and 301 deletions
-1
@@ -1,3 +1,2 @@
 storage/*
-# Added by goreleaser init:
 dist/
+5 -1
@@ -4,6 +4,9 @@ before:
   hooks:
     - go mod tidy
+
+snapshot:
+  version_template: "{{ .Version }}"
 builds:
   - env:
       - CGO_ENABLED=0
@@ -13,7 +16,8 @@ builds:
       - amd64
       - arm64
     ldflags:
-      - -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}
+      - -s -w
+      - -X github.com/skidoodle/safebin/internal/app.Version={{.Version}}
    flags:
      - -trimpath
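
The goreleaser change above retargets version injection from main to the internal app package. A minimal sketch of the mechanism, with the variable name taken from the config diff further down and an illustrative build invocation:

// The -X linker flag overwrites a package-level string variable at
// link time, e.g.:
//
//	go build -ldflags "-X github.com/skidoodle/safebin/internal/app.Version=1.2.3"
//
// Without the flag, the compiled-in default remains "dev".
package app

var Version = "dev"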
+22 -21
@@ -1,6 +1,5 @@
-FROM --platform=$BUILDPLATFORM golang:1.25.6 AS builder
-WORKDIR /app
+FROM --platform=$BUILDPLATFORM golang:1.25.6-alpine AS builder
+WORKDIR /src
 COPY go.mod go.sum ./
 RUN go mod download
@@ -9,33 +8,35 @@ COPY . .
 ARG TARGETOS
 ARG TARGETARCH
+ARG VERSION=dev
 RUN --mount=type=cache,target=/root/.cache/go-build \
     CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
-    -ldflags="-s -w" \
+    -ldflags="-s -w -X github.com/skidoodle/safebin/internal/app.Version=$VERSION" \
     -trimpath \
-    -o /app/safebin .
+    -o /bin/safebin .

-FROM debian:trixie-slim
-
-LABEL org.opencontainers.image.source="https://github.com/skidoodle/safebin"
-LABEL org.opencontainers.image.description="Minimalist, self-hosted file storage with Zero-Knowledge at Rest encryption."
-LABEL org.opencontainers.image.licenses="GPL-2.0-only"
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    ca-certificates \
-    media-types \
-    && rm -rf /var/lib/apt/lists/*
-RUN useradd -m -u 10001 -s /bin/bash appuser
+FROM alpine:latest AS sys-context
+RUN apk add --no-cache ca-certificates mailcap
+RUN echo "appuser:x:10001:10001:appuser:/:/sbin/nologin" > /etc/passwd_app \
+    && echo "appuser:x:10001:appuser" > /etc/group_app
+RUN mkdir -p /app/storage
+
+FROM scratch
+COPY --from=sys-context /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+COPY --from=sys-context /etc/mime.types /etc/mime.types
+COPY --from=sys-context /etc/passwd_app /etc/passwd
+COPY --from=sys-context /etc/group_app /etc/group
+COPY --from=builder /bin/safebin /app/safebin
+COPY --from=sys-context --chown=10001:10001 /app/storage /app/storage

 WORKDIR /app
-COPY --from=builder /app/safebin .
-RUN mkdir -p /app/storage && chown 10001:10001 /app/storage
-VOLUME ["/app/storage"]
 USER 10001
+VOLUME ["/app/storage"]
 EXPOSE 8080
+ENV SAFEBIN_HOST=0.0.0.0 \
+    SAFEBIN_PORT=8080 \
+    SAFEBIN_STORAGE=/app/storage
 ENTRYPOINT ["/app/safebin"]
+18 -12
@@ -1,19 +1,25 @@
-FROM debian:trixie-slim
+FROM alpine:latest AS sys-context
+RUN apk add --no-cache ca-certificates mailcap
+RUN echo "appuser:x:10001:10001:appuser:/:/sbin/nologin" > /etc/passwd_app \
+    && echo "appuser:x:10001:appuser" > /etc/group_app
+RUN mkdir -p /app/storage

-RUN apt-get update && apt-get install -y --no-install-recommends \
-    ca-certificates \
-    media-types \
-    && rm -rf /var/lib/apt/lists/*
-RUN useradd -m -u 10001 -s /bin/bash appuser
+FROM scratch
+COPY --from=sys-context /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+COPY --from=sys-context /etc/mime.types /etc/mime.types
+COPY --from=sys-context /etc/passwd_app /etc/passwd
+COPY --from=sys-context /etc/group_app /etc/group
+COPY safebin /app/safebin
+COPY --from=sys-context --chown=10001:10001 /app/storage /app/storage

 WORKDIR /app
-COPY safebin .
-RUN mkdir -p /app/storage && chown 10001:10001 /app/storage
-VOLUME ["/app/storage"]
 USER 10001
+VOLUME ["/app/storage"]
 EXPOSE 8080
+ENV SAFEBIN_HOST=0.0.0.0 \
+    SAFEBIN_PORT=8080 \
+    SAFEBIN_STORAGE=/app/storage
 ENTRYPOINT ["/app/safebin"]
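
Both runtime images ship a MIME database (media-types on Debian, mailcap on Alpine, with /etc/mime.types copied into the scratch stage). A sketch of why that file matters: on Unix, Go's mime package seeds extension lookups from /etc/mime.types, and its built-in table covers only a handful of common web types.

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Resolved via /etc/mime.types on Unix; on a bare scratch image
	// without that file, most extensions resolve to the empty string.
	fmt.Println(mime.TypeByExtension(".mp4"))
}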
+66 -46
@@ -1,45 +1,36 @@
 # safebin

-`safebin` is a minimalist, self-hosted file storage service with **Zero-Knowledge at Rest** encryption.
-
-## Features
-
-- **End-to-End Encryption**: Files are encrypted using AES-128-GCM before being written to disk.
-- **Key-Derived URLs**: The decryption key is part of the URL. The server uses this key to locate and decrypt the file on the fly.
-- **Integrity**: Uses GCM (Galois/Counter Mode) to ensure files cannot be tampered with while stored.
-- **Storage Deduplication**: Identical files result in the same ID, saving disk space.
-- **Chunked Uploads**: Supports large file uploads via the web interface using 8MB chunks.
-
-## Usage
-
-### Web Interface
-
-Simply drag and drop files into the browser. The interface handles chunking and provides a shareable link once the upload is finalized.
-
-### Command Line (CLI)
-
-You can upload files directly using `curl`:
-
-```bash
-curl -F 'file=@photo.jpg' https://bin.example.com
-```
-
-The server will return a direct link:
-`https://bin.example.com/0iEZGtW-ikVdu...jpg`
-
-## Configuration
-
-`safebin` can be configured via environment variables or command-line flags:
-
-| Flag | Environment Variable | Description | Default |
-| :--- | :--- | :--- | :--- |
-| `-h` | `SAFEBIN_HOST` | Bind address for the server. | `0.0.0.0` |
-| `-p` | `SAFEBIN_PORT` | Port to listen on. | `8080` |
-| `-s` | `SAFEBIN_STORAGE` | Directory for encrypted storage. | `./storage` |
-| `-m` | `SAFEBIN_MAX_MB` | Maximum file size in MB. | `512` |
-
-## Deployment
-
-### Docker Compose
-
-The easiest way to deploy is using the provided `compose.yaml`:
+[![Go Version](https://img.shields.io/badge/Go-1.25+-00ADD8?style=flat-square&logo=go)](https://go.dev/)
+[![License](https://img.shields.io/badge/License-GPLv2-blue.svg?style=flat-square)](LICENSE)
+[![Docker Image](https://img.shields.io/badge/Docker-ghcr.io%2Fskidoodle%2Fsafebin-blue?style=flat-square&logo=docker)](https://github.com/skidoodle/safebin/pkgs/container/safebin)
+
+**safebin** is a minimalist, self-hosted file storage service designed for efficiency and privacy. It utilizes **Convergent Encryption** to provide secure storage at rest while automatically deduplicating identical files to save disk space.
+
+## 📖 Architecture & Security Model
+
+Safebin is designed to be **Host-Proof at Rest**. While it is not a client-side E2EE solution, it ensures that the server cannot access stored data without the specific link generated at upload time.
+
+### How it Works
+
+1. **Upload**: The server receives the file stream and calculates a SHA-256 hash of the content.
+2. **Key Generation**: This hash becomes the encryption key (Convergent Encryption).
+3. **Encryption**: The file is encrypted using **AES-128-GCM** and written to disk.
+4. **Deduplication**: Because the key is derived from the content, identical files generate the same ID. The server detects this and stores only one physical copy, regardless of how many times it is uploaded.
+5. **Zero-Knowledge Storage**: The server saves the file metadata (ID, size, expiry) but **discards the encryption key**.
+6. **Link Generation**: The key is encoded into the URL fragment returned to the user.
+
+> **Security Note**: If the server's database or physical storage is seized, the files are mathematically inaccessible. However, because encryption occurs on the server, the process does have access to the plaintext in memory during the brief window of upload and download.
+
+## ✨ Features
+
+- **Convergent Encryption & Deduplication**: Files are addressed by their content. Uploading the same file twice results in a single storage entry, significantly reducing disk usage.
+- **Tamper-Proof Storage**: Uses Galois/Counter Mode (GCM) to ensure data integrity. Modified files will fail decryption.
+- **Volatile Keys**: Decryption keys reside only in the generated URLs, not in the database.
+- **Smart Retention**: A cubic scaling algorithm prioritizes keeping small files (snippets, logs) for a long time, while large binaries expire quickly.
+- **Chunked Uploads**: Robust handling of large files via the web interface using 8MB chunks.
+
+## 🚀 Deployment
+
+### Docker Compose (Recommended)

 ```yaml
 services:
@@ -48,35 +39,64 @@ services:
     container_name: safebin
     restart: unless-stopped
     ports:
-      - 8080:8080
+      - "8080:8080"
    environment:
-      - SAFEBIN_HOST=0.0.0.0
-      - SAFEBIN_PORT=8080
-      - SAFEBIN_STORAGE=/app/storage
      - SAFEBIN_MAX_MB=512
    volumes:
-      - data:/app/storage
+      - safebin_data:/app/storage

 volumes:
-  data:
+  safebin_data:
 ```

-### Manual Build
+### Manual Installation

 Requires Go 1.25 or higher.

 ```bash
+# Build the binary
 go build -o safebin .
-./safebin -p 8080 -s ./data
+
+# Run the server
+./safebin -p 8080 -s ./data -m 1024
 ```

-## Retention Policy
+## ⚙️ Configuration

-The server runs a background cleanup task every hour. Retention is calculated using a cubic scaling formula to prioritize small files:
+Configuration is handled via environment variables or command-line flags. Flags take precedence over environment variables.

-- **Small files (e.g., < 1MB)**: Kept for up to **365 days**.
-- **Large files (at Max MB)**: Kept for **24 hours**.
-- **Temporary Uploads**: Unfinished chunked uploads are purged after **4 hours**.
+| Flag | Environment Variable | Description | Default |
+| :--- | :--- | :--- | :--- |
+| `-h` | `SAFEBIN_HOST` | Interface/Bind address. | `0.0.0.0` |
+| `-p` | `SAFEBIN_PORT` | Port to listen on. | `8080` |
+| `-s` | `SAFEBIN_STORAGE` | Directory for database and files. | `./storage` |
+| `-m` | `SAFEBIN_MAX_MB` | Maximum allowed file size in MB. | `512` |

-## License
+## 💻 Usage

-This project is licensed under the **GNU General Public License v2.0**.
+### Web Interface
+
+Navigate to `http://localhost:8080`. Drag and drop files to upload. The browser handles chunking automatically.
+
+### CLI (curl)
+
+Safebin is optimized for terminal usage. You can upload files directly via `curl`:
+
+```bash
+# Upload a file
+curl -F 'file=@screenshot.png' https://bin.example.com
+
+# Response
+https://bin.example.com/0iEZGtW-ikVdu...png
+```
+
+## ⏳ Retention Policy
+
+To keep storage manageable, Safebin runs a cleanup task every hour. File lifetime is determined by size using a cubic curve:
+
+* **Small Files (< 1MB)**: Retained for **365 days**.
+* **Medium Files (~50% Max Size)**: Retained for ~30 days.
+* **Large Files (Max Size)**: Retained for **24 hours**.
+* **Incomplete Uploads**: Purged after **4 hours**.
+
+## 📄 License
+
+This project is licensed under the [GNU General Public License v2.0](LICENSE).
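
The README's "How it Works" list compresses the whole convergent-encryption scheme into six steps; a self-contained sketch of that flow, using hypothetical helper names rather than the project's actual API:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha256"
	"fmt"
)

// deriveKey hashes the plaintext and keeps the first 16 bytes, so identical
// content always yields the same AES-128 key.
func deriveKey(content []byte) []byte {
	sum := sha256.Sum256(content)
	return sum[:16]
}

// seal encrypts content under its own content-derived key. The deterministic
// nonce is an assumption of this sketch: it is tolerable only because key and
// plaintext determine each other, so a (key, nonce) pair is never reused with
// different data.
func seal(content []byte) ([]byte, error) {
	block, err := aes.NewCipher(deriveKey(content))
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, aead.NonceSize())
	return aead.Seal(nil, nonce, content, nil), nil
}

func main() {
	a, _ := seal([]byte("same bytes"))
	b, _ := seal([]byte("same bytes"))
	fmt.Println(bytes.Equal(a, b)) // true: identical input, identical ciphertext, one stored copy
}

The same property is also the scheme's known trade-off: anyone who already possesses a file can check whether the server stores it.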
+1 -1
@@ -4,4 +4,4 @@ go 1.25.6

 require go.etcd.io/bbolt v1.4.3

-require golang.org/x/sys v0.29.0 // indirect
+require golang.org/x/sys v0.40.0 // indirect
+2 -2
@@ -8,7 +8,7 @@ go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
 go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
 golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
 golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+10 -3
@@ -13,6 +13,10 @@ import (
 	"go.etcd.io/bbolt"
 )

+var (
+	Version = "dev"
+)
+
 const (
 	DefaultHost = "0.0.0.0"
 	DefaultPort = 8080
@@ -22,6 +26,7 @@ const (
 	ShutdownTimeout    = 10 * time.Second
 	UploadChunkSize    = 8 << 20
+	MinChunkSize       = 1 << 20
 	MaxRequestOverhead = 10 << 20
 	PermUserRWX        = 0o700
 	MegaByte           = 1 << 20
@@ -35,9 +40,11 @@ const (
 	MinRetention = 24 * time.Hour
 	MaxRetention = 365 * 24 * time.Hour

-	DBFileName   = "safebin.db"
-	DBBucketName = "files"
-	TempDirName  = "tmp"
+	DBDirName         = "db"
+	DBFileName        = "safebin.db"
+	DBBucketName      = "files"
+	DBBucketIndexName = "expiry_index"
+	TempDirName       = "tmp"
 )

 type Config struct {
+14 -3
@@ -1,6 +1,7 @@
 package app

 import (
+	"os"
 	"path/filepath"
 	"time"

@@ -15,15 +16,25 @@ type FileMeta struct {
 }

 func InitDB(storageDir string) (*bbolt.DB, error) {
-	path := filepath.Join(storageDir, DBFileName)
+	dbDir := filepath.Join(storageDir, DBDirName)
+	if err := os.MkdirAll(dbDir, PermUserRWX); err != nil {
+		return nil, err
+	}
+	path := filepath.Join(dbDir, DBFileName)

 	db, err := bbolt.Open(path, 0600, &bbolt.Options{Timeout: 1 * time.Second})
 	if err != nil {
 		return nil, err
 	}

 	err = db.Update(func(tx *bbolt.Tx) error {
-		_, err := tx.CreateBucketIfNotExists([]byte(DBBucketName))
-		return err
+		if _, err := tx.CreateBucketIfNotExists([]byte(DBBucketName)); err != nil {
+			return err
+		}
+		if _, err := tx.CreateBucketIfNotExists([]byte(DBBucketIndexName)); err != nil {
+			return err
+		}
+		return nil
 	})
 	if err != nil {
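
The index bucket created here only works as a range index because bbolt keeps keys byte-sorted. A small sketch of the ordering property behind the "<RFC3339>_<id>" key layout; the .UTC() normalization is this sketch's assumption — lexicographic order only tracks chronological order while every key carries the same UTC offset:

package main

import (
	"fmt"
	"time"
)

func indexKey(expiresAt time.Time, id string) string {
	return expiresAt.UTC().Format(time.RFC3339) + "_" + id
}

func main() {
	now := time.Now()
	earlier := indexKey(now.Add(1*time.Hour), "zzz")
	later := indexKey(now.Add(2*time.Hour), "aaa")
	// true: earlier expiry sorts first regardless of the ID suffix,
	// so a cursor scan can stop at the first key past "now".
	fmt.Println(earlier < later)
}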
+14 -3
@@ -23,16 +23,18 @@ func TestInitDB(t *testing.T) {
 		}
 	}()

-	dbPath := filepath.Join(tmpDir, DBFileName)
+	dbPath := filepath.Join(tmpDir, DBDirName, DBFileName)
 	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
 		t.Error("Database file was not created")
 	}

 	err = db.View(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
-		if b == nil {
+		if b := tx.Bucket([]byte(DBBucketName)); b == nil {
 			t.Errorf("Bucket '%s' was not created", DBBucketName)
 		}
+		if b := tx.Bucket([]byte(DBBucketIndexName)); b == nil {
+			t.Errorf("Bucket '%s' was not created", DBBucketIndexName)
+		}
 		return nil
 	})
 	if err != nil {
@@ -85,6 +87,15 @@ func TestDB_MetadataLifecycle(t *testing.T) {
 		if meta.ExpiresAt.Before(time.Now()) {
 			t.Error("Expiration time is in the past")
 		}
+
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))
+		indexKey := []byte(meta.ExpiresAt.Format(time.RFC3339) + "_" + fileID)
+		if val := bIndex.Get(indexKey); val == nil {
+			t.Error("Index entry not found")
+		} else if string(val) != fileID {
+			t.Errorf("Index value mismatch: want %s, got %s", fileID, string(val))
+		}
 		return nil
 	})
 	if err != nil {
+31 -1
@@ -2,6 +2,7 @@ package app

 import (
 	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"mime"
 	"net/http"
@@ -9,6 +10,7 @@ import (
 	"path/filepath"

 	"github.com/skidoodle/safebin/internal/crypto"
+	"go.etcd.io/bbolt"
 )

 func (app *App) HandleGetFile(writer http.ResponseWriter, request *http.Request) {
@@ -28,14 +30,42 @@ func (app *App) HandleGetFile(writer http.ResponseWriter, request *http.Request)
 	}

 	id := crypto.GetID(key, ext)
-	path := filepath.Join(app.Conf.StorageDir, id)

+	var meta FileMeta
+	err = app.DB.View(func(tx *bbolt.Tx) error {
+		b := tx.Bucket([]byte(DBBucketName))
+		if b == nil {
+			return fmt.Errorf("bucket not found")
+		}
+		data := b.Get([]byte(id))
+		if data == nil {
+			return fmt.Errorf("file not found")
+		}
+		return json.Unmarshal(data, &meta)
+	})
+	if err != nil {
+		app.SendError(writer, request, http.StatusNotFound)
+		return
+	}
+
+	path := filepath.Join(app.Conf.StorageDir, id)
 	info, err := os.Stat(path)
 	if err != nil {
 		app.SendError(writer, request, http.StatusNotFound)
 		return
 	}

+	if info.Size() != meta.Size {
+		app.Logger.Error("Integrity check failed: disk size mismatch",
+			"id", id,
+			"disk_bytes", info.Size(),
+			"expected_bytes", meta.Size,
+		)
+		app.SendError(writer, request, http.StatusInternalServerError)
+		return
+	}
+
 	file, err := os.Open(path)
 	if err != nil {
+32 -23
@@ -11,23 +11,7 @@ import (
 func (app *App) Routes() *http.ServeMux {
 	mux := http.NewServeMux()

-	fileServer := http.FileServer(http.FS(app.Assets))
-	staticHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path == "" || strings.HasSuffix(r.URL.Path, "/") {
-			http.NotFound(w, r)
-			return
-		}
-		if strings.HasSuffix(r.URL.Path, ".html") {
-			http.NotFound(w, r)
-			return
-		}
-		fileServer.ServeHTTP(w, r)
-	})
-
-	mux.Handle("GET /static/", http.StripPrefix("/static/", staticHandler))
+	mux.Handle("GET /static/", http.StripPrefix("/static/", app.handleStatic()))

 	mux.HandleFunc("GET /{$}", app.HandleHome)
 	mux.HandleFunc("POST /{$}", app.HandleUpload)
 	mux.HandleFunc("POST /upload/chunk", app.HandleChunk)
@@ -37,10 +21,23 @@ func (app *App) Routes() *http.ServeMux {
 	return mux
 }

+func (app *App) handleStatic() http.Handler {
+	fs := http.FileServer(http.FS(app.Assets))
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "" || strings.HasSuffix(r.URL.Path, "/") || strings.HasSuffix(r.URL.Path, ".html") {
+			http.NotFound(w, r)
+			return
+		}
+		fs.ServeHTTP(w, r)
+	})
+}
+
 func (app *App) HandleHome(writer http.ResponseWriter, request *http.Request) {
 	err := app.Tmpl.ExecuteTemplate(writer, "layout", map[string]any{
 		"MaxMB": app.Conf.MaxMB,
 		"Host": request.Host,
+		"Version": Version,
 	})

 	if err != nil {
@@ -51,7 +48,16 @@ func (app *App) HandleHome(writer http.ResponseWriter, request *http.Request) {
 func (app *App) RespondWithLink(writer http.ResponseWriter, request *http.Request, key []byte, originalName string) {
 	keySlug := base64.RawURLEncoding.EncodeToString(key)
 	ext := filepath.Ext(originalName)
-	link := fmt.Sprintf("%s/%s%s", request.Host, keySlug, ext)
+
+	const unsafeChars = "\"<> \\/:;?@[]^`{}|~"
+	safeExt := strings.Map(func(r rune) rune {
+		if strings.ContainsRune(unsafeChars, r) {
+			return -1
+		}
+		return r
+	}, ext)
+
+	link := fmt.Sprintf("%s/%s%s", request.Host, keySlug, safeExt)

 	if request.Header.Get("X-Requested-With") == "XMLHttpRequest" {
 		html := `
@@ -72,9 +78,12 @@ func (app *App) RespondWithLink(writer http.ResponseWriter, request *http.Reques
 		return
 	}

-	scheme := "https"
-	if request.TLS == nil {
-		scheme = "http"
+	scheme := request.Header.Get("X-Forwarded-Proto")
+	if scheme == "" {
+		scheme = "https"
+		if request.TLS == nil {
+			scheme = "http"
+		}
 	}

 	if _, err := fmt.Fprintf(writer, "%s://%s\n", scheme, link); err != nil {
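
The scheme handling added above makes generated links honest behind a TLS-terminating reverse proxy. The same logic as a standalone helper; trusting X-Forwarded-Proto is only sound when a proxy you control sets or strips the header, and nothing in this sketch enforces that deployment assumption:

package app

import "net/http"

func requestScheme(r *http.Request) string {
	// Prefer the proxy-reported scheme when present.
	if s := r.Header.Get("X-Forwarded-Proto"); s != "" {
		return s
	}
	// Fall back to inspecting the direct connection.
	if r.TLS != nil {
		return "https"
	}
	return "http"
}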
+110
@@ -2,6 +2,7 @@ package app

 import (
 	"bytes"
+	"encoding/base64"
 	"fmt"
 	"io"
 	"log/slog"
@@ -12,6 +13,8 @@ import (
 	"path/filepath"
 	"strings"
 	"testing"
+
+	"github.com/skidoodle/safebin/internal/crypto"
 )

 func setupTestApp(t *testing.T) (*App, string) {
@@ -176,6 +179,113 @@ func TestIntegration_ChunkedUpload(t *testing.T) {
 	}
 }

+func TestIntegration_ChunkedUpload_VerifyEncryption(t *testing.T) {
+	app, storageDir := setupTestApp(t)
+	server := httptest.NewServer(app.Routes())
+	defer server.Close()
+
+	uploadID := "securechunk123"
+	plaintext := []byte("This is a secret message that should be encrypted")
+
+	uploadChunk(t, server.URL, uploadID, 0, plaintext)
+
+	chunkPath := filepath.Join(storageDir, TempDirName, uploadID, "0")
+	encryptedData, err := os.ReadFile(chunkPath)
+	if err != nil {
+		t.Fatalf("Failed to read chunk file: %v", err)
+	}
+
+	if bytes.Contains(encryptedData, plaintext) {
+		t.Fatal("Chunk file contains plaintext data!")
+	}
+
+	if len(encryptedData) <= crypto.KeySize {
+		t.Fatalf("Chunk file too small: %d bytes", len(encryptedData))
+	}
+
+	key := encryptedData[:crypto.KeySize]
+	ciphertext := encryptedData[crypto.KeySize:]
+
+	streamer, err := crypto.NewGCMStreamer(key)
+	if err != nil {
+		t.Fatalf("Failed to create streamer: %v", err)
+	}
+
+	r := bytes.NewReader(ciphertext)
+	d := crypto.NewDecryptor(r, streamer.AEAD, int64(len(ciphertext)))
+
+	decrypted, err := io.ReadAll(d)
+	if err != nil {
+		t.Fatalf("Failed to decrypt chunk: %v", err)
+	}
+
+	if !bytes.Equal(decrypted, plaintext) {
+		t.Errorf("Decrypted data mismatch.\nWant: %s\nGot: %s", plaintext, decrypted)
+	}
+}
+
+func TestIntegration_Upload_VerifyEncryption(t *testing.T) {
+	app, storageDir := setupTestApp(t)
+	server := httptest.NewServer(app.Routes())
+	defer server.Close()
+
+	plaintext := []byte("Sensitive Data For Full Upload")
+
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+	part, err := writer.CreateFormFile("file", "secret.txt")
+	if err != nil {
+		t.Fatalf("CreateFormFile failed: %v", err)
+	}
+	if _, err := part.Write(plaintext); err != nil {
+		t.Fatalf("Write failed: %v", err)
+	}
+	if err := writer.Close(); err != nil {
+		t.Fatalf("Writer close failed: %v", err)
+	}
+
+	req, _ := http.NewRequest("POST", server.URL+"/", body)
+	req.Header.Set("Content-Type", writer.FormDataContentType())
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := resp.Body.Close(); err != nil {
+			t.Errorf("Failed to close response body: %v", err)
+		}
+	}()
+
+	respBytes, _ := io.ReadAll(resp.Body)
+	slug := filepath.Base(strings.TrimSpace(string(respBytes)))
+
+	if len(slug) < SlugLength {
+		t.Fatalf("Invalid slug: %s", slug)
+	}
+	keyBase64 := slug[:SlugLength]
+	key, _ := base64.RawURLEncoding.DecodeString(keyBase64)
+
+	ext := filepath.Ext("secret.txt")
+	id := crypto.GetID(key, ext)
+	finalPath := filepath.Join(storageDir, id)
+
+	finalData, err := os.ReadFile(finalPath)
+	if err != nil {
+		t.Fatalf("Failed to read final file: %v", err)
+	}
+
+	if bytes.Contains(finalData, plaintext) {
+		t.Fatal("Final file contains plaintext!")
+	}
+
+	streamer, _ := crypto.NewGCMStreamer(key)
+	d := crypto.NewDecryptor(bytes.NewReader(finalData), streamer.AEAD, int64(len(finalData)))
+	decrypted, _ := io.ReadAll(d)
+
+	if !bytes.Equal(decrypted, plaintext) {
+		t.Error("Final file decryption failed")
+	}
+}
+
 func uploadChunk(t *testing.T, baseURL, uid string, idx int, data []byte) {
 	body := &bytes.Buffer{}
 	writer := multipart.NewWriter(body)
+131 -55
@@ -2,6 +2,7 @@ package app

 import (
 	"context"
+	"crypto/rand"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -48,55 +49,114 @@ func (app *App) saveChunk(uid string, idx int, src io.Reader) error {
 		}
 	}()

-	if _, err := io.Copy(dest, src); err != nil {
-		return fmt.Errorf("copy chunk: %w", err)
+	key := make([]byte, crypto.KeySize)
+	if _, err := rand.Read(key); err != nil {
+		return fmt.Errorf("generate chunk key: %w", err)
+	}
+	if _, err := dest.Write(key); err != nil {
+		return fmt.Errorf("write chunk key: %w", err)
+	}
+
+	streamer, err := crypto.NewGCMStreamer(key)
+	if err != nil {
+		return fmt.Errorf("create streamer: %w", err)
+	}
+
+	if err := streamer.EncryptStream(dest, src); err != nil {
+		return fmt.Errorf("encrypt chunk: %w", err)
 	}

 	return nil
 }

-func (app *App) mergeChunks(uid string, total int) (string, error) {
-	tmpPath := filepath.Join(app.Conf.StorageDir, TempDirName, "m_"+uid)
-
-	merged, err := os.Create(tmpPath)
-	if err != nil {
-		return "", fmt.Errorf("create merge file: %w", err)
-	}
-	defer func() {
-		if closeErr := merged.Close(); closeErr != nil {
-			app.Logger.Error("Failed to close merged file", "err", closeErr)
-		}
-	}()
-
-	limit := app.Conf.MaxMB * MegaByte
-	var written int64
-
-	for i := range total {
-		partPath := filepath.Join(app.Conf.StorageDir, TempDirName, uid, strconv.Itoa(i))
-		part, err := os.Open(partPath)
-		if err != nil {
-			return "", fmt.Errorf("open chunk %d: %w", i, err)
-		}
-
-		n, err := io.Copy(merged, part)
-		if closeErr := part.Close(); closeErr != nil {
-			app.Logger.Error("Failed to close chunk part", "err", closeErr)
-		}
-		if err != nil {
-			return "", fmt.Errorf("append chunk %d: %w", i, err)
-		}
-
-		written += n
-		if written > limit {
-			return "", io.ErrShortWrite
-		}
-	}
-
-	return tmpPath, nil
+func (app *App) openChunkDecryptor(uid string, idx int) (io.ReadCloser, error) {
+	partPath := filepath.Join(app.Conf.StorageDir, TempDirName, uid, strconv.Itoa(idx))
+	f, err := os.Open(partPath)
+	if err != nil {
+		return nil, fmt.Errorf("open chunk %d: %w", idx, err)
+	}
+
+	key := make([]byte, crypto.KeySize)
+	if _, err := io.ReadFull(f, key); err != nil {
+		_ = f.Close()
+		return nil, fmt.Errorf("read chunk key %d: %w", idx, err)
+	}
+
+	info, err := f.Stat()
+	if err != nil {
+		_ = f.Close()
+		return nil, fmt.Errorf("stat chunk %d: %w", idx, err)
+	}
+
+	bodySize := info.Size() - int64(crypto.KeySize)
+	if bodySize < 0 {
+		_ = f.Close()
+		return nil, fmt.Errorf("invalid chunk size %d", idx)
+	}
+
+	bodyReader := io.NewSectionReader(f, int64(crypto.KeySize), bodySize)
+
+	streamer, err := crypto.NewGCMStreamer(key)
+	if err != nil {
+		_ = f.Close()
+		return nil, fmt.Errorf("create streamer %d: %w", idx, err)
+	}
+
+	decryptor := crypto.NewDecryptor(bodyReader, streamer.AEAD, bodySize)
+	return &chunkReadCloser{Decryptor: decryptor, f: f}, nil
+}
+
+type chunkReadCloser struct {
+	*crypto.Decryptor
+	f *os.File
+}
+
+func (c *chunkReadCloser) Close() error {
+	return c.f.Close()
+}
+
+type SequentialChunkReader struct {
+	app        *App
+	uid        string
+	total      int
+	currentIdx int
+	currentRC  io.ReadCloser
+}
+
+func (s *SequentialChunkReader) Read(p []byte) (n int, err error) {
+	if s.currentRC == nil {
+		if s.currentIdx >= s.total {
+			return 0, io.EOF
+		}
+		rc, err := s.app.openChunkDecryptor(s.uid, s.currentIdx)
+		if err != nil {
+			return 0, err
+		}
+		s.currentRC = rc
+	}
+
+	n, err = s.currentRC.Read(p)
+	if err == io.EOF {
+		_ = s.currentRC.Close()
+		s.currentRC = nil
+		s.currentIdx++
+		if n > 0 {
+			return n, nil
+		}
+		return s.Read(p)
+	}
+	return n, err
+}
+
+func (s *SequentialChunkReader) Close() error {
+	if s.currentRC != nil {
+		return s.currentRC.Close()
+	}
+	return nil
 }

 func (app *App) encryptAndSave(src io.Reader, key []byte, finalPath string) error {
@@ -151,32 +211,42 @@ func (app *App) RegisterFile(id string, size int64) error {
 	}

 	return app.DB.Update(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
+		bFiles := tx.Bucket([]byte(DBBucketName))
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))

 		data, err := json.Marshal(meta)
 		if err != nil {
 			return err
 		}
-		return b.Put([]byte(id), data)
+
+		if err := bFiles.Put([]byte(id), data); err != nil {
+			return err
+		}
+
+		indexKey := []byte(meta.ExpiresAt.Format(time.RFC3339) + "_" + id)
+		return bIndex.Put(indexKey, []byte(id))
 	})
 }

 func (app *App) CleanStorage() {
-	now := time.Now()
-	var toDelete []string
+	now := time.Now().Format(time.RFC3339)
+
+	var toDeleteIDs []string
+	var toDeleteKeys []string

 	err := app.DB.View(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
-		c := b.Cursor()
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))
+		if bIndex == nil {
+			return nil
+		}
+		c := bIndex.Cursor()

 		for k, v := c.First(); k != nil; k, v = c.Next() {
-			var meta FileMeta
-			if err := json.Unmarshal(v, &meta); err != nil {
-				continue
-			}
-			if now.After(meta.ExpiresAt) {
-				toDelete = append(toDelete, string(k))
-			}
+			if string(k) > now {
+				break
+			}
+			toDeleteKeys = append(toDeleteKeys, string(k))
+			toDeleteIDs = append(toDeleteIDs, string(v))
 		}
 		return nil
 	})
@@ -186,21 +256,27 @@
 		return
 	}

-	if len(toDelete) == 0 {
+	if len(toDeleteIDs) == 0 {
 		return
 	}

 	err = app.DB.Update(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
-		for _, id := range toDelete {
+		bFiles := tx.Bucket([]byte(DBBucketName))
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))
+
+		for i, id := range toDeleteIDs {
 			path := filepath.Join(app.Conf.StorageDir, id)
 			if err := os.RemoveAll(path); err != nil {
 				app.Logger.Error("Failed to remove expired file", "path", id, "err", err)
 			}
-			if err := b.Delete([]byte(id)); err != nil {
+			if err := bFiles.Delete([]byte(id)); err != nil {
 				app.Logger.Error("Failed to delete metadata", "id", id, "err", err)
 			}
+			if err := bIndex.Delete([]byte(toDeleteKeys[i])); err != nil {
+				app.Logger.Error("Failed to delete index", "key", toDeleteKeys[i], "err", err)
+			}
 		}
 		return nil
 	})
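
CleanStorage now deletes whatever RegisterFile scheduled via the expiry index; the expiry itself comes from the size-based cubic curve the README describes. One plausible shape for that curve, matching only the documented endpoints (365 days for tiny files, 24 hours at the size cap) — the project's actual formula is not part of this diff, so treat this purely as an illustration:

package app

import "time"

func retentionFor(size, maxSize int64) time.Duration {
	ratio := float64(size) / float64(maxSize)
	if ratio > 1 {
		ratio = 1
	}
	inv := 1 - ratio // 1.0 for tiny files, 0.0 at the size cap
	return MinRetention + time.Duration(inv*inv*inv*float64(MaxRetention-MinRetention))
}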
+97 -44
@@ -1,55 +1,19 @@
 package app

 import (
+	"bytes"
+	"crypto/rand"
 	"encoding/json"
+	"io"
 	"os"
 	"path/filepath"
 	"testing"
 	"time"

+	"github.com/skidoodle/safebin/internal/crypto"
 	"go.etcd.io/bbolt"
 )

-func TestCleanup_AbandonedMerge(t *testing.T) {
-	tmpDir := t.TempDir()
-	tmpStorage := filepath.Join(tmpDir, TempDirName)
-	if err := os.MkdirAll(tmpStorage, 0700); err != nil {
-		t.Fatalf("MkdirAll failed: %v", err)
-	}
-
-	db, err := InitDB(tmpDir)
-	if err != nil {
-		t.Fatalf("InitDB failed: %v", err)
-	}
-	defer func() {
-		if err := db.Close(); err != nil {
-			t.Errorf("Failed to close DB: %v", err)
-		}
-	}()
-
-	app := &App{
-		Conf:   Config{StorageDir: tmpDir},
-		Logger: discardLogger(),
-		DB:     db,
-	}
-
-	abandonedFile := filepath.Join(tmpStorage, "m_abandoned_upload_id")
-	if err := os.WriteFile(abandonedFile, []byte("partial data"), 0600); err != nil {
-		t.Fatal(err)
-	}
-
-	oldTime := time.Now().Add(-TempExpiry - time.Hour)
-	if err := os.Chtimes(abandonedFile, oldTime, oldTime); err != nil {
-		t.Fatal(err)
-	}
-
-	app.CleanTemp(tmpStorage)
-
-	if _, err := os.Stat(abandonedFile); !os.IsNotExist(err) {
-		t.Error("Cleanup failed to remove abandoned merge file from crashed session")
-	}
-}
-
 func TestCleanup_AbandonedChunks(t *testing.T) {
 	tmpDir := t.TempDir()
 	tmpStorage := filepath.Join(tmpDir, TempDirName)
@@ -135,9 +99,16 @@ func TestCleanup_ExpiredStorage(t *testing.T) {
 	}

 	if err := app.DB.Update(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
+		bFiles := tx.Bucket([]byte(DBBucketName))
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))
+
 		data, _ := json.Marshal(expiredMeta)
-		return b.Put([]byte(filename), data)
+		if err := bFiles.Put([]byte(filename), data); err != nil {
+			return err
+		}
+
+		indexKey := []byte(expiredMeta.ExpiresAt.Format(time.RFC3339) + "_" + filename)
+		return bIndex.Put(indexKey, []byte(filename))
 	}); err != nil {
 		t.Fatalf("DB Update failed: %v", err)
 	}
@@ -149,12 +120,94 @@ func TestCleanup_ExpiredStorage(t *testing.T) {
 	}

 	if err := app.DB.View(func(tx *bbolt.Tx) error {
-		b := tx.Bucket([]byte(DBBucketName))
-		if v := b.Get([]byte(filename)); v != nil {
+		bFiles := tx.Bucket([]byte(DBBucketName))
+		if v := bFiles.Get([]byte(filename)); v != nil {
 			t.Error("Cleanup failed to remove metadata")
 		}
+
+		bIndex := tx.Bucket([]byte(DBBucketIndexName))
+		indexKey := []byte(expiredMeta.ExpiresAt.Format(time.RFC3339) + "_" + filename)
+		if v := bIndex.Get(indexKey); v != nil {
+			t.Error("Cleanup failed to remove index entry")
+		}
 		return nil
 	}); err != nil {
 		t.Fatalf("DB View failed: %v", err)
 	}
 }
+
+func TestSaveChunk_EncryptsData(t *testing.T) {
+	tmpDir := t.TempDir()
+	app := &App{
+		Conf:   Config{StorageDir: tmpDir},
+		Logger: discardLogger(),
+	}
+
+	uid := "test-encrypt-chunk"
+	plaintext := make([]byte, 1024)
+	if _, err := rand.Read(plaintext); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := app.saveChunk(uid, 0, bytes.NewReader(plaintext)); err != nil {
+		t.Fatalf("saveChunk failed: %v", err)
+	}
+
+	path := filepath.Join(tmpDir, TempDirName, uid, "0")
+	fileData, err := os.ReadFile(path)
+	if err != nil {
+		t.Fatalf("ReadFile failed: %v", err)
+	}
+
+	if bytes.Equal(fileData, plaintext) {
+		t.Fatal("Chunk stored as plaintext!")
+	}
+	if bytes.Contains(fileData, plaintext) {
+		t.Fatal("Chunk contains plaintext!")
+	}
+
+	expectedSize := crypto.KeySize + len(plaintext) + 16
+	if len(fileData) != expectedSize {
+		t.Errorf("Unexpected file size. Want %d, got %d", expectedSize, len(fileData))
+	}
+}
+
+func TestSequentialChunkReader_RestoresData(t *testing.T) {
+	tmpDir := t.TempDir()
+	app := &App{
+		Conf:   Config{StorageDir: tmpDir},
+		Logger: discardLogger(),
+	}
+
+	uid := "test-restore"
+	data1 := []byte("chunk one data")
+	data2 := []byte("chunk two data")
+
+	if err := app.saveChunk(uid, 0, bytes.NewReader(data1)); err != nil {
+		t.Fatal(err)
+	}
+	if err := app.saveChunk(uid, 1, bytes.NewReader(data2)); err != nil {
+		t.Fatal(err)
+	}
+
+	reader := &SequentialChunkReader{
+		app:   app,
+		uid:   uid,
+		total: 2,
+	}
+	defer func() {
+		if err := reader.Close(); err != nil {
+			t.Errorf("Failed to close reader: %v", err)
+		}
+	}()

+	restored, err := io.ReadAll(reader)
+	if err != nil {
+		t.Fatalf("ReadAll failed: %v", err)
+	}
+
+	expected := append(data1, data2...)
+	if !bytes.Equal(restored, expected) {
+		t.Errorf("Restored data mismatch.\nWant: %s\nGot: %s", expected, restored)
+	}
+}
+134 -81
@@ -1,6 +1,8 @@
 package app

 import (
+	"crypto/rand"
+	"crypto/sha256"
 	"errors"
 	"io"
 	"net/http"
@@ -8,6 +10,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"strconv"
+	"strings"

 	"github.com/skidoodle/safebin/internal/crypto"
 )
@@ -18,67 +21,120 @@ func (app *App) HandleUpload(writer http.ResponseWriter, request *http.Request)
 	limit := (app.Conf.MaxMB * MegaByte) + MegaByte
 	request.Body = http.MaxBytesReader(writer, request.Body, limit)

-	file, header, err := request.FormFile("file")
+	mr, err := request.MultipartReader()
 	if err != nil {
-		if err.Error() == "http: request body too large" {
-			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
-			return
-		}
 		app.SendError(writer, request, http.StatusBadRequest)
 		return
 	}
-	defer func() {
-		if closeErr := file.Close(); closeErr != nil {
-			app.Logger.Error("Failed to close upload file", "err", closeErr)
-		}
-	}()
+
+	var filename string
+	var partReader io.Reader
+
+	for {
+		part, err := mr.NextPart()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			app.SendError(writer, request, http.StatusBadRequest)
+			return
+		}
+		if part.FormName() == "file" {
+			filename = part.FileName()
+			partReader = part
+			break
+		}
+	}
+
+	if partReader == nil {
+		app.SendError(writer, request, http.StatusBadRequest)
+		return
+	}

 	tmp, err := os.CreateTemp(filepath.Join(app.Conf.StorageDir, TempDirName), "up_*")
 	if err != nil {
 		app.Logger.Error("Failed to create temp file", "err", err)
 		app.SendError(writer, request, http.StatusInternalServerError)
 		return
 	}
 	tmpPath := tmp.Name()
 	defer func() {
+		_ = tmp.Close()
 		if removeErr := os.Remove(tmpPath); removeErr != nil && !os.IsNotExist(removeErr) {
 			app.Logger.Error("Failed to remove temp file", "err", removeErr)
 		}
 	}()
-	defer func() {
-		if closeErr := tmp.Close(); closeErr != nil {
-			app.Logger.Error("Failed to close temp file", "err", closeErr)
-		}
-	}()

-	if _, err := io.Copy(tmp, file); err != nil {
-		app.Logger.Error("Failed to write temp file", "err", err)
-		app.SendError(writer, request, http.StatusRequestEntityTooLarge)
+	ephemeralKey := make([]byte, crypto.KeySize)
+	if _, err := rand.Read(ephemeralKey); err != nil {
+		app.Logger.Error("Failed to generate ephemeral key", "err", err)
+		app.SendError(writer, request, http.StatusInternalServerError)
 		return
 	}

-	app.FinalizeFile(writer, request, tmp, header.Filename)
+	pr, pw := io.Pipe()
+	hasher := sha256.New()
+	errChan := make(chan error, 1)
+
+	go func() {
+		_, err := io.Copy(io.MultiWriter(hasher, pw), partReader)
+		_ = pw.CloseWithError(err)
+		errChan <- err
+	}()
+
+	streamer, err := crypto.NewGCMStreamer(ephemeralKey)
+	if err != nil {
+		_ = pr.Close()
+		app.Logger.Error("Failed to create streamer", "err", err)
+		app.SendError(writer, request, http.StatusInternalServerError)
+		return
+	}
+
+	if err := streamer.EncryptStream(tmp, pr); err != nil {
+		_ = pr.Close()
+		app.Logger.Error("Failed to encrypt stream", "err", err)
+		app.SendError(writer, request, http.StatusInternalServerError)
+		return
+	}
+
+	if err := <-errChan; err != nil {
+		if errors.Is(err, http.ErrMissingBoundary) || strings.Contains(err.Error(), "request body too large") {
+			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
+		} else {
+			app.Logger.Error("Failed to read/hash upload", "err", err)
+			app.SendError(writer, request, http.StatusInternalServerError)
+		}
+		return
+	}
+
+	convergentKey := hasher.Sum(nil)[:crypto.KeySize]
+
+	if _, err := tmp.Seek(0, 0); err != nil {
+		app.Logger.Error("Seek failed", "err", err)
+		app.SendError(writer, request, http.StatusInternalServerError)
+		return
+	}
+
+	info, _ := tmp.Stat()
+	decryptor := crypto.NewDecryptor(tmp, streamer.AEAD, info.Size())
+
+	app.finalizeUpload(writer, request, decryptor, convergentKey, filename)
 }

 func (app *App) HandleChunk(writer http.ResponseWriter, request *http.Request) {
-	request.Body = http.MaxBytesReader(writer, request.Body, MaxRequestOverhead)
+	const MaxChunkBody = UploadChunkSize + (1 << 20)
+	request.Body = http.MaxBytesReader(writer, request.Body, MaxChunkBody)

 	uid := request.FormValue("upload_id")
 	idx, err := strconv.Atoi(request.FormValue("index"))
 	if err != nil {
 		app.SendError(writer, request, http.StatusBadRequest)
 		return
 	}

-	maxChunks := int((app.Conf.MaxMB*MegaByte)/UploadChunkSize) + ChunkSafetyMargin
+	maxChunks := int((app.Conf.MaxMB*MegaByte)/MinChunkSize) + ChunkSafetyMargin
 	if !reUploadID.MatchString(uid) || idx > maxChunks || idx < 0 {
 		app.SendError(writer, request, http.StatusBadRequest)
@@ -86,17 +142,14 @@ func (app *App) HandleChunk(writer http.ResponseWriter, request *http.Request) {
 	}

 	file, _, err := request.FormFile("chunk")
 	if err != nil {
-		if err.Error() == "http: request body too large" {
+		if strings.Contains(err.Error(), "request body too large") {
 			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
 			return
 		}
 		app.SendError(writer, request, http.StatusBadRequest)
 		return
 	}
 	defer func() {
 		if closeErr := file.Close(); closeErr != nil {
 			app.Logger.Error("Failed to close chunk file", "err", closeErr)
@@ -111,75 +164,81 @@ func (app *App) HandleChunk(writer http.ResponseWriter, request *http.Request) {

 func (app *App) HandleFinish(writer http.ResponseWriter, request *http.Request) {
 	uid := request.FormValue("upload_id")
 	total, err := strconv.Atoi(request.FormValue("total"))
 	if err != nil {
 		app.SendError(writer, request, http.StatusBadRequest)
 		return
 	}

-	maxChunks := int((app.Conf.MaxMB*MegaByte)/UploadChunkSize) + ChunkSafetyMargin
+	maxChunks := int((app.Conf.MaxMB*MegaByte)/MinChunkSize) + ChunkSafetyMargin
 	if !reUploadID.MatchString(uid) || total > maxChunks || total <= 0 {
 		app.SendError(writer, request, http.StatusBadRequest)
 		return
 	}

-	mergedPath, err := app.mergeChunks(uid, total)
-	if err != nil {
-		app.Logger.Error("Merge failed", "err", err)
-		if errors.Is(err, io.ErrShortWrite) {
-			app.SendError(writer, request, http.StatusRequestEntityTooLarge)
-		} else {
-			app.SendError(writer, request, http.StatusInternalServerError)
-		}
-		return
-	}
-	defer func() {
-		if removeErr := os.Remove(mergedPath); removeErr != nil && !os.IsNotExist(removeErr) {
-			app.Logger.Error("Failed to remove merged file", "err", removeErr)
-		}
-	}()
-
-	mergedRead, err := os.Open(mergedPath)
-	if err != nil {
-		app.Logger.Error("Failed to open merged file", "err", err)
-		app.SendError(writer, request, http.StatusInternalServerError)
-		return
-	}
-	defer func() {
-		if closeErr := mergedRead.Close(); closeErr != nil {
-			app.Logger.Error("Failed to close merged reader", "err", closeErr)
-		}
-	}()
-
-	app.FinalizeFile(writer, request, mergedRead, request.FormValue("filename"))
-
-	if err := os.RemoveAll(filepath.Join(app.Conf.StorageDir, TempDirName, uid)); err != nil {
-		app.Logger.Error("Failed to remove chunk dir", "err", err)
-	}
+	defer func() {
+		if err := os.RemoveAll(filepath.Join(app.Conf.StorageDir, TempDirName, uid)); err != nil {
+			app.Logger.Error("Failed to remove chunk dir", "err", err)
+		}
+	}()
+
+	var totalSize int64
+	for i := range total {
+		info, err := os.Stat(filepath.Join(app.Conf.StorageDir, TempDirName, uid, strconv.Itoa(i)))
+		if err != nil {
+			app.Logger.Error("Missing chunk", "index", i, "err", err)
+			app.SendError(writer, request, http.StatusBadRequest)
+			return
+		}
+		chunkContentSize := info.Size() - crypto.KeySize
+		if chunkContentSize < 0 {
+			app.SendError(writer, request, http.StatusBadRequest)
+			return
+		}
+		totalSize += chunkContentSize
+	}
+
+	if totalSize > (app.Conf.MaxMB * MegaByte) {
+		app.Logger.Warn("Upload exceeded quota", "uid", uid, "size", totalSize)
+		app.SendError(writer, request, http.StatusRequestEntityTooLarge)
+		return
+	}
+
+	hasher := sha256.New()
+	for i := range total {
+		rc, err := app.openChunkDecryptor(uid, i)
+		if err != nil {
+			app.Logger.Error("Failed to open chunk for hashing", "index", i, "err", err)
+			app.SendError(writer, request, http.StatusInternalServerError)
+			return
+		}
+		if _, err := io.Copy(hasher, rc); err != nil {
+			_ = rc.Close()
+			app.Logger.Error("Failed to hash chunk", "index", i, "err", err)
+			app.SendError(writer, request, http.StatusInternalServerError)
+			return
+		}
+		_ = rc.Close()
+	}
+	convergentKey := hasher.Sum(nil)[:crypto.KeySize]
+
+	multiSrc := &SequentialChunkReader{
+		app:   app,
+		uid:   uid,
+		total: total,
+	}
+	defer func() {
+		if err := multiSrc.Close(); err != nil {
+			app.Logger.Error("Failed to close sequential reader", "uid", uid, "err", err)
+		}
+	}()
+
+	app.finalizeUpload(writer, request, multiSrc, convergentKey, request.FormValue("filename"))
 }

-func (app *App) FinalizeFile(writer http.ResponseWriter, request *http.Request, src *os.File, filename string) {
-	if _, err := src.Seek(0, 0); err != nil {
-		app.Logger.Error("Seek failed", "err", err)
-		app.SendError(writer, request, http.StatusInternalServerError)
-		return
-	}
-
-	key, err := crypto.DeriveKey(src)
-	if err != nil {
-		app.Logger.Error("Key derivation failed", "err", err)
-		app.SendError(writer, request, http.StatusInternalServerError)
-		return
-	}
-
+func (app *App) finalizeUpload(writer http.ResponseWriter, request *http.Request, src io.Reader, key []byte, filename string) {
 	ext := filepath.Ext(filename)
 	id := crypto.GetID(key, ext)
 	finalPath := filepath.Join(app.Conf.StorageDir, id)
@@ -192,12 +251,6 @@ func (app *App) FinalizeFile(writer http.ResponseWriter, request *http.Request,
 		return
 	}

-	if _, err := src.Seek(0, 0); err != nil {
-		app.Logger.Error("Seek failed", "err", err)
-		app.SendError(writer, request, http.StatusInternalServerError)
-		return
-	}
-
 	if err := app.encryptAndSave(src, key, finalPath); err != nil {
 		app.Logger.Error("Encryption failed", "err", err)
 		app.SendError(writer, request, http.StatusInternalServerError)
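
The rewritten HandleUpload hashes and encrypts in a single pass: the multipart stream is teed through an io.Pipe so the SHA-256 hasher and the encrypting writer consume the same bytes concurrently, and the plaintext is never spooled to disk. The pattern in isolation, with encryptTo standing in for the streamer used above:

package upload

import (
	"crypto/sha256"
	"io"
)

func hashAndEncrypt(src io.Reader, encryptTo func(io.Reader) error) ([]byte, error) {
	pr, pw := io.Pipe()
	hasher := sha256.New()
	errc := make(chan error, 1)

	go func() {
		// Every byte read from src feeds both the hasher and the pipe.
		_, err := io.Copy(io.MultiWriter(hasher, pw), src)
		pw.CloseWithError(err) // a nil err closes the pipe with io.EOF
		errc <- err
	}()

	// The encryptor drains the pipe while the goroutine fills it.
	if err := encryptTo(pr); err != nil {
		return nil, err
	}
	if err := <-errc; err != nil {
		return nil, err
	}
	return hasher.Sum(nil), nil
}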
+13 -3
@@ -16,6 +16,7 @@ type Decryptor struct {
 	aead      cipher.AEAD
 	size      int64
 	offset    int64
+	phyOffset int64
 }

 func NewDecryptor(readSeeker io.ReadSeeker, aead cipher.AEAD, encryptedSize int64) *Decryptor {
@@ -35,6 +36,7 @@ func NewDecryptor(readSeeker io.ReadSeeker, aead cipher.AEAD, encryptedSize int6
 		aead:      aead,
 		size:      plainSize,
 		offset:    0,
+		phyOffset: -1,
 	}
 }
@@ -49,14 +51,22 @@ func (d *Decryptor) Read(buf []byte) (int, error) {
 	overhead := int64(d.aead.Overhead())
 	actualChunkSize := int64(GCMChunkSize) + overhead

-	_, err := d.readSeeker.Seek(chunkIdx*actualChunkSize, io.SeekStart)
-	if err != nil {
-		return 0, fmt.Errorf("failed to seek: %w", err)
+	targetOffset := chunkIdx * actualChunkSize
+
+	if d.phyOffset != targetOffset {
+		if _, err := d.readSeeker.Seek(targetOffset, io.SeekStart); err != nil {
+			return 0, fmt.Errorf("failed to seek: %w", err)
+		}
+		d.phyOffset = targetOffset
 	}

 	encrypted := make([]byte, actualChunkSize)
 	bytesRead, err := io.ReadFull(d.readSeeker, encrypted)
+	if bytesRead > 0 {
+		d.phyOffset += int64(bytesRead)
+	}
 	if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
 		return 0, fmt.Errorf("failed to read encrypted data: %w", err)
 	}
+1 -1
@@ -50,7 +50,7 @@ async function handleUpload(file) {
 	$("busy-state").classList.remove("hidden");
 	$("p-bar-container").classList.add("visible");

-	const uploadID = Math.random().toString(36).substring(2, 15);
+	const uploadID = Array.from(window.crypto.getRandomValues(new Uint8Array(16)), (b) => b.toString(16).padStart(2, "0")).join("");
 	const chunkSize = 1024 * 1024 * 8;
 	const total = Math.ceil(file.size / chunkSize);
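
This one-line change swaps Math.random(), which is predictable and collision-prone, for the Web Crypto CSPRNG. A Go analogue of the new ID format (16 random bytes, hex-encoded to 32 characters):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func newUploadID() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	id, err := newUploadID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // 32 hex characters, matching the JS output
}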
+9
@@ -29,6 +29,15 @@
       <div class="dim cli-label">CLI Usage</div>
       <pre class="cli-pre">curl -F file=@yourfile {{.Host}}</pre>
     </section>
+    <footer class="footer">
+      <div class="dim">
+        {{if eq .Version "dev"}}
+        <a href="https://github.com/skidoodle/safebin" target="_blank" rel="noopener noreferrer">dev</a>
+        {{else}}
+        <a href="https://github.com/skidoodle/safebin/releases/tag/v{{.Version}}" target="_blank" rel="noopener noreferrer">v{{.Version}}</a>
+        {{end}}
+      </div>
+    </footer>
   </div>
   <input type="file" id="file-input" class="hidden" />
   <script src="/static/app.js"></script>
+16
@@ -237,10 +237,26 @@ button {
 	display: none !important;
 }

+.footer {
+	margin-top: 20px;
+	text-align: center;
+	opacity: 0.5;
+}
+
+.footer a {
+	color: inherit;
+	text-decoration: none;
+}
+
+.footer a:hover {
+	text-decoration: underline;
+}
+
 @media (max-width: 400px) {
 	.github-btn span {
 		display: none;
 	}
 	.github-btn {
 		padding: 6px;
 	}