61 Commits

Author SHA1 Message Date
237063d9fc Merge pull request #8 from ferdzo/fix/bucket-creation-status201-to-200ok
Changed 201 Created to 200 OK when creating bucket
2026-03-12 00:29:05 +01:00
c2215d8589 Changed 201 Created to 200 OK when creating bucket 2026-03-12 00:28:01 +01:00
82cb58dff1 README.md edit 2026-03-11 23:39:25 +01:00
b592d6a2f0 Merge pull request #7 from ferdzo/feat/cli
Fs CLI
2026-03-11 20:27:22 +01:00
ef12326975 Copilot suggestions fixed 2026-03-11 20:26:17 +01:00
a23577d531 Update cmd/admin_snapshot.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-11 20:19:50 +01:00
e8256d66e3 Added Github Action for build and release. 2026-03-11 20:18:24 +01:00
ad53a6d8ac Backup/restore option for cli 2026-03-11 20:12:00 +01:00
cfb9b591ac Policy example documentation 2026-03-11 00:50:09 +01:00
b27f1186cf Remove Role command. 2026-03-11 00:47:24 +01:00
f57c7b8390 Initial FS CLI 2026-03-11 00:40:51 +01:00
181cd42bbf Changed environment variable names for auth settings. 2026-03-03 23:27:16 +01:00
1a0f15a313 Merge pull request #5 from ferdzo/feature/auth
Authentication
2026-03-03 16:12:54 +01:00
46eb093d83 Fixed copilot suggestions. 2026-03-03 00:38:49 +01:00
9abffd056e Update metrics/metrics.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-03 00:35:16 +01:00
cd7a1b4956 Update storage/blob.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-03 00:34:53 +01:00
cfec3afb49 S3 compatibility and openapi spec. 2026-03-03 00:24:45 +01:00
66e3db44dc add admin endpoints for user policy and status updates 2026-03-03 00:11:39 +01:00
57951fda38 add admin user delete endpoint and service support 2026-03-02 23:58:12 +01:00
22cfb820f9 skip S3 policy check for admin routes after signature verify 2026-03-02 23:54:05 +01:00
93a3aabf7d allow signed admin routes before S3 policy resolution 2026-03-02 23:51:46 +01:00
9b8d0b2b3e add auth service tests for user admin primitives 2026-03-02 23:39:39 +01:00
7a7f570882 add admin v1 create/list/get HTTP endpoints 2026-03-02 23:38:58 +01:00
828b7c7c34 add auth service user create/list/get primitives 2026-03-02 23:37:33 +01:00
96e3b0e042 wire admin API feature flag 2026-03-02 23:36:09 +01:00
651413d494 Merge pull request #6 from ferdzo/feature/metrics
Metrics endpoint
2026-03-02 23:28:05 +01:00
c03bd3e3a2 Minimal fixes for metrics 2026-03-02 23:26:57 +01:00
8c9cd96213 Auth for metrics, removed unwanted metrics and fixed tests. 2026-03-02 22:30:15 +01:00
Andrej Mickov
6ca3fb8701 Updated metrics 2026-02-27 16:38:51 +01:00
Andrej Mickov
f04f7601c0 Initial metrics endpoint added in Prometheus style 2026-02-27 14:59:23 +01:00
Andrej Mickov
2fea3da9ee Added ListObjectsV1 2026-02-27 14:01:10 +01:00
ba4256fd00 Update .env.example
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-02-27 10:39:12 +01:00
0edaae6dad Update README.md
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-02-27 10:38:55 +01:00
678c10a3ad Initial working authentication with SigV4 2026-02-27 01:35:20 +01:00
79819ad2d0 Merge pull request #4 from ferdzo/fix/general
General enhancements and fixes
2026-02-25 00:46:30 +01:00
abe1f453fc Enhance API with health check endpoints and improve multipart upload management 2026-02-25 00:34:06 +01:00
a9fbc06dd0 Fix formatting in README for object operations 2026-02-24 14:04:09 +01:00
edfb5f5b2a Refine object operations and multi-object delete sections
Updated the formatting and structure of the object operations and multi-object delete sections in the README.
2026-02-24 10:23:53 +01:00
c997fe8471 Improve formatting of features in README
Updated feature list formatting in README.md for better readability.
2026-02-24 10:23:24 +01:00
fca553028c Merge pull request #3 from ferdzo/LICENSE
License
2026-02-24 10:20:05 +01:00
3630aad584 Merge branch 'develop' into LICENSE 2026-02-24 10:19:41 +01:00
93296ff74e Merge pull request #2 from ferdzo/feature/garbage-collection
Garbage collection and few other things
2026-02-24 10:18:11 +01:00
1b7393a545 Fix license section heading in README.md 2026-02-24 10:17:05 +01:00
Andrej Mickov
a3fad34272 LICENSE.md 2026-02-24 10:14:00 +01:00
06c90be50f Fixed copilot suggestions. 2026-02-24 00:28:33 +01:00
5e87247087 Introduced garbage collection, safe data write to storage and improved S3 compatibility. 2026-02-24 00:05:53 +01:00
a4990dae01 Merge pull request #1 from ferdzo/feature/multipart-upload
Multipart Upload support
2026-02-23 22:37:33 +01:00
d9a1bd9001 Applied Copilot review suggestions 2026-02-23 22:35:42 +01:00
a8204de914 Fixed logging, added config and .env example 2026-02-23 21:52:45 +01:00
d7bdb3177b Added logging 2026-02-23 00:42:38 +01:00
c989037160 Finalized multipart upload and graceful shutdown. Added Dockerfile. 2026-02-22 23:00:33 +01:00
5d41ec9e0a Implemented bulk delete from bucket, AWS SigV4 framing problems solved. 2026-02-22 15:32:04 +01:00
111ce5b669 Working MultipartUpload that needs minor tweaks. 2026-02-22 14:46:04 +01:00
5438a7f4b4 Updated gitignore 2026-02-22 13:43:23 +01:00
9b5035dfa0 Initial Multipart Upload 2026-02-22 13:42:23 +01:00
65a7a7eef8 S3 Error handling 2026-02-22 13:02:22 +01:00
eb798be550 Updated error handling to be S3 XML compatible. Implemented DeleteObject. 2026-02-22 13:01:59 +01:00
b19c24d9b7 Object prefix list filtering 2026-02-21 21:35:15 +01:00
6fe5a8a629 Added Bucket routes and bucket logic 2026-02-21 21:27:34 +01:00
151c11a636 HEAD Object route 2026-02-21 12:13:33 +01:00
f151f8055a Working simple PUT/GET API 2026-02-21 11:55:14 +01:00
51 changed files with 9611 additions and 100 deletions

4
.dockerignore Normal file
View File

@@ -0,0 +1,4 @@
# Keep docs and local runtime state out of the Docker build context.
*.md
.gocache/
blobs/
data/

20
.env.example Normal file
View File

@@ -0,0 +1,20 @@
# Logging
LOG_LEVEL=debug
LOG_FORMAT=text
# Storage root for metadata.db and blobs
DATA_PATH=data/
# HTTP listener
PORT=2600
AUDIT_LOG=true
ADDRESS=0.0.0.0
# Garbage collection (interval units defined in utils/config.go — not shown here)
GC_INTERVAL=10
GC_ENABLED=true
# Stale multipart uploads older than this are cleaned up
MULTIPART_RETENTION_HOURS=24
# SigV4 authentication settings
FS_AUTH_ENABLED=false
FS_AUTH_REGION=us-east-1
FS_AUTH_CLOCK_SKEW_SECONDS=300
FS_AUTH_MAX_PRESIGN_SECONDS=86400
# When FS_AUTH_ENABLED=true you MUST set FS_MASTER_KEY to a strong random value, e.g.:
# openssl rand -base64 32
FS_MASTER_KEY=REPLACE_WITH_SECURE_RANDOM_KEY
# Bootstrap (root) credentials and optional policy JSON
FS_ROOT_USER=
FS_ROOT_PASSWORD=
FS_ROOT_POLICY_JSON=
# Enables the /_admin/v1/* JSON API (bootstrap key only)
ADMIN_API_ENABLED=true

24
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
# Continuous integration: run the Go test suite on pushes to main and on all PRs.
name: CI
on:
  push:
    branches: ["main"]
  pull_request:
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Go
        # Go version is taken from go.mod so CI and the module stay in sync.
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
      - name: Run tests
        run: |
          export GOCACHE=/tmp/go-build-cache
          go test ./...

66
.github/workflows/release-image.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
# Build and publish a multi-arch container image to GHCR on semver tags
# (v*.*.*) or on manual dispatch.
name: Release Image
on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
permissions:
  contents: read
  packages: write
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set build date
        id: date
        # RFC 3339 UTC timestamp, reused for both the OCI label and build arg.
        run: echo "value=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
          # Tag with full semver, major.minor, major, and the commit SHA.
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
          labels: |
            org.opencontainers.image.title=fs
            org.opencontainers.image.description=Lightweight S3-compatible object storage
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.created=${{ steps.date.outputs.value }}
      - name: Build and push image
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            VERSION=${{ github.ref_name }}
            COMMIT=${{ github.sha }}
            DATE=${{ steps.date.outputs.value }}

5
.gitignore vendored
View File

@@ -1,5 +1,8 @@
.env
*.db
.vscode/
blobs/
*.db
data/
.idea/
.gocache/
.gomodcache/

24
Dockerfile Normal file
View File

@@ -0,0 +1,24 @@
# Build stage: compile a static Linux binary with version metadata baked in.
FROM golang:1.25-alpine AS build
WORKDIR /app
# Copy module files first so dependency downloads are cached across source edits.
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ARG VERSION=dev
ARG COMMIT=none
ARG DATE=unknown
# -s -w strips symbol/debug tables; -X injects build metadata into package main.
RUN CGO_ENABLED=0 GOOS=linux go build \
    -trimpath \
    -ldflags "-s -w -X main.version=${VERSION} -X main.commit=${COMMIT} -X main.date=${DATE}" \
    -o /app/fs .

# Runtime stage: minimal Alpine image carrying only the binary.
FROM alpine:3.23 AS runner
COPY --from=build /app/fs /app/fs
WORKDIR /app
EXPOSE 2600
# Poll the health endpoint; PORT env may override the default 2600.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 CMD wget -q -O /dev/null "http://127.0.0.1:${PORT:-2600}/healthz" || exit 1
CMD ["/app/fs"]

8
LICENSE.md Normal file
View File

@@ -0,0 +1,8 @@
Copyright 2025 ferdzo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

161
README.md
View File

@@ -1,3 +1,162 @@
# fs
An experimental Object Storage written in Go that should be compatible with S3
An experimental Object Storage written in Go that should be partially compatible with S3
## Running
Single binary, two modes:
- `fs` (no subcommand) starts the server (backward compatible)
- `fs server` starts the server explicitly
- `fs admin ...` runs admin CLI commands
## Features
Bucket operations:
- `PUT /{bucket}`
- `HEAD /{bucket}`
- `DELETE /{bucket}`
- `GET /` (list buckets)
Object operations:
- `PUT /{bucket}/{key}`
- `GET /{bucket}/{key}`
- `HEAD /{bucket}/{key}`
- `DELETE /{bucket}/{key}`
- `GET /{bucket}?list-type=2&prefix=...` (ListObjectsV2-style)
Multipart upload:
- `POST /{bucket}/{key}?uploads` (initiate)
- `PUT /{bucket}/{key}?uploadId=...&partNumber=N` (upload part)
- `GET /{bucket}/{key}?uploadId=...` (list parts)
- `POST /{bucket}/{key}?uploadId=...` (complete)
- `DELETE /{bucket}/{key}?uploadId=...` (abort)
Multi-object delete:
- `POST /{bucket}?delete` with S3-style XML body
AWS SigV4 streaming payload decoding for uploads (`aws-chunked` request bodies)
Authentication:
- AWS SigV4 request verification (header and presigned URL forms)
- Local credential/policy store in bbolt
- Bootstrap access key/secret via environment variables
Admin API (JSON):
- `POST /_admin/v1/users`
- `GET /_admin/v1/users`
- `GET /_admin/v1/users/{accessKeyId}`
- `PUT /_admin/v1/users/{accessKeyId}/policy`
- `PUT /_admin/v1/users/{accessKeyId}/status`
- `DELETE /_admin/v1/users/{accessKeyId}`
Admin API policy examples (SigV4):
```bash
ENDPOINT="http://localhost:2600"
REGION="us-east-1"
ADMIN_ACCESS_KEY="${FS_ROOT_USER}"
ADMIN_SECRET_KEY="${FS_ROOT_PASSWORD}"
SIGV4="aws:amz:${REGION}:s3"
```
Replace user policy with one scoped statement:
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "backup",
"prefix": "restic/*"
}
]
}
}'
```
Set multiple statements (for multiple buckets):
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject"],
"bucket": "test-bucket",
"prefix": "*"
},
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "test-bucket-2",
"prefix": "*"
}
]
}
}'
```
Admin CLI:
- `fs admin user create --access-key backup-user --role readwrite`
- `fs admin user list`
- `fs admin user get backup-user`
- `fs admin user set-status backup-user --status disabled`
- `fs admin user set-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role readwrite --bucket backups-2` (appends another statement)
- `fs admin user remove-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role admin --replace` (replaces all statements)
- `fs admin user delete backup-user`
- `fs admin snapshot create --data-path /var/lib/fs --out /backup/fs-20260311.tar.gz`
- `fs admin snapshot inspect --file /backup/fs-20260311.tar.gz`
- `fs admin snapshot restore --file /backup/fs-20260311.tar.gz --data-path /var/lib/fs --force`
- `fs admin diag health`
- `fs admin diag version`
## Auth Setup
Required when `FS_AUTH_ENABLED=true`:
- `FS_MASTER_KEY` must be base64 for 32 decoded bytes (AES-256 key), e.g. `openssl rand -base64 32`
- `FS_ROOT_USER` and `FS_ROOT_PASSWORD` define initial credentials
- `ADMIN_API_ENABLED=true` enables `/_admin/v1/*` routes (bootstrap key only)
Reference: `auth/README.md`
Additional docs:
- Admin OpenAPI spec: `docs/admin-api-openapi.yaml`
- S3 compatibility matrix: `docs/s3-compatibility.md`
CLI credential/env resolution for `fs admin`:
- Flags: `--access-key`, `--secret-key`, `--endpoint`, `--region`
- Env fallback:
- `FS_ROOT_USER` / `FS_ROOT_PASSWORD` (same defaults as server bootstrap)
- `FSCLI_ACCESS_KEY` / `FSCLI_SECRET_KEY`
- `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY`
- `FSCLI_ENDPOINT` (fallback to `ADDRESS` + `PORT`, then `http://localhost:2600`)
- `FSCLI_REGION` (fallback `FS_AUTH_REGION`, default `us-east-1`)
Note:
- `fs admin snapshot ...` commands operate locally on filesystem paths and do not require endpoint or auth credentials.
Health:
- `GET /healthz`
- `HEAD /healthz`
- `GET /metrics` (Prometheus exposition format)
- `HEAD /metrics`
## Limitations
- Not full S3 API coverage.
- No versioning or lifecycle policies.
- Error and edge-case behavior is still being refined for client compatibility.
## License
MIT License

300
api/admin_api.go Normal file
View File

@@ -0,0 +1,300 @@
package api
import (
"encoding/json"
"errors"
"fs/auth"
"fs/models"
"io"
"net/http"
"strconv"
"strings"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
// Limits for the admin JSON API.
const (
	// maxAdminJSONBodyBytes caps any admin request body at 1 MiB.
	maxAdminJSONBodyBytes = 1 << 20
	// defaultAdminPageSize is the list page size used when ?limit is absent.
	defaultAdminPageSize = 100
	// maxAdminPageSize is the largest accepted ?limit value.
	maxAdminPageSize = 1000
)

// adminErrorResponse is the JSON error envelope returned by all admin endpoints.
type adminErrorResponse struct {
	Code      string `json:"code"`
	Message   string `json:"message"`
	RequestID string `json:"requestId,omitempty"`
}

// adminCreateUserRequest is the body of POST /_admin/v1/users.
// SecretKey and Status are optional; the auth service fills in defaults.
type adminCreateUserRequest struct {
	AccessKeyID string            `json:"accessKeyId"`
	SecretKey   string            `json:"secretKey,omitempty"`
	Status      string            `json:"status,omitempty"`
	Policy      models.AuthPolicy `json:"policy"`
}

// adminSetPolicyRequest is the body of PUT .../policy.
type adminSetPolicyRequest struct {
	Policy models.AuthPolicy `json:"policy"`
}

// adminSetStatusRequest is the body of PUT .../status.
type adminSetStatusRequest struct {
	Status string `json:"status"`
}

// adminUserListItem is one row in a user listing (no policy or secret).
type adminUserListItem struct {
	AccessKeyID string `json:"accessKeyId"`
	Status      string `json:"status"`
	CreatedAt   int64  `json:"createdAt"`
	UpdatedAt   int64  `json:"updatedAt"`
}

// adminUserListResponse is the paginated response of GET /_admin/v1/users.
type adminUserListResponse struct {
	Items      []adminUserListItem `json:"items"`
	NextCursor string              `json:"nextCursor,omitempty"`
}

// adminUserResponse is the full user view. SecretKey is only populated on
// create; Policy is omitted where not loaded.
type adminUserResponse struct {
	AccessKeyID string             `json:"accessKeyId"`
	Status      string             `json:"status"`
	CreatedAt   int64              `json:"createdAt"`
	UpdatedAt   int64              `json:"updatedAt"`
	Policy      *models.AuthPolicy `json:"policy,omitempty"`
	SecretKey   string             `json:"secretKey,omitempty"`
}
// registerAdminRoutes mounts the JSON admin API under /_admin/v1.
// All handlers individually enforce bootstrap-admin access.
func (h *Handler) registerAdminRoutes() {
	h.router.Route("/_admin/v1", func(admin chi.Router) {
		// Collection endpoints.
		admin.Post("/users", h.handleAdminCreateUser)
		admin.Get("/users", h.handleAdminListUsers)
		// Per-user endpoints, keyed by access key ID.
		admin.Get("/users/{accessKeyId}", h.handleAdminGetUser)
		admin.Put("/users/{accessKeyId}/policy", h.handleAdminSetUserPolicy)
		admin.Put("/users/{accessKeyId}/status", h.handleAdminSetUserStatus)
		admin.Delete("/users/{accessKeyId}", h.handleAdminDeleteUser)
	})
}
// handleAdminCreateUser handles POST /_admin/v1/users: it creates a credential
// from the JSON body and echoes the stored user back — including the (possibly
// generated) secret key — with status 201 Created.
func (h *Handler) handleAdminCreateUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	var body adminCreateUserRequest
	if err := decodeJSONBody(w, r, &body); err != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	user, err := h.authSvc.CreateUser(auth.CreateUserInput{
		AccessKeyID: body.AccessKeyID,
		SecretKey:   body.SecretKey,
		Status:      body.Status,
		Policy:      body.Policy,
	})
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	// The secret is returned exactly once, at creation time.
	writeJSON(w, http.StatusCreated, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
		SecretKey:   user.SecretKey,
	})
}
// handleAdminListUsers handles GET /_admin/v1/users with optional cursor
// pagination: ?limit= (1..maxAdminPageSize, default defaultAdminPageSize)
// and ?cursor= (opaque token from a previous response's nextCursor).
func (h *Handler) handleAdminListUsers(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	limit := defaultAdminPageSize
	if raw := strings.TrimSpace(r.URL.Query().Get("limit")); raw != "" {
		parsed, err := strconv.Atoi(raw)
		if err != nil || parsed < 1 || parsed > maxAdminPageSize {
			// Derive the bound from the constant so the message cannot drift
			// out of sync with maxAdminPageSize.
			writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest",
				"limit must be between 1 and "+strconv.Itoa(maxAdminPageSize))
			return
		}
		limit = parsed
	}
	cursor := strings.TrimSpace(r.URL.Query().Get("cursor"))
	users, nextCursor, err := h.authSvc.ListUsers(limit, cursor)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	// Project to the list view: no policy and never a secret.
	items := make([]adminUserListItem, 0, len(users))
	for _, user := range users {
		items = append(items, adminUserListItem{
			AccessKeyID: user.AccessKeyID,
			Status:      user.Status,
			CreatedAt:   user.CreatedAt,
			UpdatedAt:   user.UpdatedAt,
		})
	}
	writeJSON(w, http.StatusOK, adminUserListResponse{
		Items:      items,
		NextCursor: nextCursor,
	})
}
// handleAdminGetUser handles GET /_admin/v1/users/{accessKeyId}.
// The stored secret is never returned from this endpoint.
func (h *Handler) handleAdminGetUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	user, err := h.authSvc.GetUser(chi.URLParam(r, "accessKeyId"))
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// handleAdminDeleteUser handles DELETE /_admin/v1/users/{accessKeyId} and
// responds 204 No Content on success.
func (h *Handler) handleAdminDeleteUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	key := chi.URLParam(r, "accessKeyId")
	if err := h.authSvc.DeleteUser(key); err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// handleAdminSetUserPolicy handles PUT /_admin/v1/users/{accessKeyId}/policy:
// it replaces the user's policy wholesale and returns the updated user.
func (h *Handler) handleAdminSetUserPolicy(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	key := chi.URLParam(r, "accessKeyId")
	var body adminSetPolicyRequest
	if err := decodeJSONBody(w, r, &body); err != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	user, err := h.authSvc.SetUserPolicy(key, body.Policy)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// handleAdminSetUserStatus handles PUT /_admin/v1/users/{accessKeyId}/status:
// it sets the user's enabled/disabled status and returns the updated user.
func (h *Handler) handleAdminSetUserStatus(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}
	key := chi.URLParam(r, "accessKeyId")
	var body adminSetStatusRequest
	if err := decodeJSONBody(w, r, &body); err != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", err.Error())
		return
	}
	user, err := h.authSvc.SetUserStatus(key, body.Status)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// requireBootstrapAdmin gates an admin endpoint: the request must carry an
// authenticated identity whose access key equals the configured bootstrap
// key. On failure it writes a 403 JSON error and returns false. Only the
// bootstrap identity may use the admin API; other users are rejected
// regardless of their policy.
func (h *Handler) requireBootstrapAdmin(w http.ResponseWriter, r *http.Request) bool {
	// The auth middleware stores its verification result in the context.
	authCtx, ok := auth.GetRequestContext(r.Context())
	if !ok || !authCtx.Authenticated {
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin credentials are required")
		return false
	}
	// Guard before dereferencing Config() below.
	if h.authSvc == nil {
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin access is not configured")
		return false
	}
	bootstrap := strings.TrimSpace(h.authSvc.Config().BootstrapAccessKey)
	// An empty bootstrap key means admin access is effectively disabled.
	if bootstrap == "" || authCtx.AccessKeyID != bootstrap {
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin access denied")
		return false
	}
	return true
}
// decodeJSONBody decodes the request body into dst as a single, strict JSON
// document: the body is capped at maxAdminJSONBodyBytes, unknown fields are
// rejected, and any content after the first value is an error.
func decodeJSONBody(w http.ResponseWriter, r *http.Request, dst any) error {
	// MaxBytesReader also tells the HTTP server to stop reading oversized bodies.
	r.Body = http.MaxBytesReader(w, r.Body, maxAdminJSONBodyBytes)
	decoder := json.NewDecoder(r.Body)
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(dst); err != nil {
		return err
	}
	// A second decode must hit end-of-stream. Use errors.Is rather than a
	// direct != io.EOF comparison so wrapped EOF errors are also matched.
	if err := decoder.Decode(&struct{}{}); !errors.Is(err, io.EOF) {
		return errors.New("request body must contain a single JSON object")
	}
	return nil
}
// writeMappedAdminError translates a domain error from the auth service into
// the admin API's JSON error envelope. Unknown errors map to a generic 500 so
// no internal details leak to clients.
func writeMappedAdminError(w http.ResponseWriter, r *http.Request, err error) {
	status, code, message := http.StatusInternalServerError, "InternalError", "internal server error"
	switch {
	case errors.Is(err, auth.ErrInvalidUserInput):
		status, code, message = http.StatusBadRequest, "InvalidRequest", err.Error()
	case errors.Is(err, auth.ErrUserAlreadyExists):
		status, code, message = http.StatusConflict, "UserAlreadyExists", "user already exists"
	case errors.Is(err, auth.ErrUserNotFound):
		status, code, message = http.StatusNotFound, "UserNotFound", "user was not found"
	case errors.Is(err, auth.ErrAuthNotEnabled):
		status, code, message = http.StatusServiceUnavailable, "AuthDisabled", "authentication subsystem is disabled"
	}
	writeAdminError(w, r, status, code, message)
}
// writeAdminError writes a JSON error envelope with the given status, code
// and message, echoing the request ID in both the header and the body.
func writeAdminError(w http.ResponseWriter, r *http.Request, status int, code string, message string) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	reqID := middleware.GetReqID(r.Context())
	if reqID != "" {
		w.Header().Set("x-amz-request-id", reqID)
	}
	w.WriteHeader(status)
	body := adminErrorResponse{Code: code, Message: message, RequestID: reqID}
	// The status line is already written; an encode failure is unreportable.
	_ = json.NewEncoder(w).Encode(body)
}
func writeJSON(w http.ResponseWriter, status int, payload any) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(status)
_ = json.NewEncoder(w).Encode(payload)
}

1228
api/api.go

File diff suppressed because it is too large Load Diff

242
api/s3_errors.go Normal file
View File

@@ -0,0 +1,242 @@
package api
import (
"encoding/xml"
"errors"
"fs/auth"
"fs/metadata"
"fs/metrics"
"fs/models"
"fs/service"
"net/http"
"github.com/go-chi/chi/v5/middleware"
)
// s3APIError is a static S3-style error: HTTP status plus the S3 error code
// and human-readable message rendered into the XML error body.
type s3APIError struct {
	Status  int
	Code    string
	Message string
}

// Canned S3 errors, named after their S3 error codes. Messages mirror the
// wording S3 clients expect.
var (
	s3ErrInvalidObjectKey = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "InvalidArgument",
		Message: "Object key is required.",
	}
	s3ErrKeyTooLong = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "KeyTooLongError",
		Message: "Your key is too long.",
	}
	s3ErrNotImplemented = s3APIError{
		Status:  http.StatusNotImplemented,
		Code:    "NotImplemented",
		Message: "A header you provided implies functionality that is not implemented.",
	}
	s3ErrInvalidPart = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "InvalidPart",
		Message: "One or more of the specified parts could not be found.",
	}
	s3ErrInvalidPartOrder = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "InvalidPartOrder",
		Message: "The list of parts was not in ascending order.",
	}
	s3ErrMalformedXML = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "MalformedXML",
		Message: "The XML you provided was not well-formed or did not validate against our published schema.",
	}
	s3ErrInvalidArgument = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "InvalidArgument",
		Message: "Invalid argument.",
	}
	s3ErrInvalidRange = s3APIError{
		Status:  http.StatusRequestedRangeNotSatisfiable,
		Code:    "InvalidRange",
		Message: "The requested range is not satisfiable.",
	}
	s3ErrPreconditionFailed = s3APIError{
		Status:  http.StatusPreconditionFailed,
		Code:    "PreconditionFailed",
		Message: "At least one of the pre-conditions you specified did not hold.",
	}
	s3ErrEntityTooSmall = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "EntityTooSmall",
		Message: "Your proposed upload is smaller than the minimum allowed object size.",
	}
	s3ErrEntityTooLarge = s3APIError{
		Status:  http.StatusRequestEntityTooLarge,
		Code:    "EntityTooLarge",
		Message: "Your proposed upload exceeds the maximum allowed size.",
	}
	// S3 reports an over-long multi-object delete as MalformedXML.
	s3ErrTooManyDeleteObjects = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "MalformedXML",
		Message: "The request must contain no more than 1000 object identifiers.",
	}
	// Authentication/authorization errors.
	s3ErrAccessDenied = s3APIError{
		Status:  http.StatusForbidden,
		Code:    "AccessDenied",
		Message: "Access Denied.",
	}
	s3ErrInvalidAccessKeyID = s3APIError{
		Status:  http.StatusForbidden,
		Code:    "InvalidAccessKeyId",
		Message: "The AWS Access Key Id you provided does not exist in our records.",
	}
	s3ErrSignatureDoesNotMatch = s3APIError{
		Status:  http.StatusForbidden,
		Code:    "SignatureDoesNotMatch",
		Message: "The request signature we calculated does not match the signature you provided.",
	}
	s3ErrAuthorizationHeaderMalformed = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "AuthorizationHeaderMalformed",
		Message: "The authorization header is malformed; the region/service/date is wrong or missing.",
	}
	s3ErrRequestTimeTooSkewed = s3APIError{
		Status:  http.StatusForbidden,
		Code:    "RequestTimeTooSkewed",
		Message: "The difference between the request time and the server's time is too large.",
	}
	s3ErrExpiredToken = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "ExpiredToken",
		Message: "The provided token has expired.",
	}
	s3ErrInvalidPresign = s3APIError{
		Status:  http.StatusBadRequest,
		Code:    "AuthorizationQueryParametersError",
		Message: "Error parsing the X-Amz-Credential parameter.",
	}
	// Fallback for unmapped internal failures.
	s3ErrInternal = s3APIError{
		Status:  http.StatusInternalServerError,
		Code:    "InternalError",
		Message: "We encountered an internal error. Please try again.",
	}
)
// mapToS3Error translates internal domain errors (metadata, service, auth)
// into the closest S3 API error. Unknown errors fall back to InternalError so
// internal details never leak to clients.
func mapToS3Error(err error) s3APIError {
	switch {
	// Metadata-layer bucket errors.
	case errors.Is(err, metadata.ErrInvalidBucketName):
		return s3APIError{
			Status:  http.StatusBadRequest,
			Code:    "InvalidBucketName",
			Message: "The specified bucket is not valid.",
		}
	case errors.Is(err, metadata.ErrBucketAlreadyExists):
		// Single-owner system: an existing bucket is always "owned by you".
		return s3APIError{
			Status:  http.StatusConflict,
			Code:    "BucketAlreadyOwnedByYou",
			Message: "Your previous request to create the named bucket succeeded and you already own it.",
		}
	case errors.Is(err, metadata.ErrBucketNotFound):
		return s3APIError{
			Status:  http.StatusNotFound,
			Code:    "NoSuchBucket",
			Message: "The specified bucket does not exist.",
		}
	case errors.Is(err, metadata.ErrBucketNotEmpty):
		return s3APIError{
			Status:  http.StatusConflict,
			Code:    "BucketNotEmpty",
			Message: "The bucket you tried to delete is not empty.",
		}
	case errors.Is(err, metadata.ErrObjectNotFound):
		return s3APIError{
			Status:  http.StatusNotFound,
			Code:    "NoSuchKey",
			Message: "The specified key does not exist.",
		}
	// Multipart upload errors.
	case errors.Is(err, metadata.ErrMultipartNotFound):
		return s3APIError{
			Status:  http.StatusNotFound,
			Code:    "NoSuchUpload",
			Message: "The specified multipart upload does not exist.",
		}
	case errors.Is(err, metadata.ErrMultipartNotPending):
		return s3APIError{
			Status:  http.StatusBadRequest,
			Code:    "InvalidRequest",
			Message: "The multipart upload is not in a valid state for this operation.",
		}
	case errors.Is(err, service.ErrInvalidPart):
		return s3ErrInvalidPart
	case errors.Is(err, service.ErrInvalidPartOrder):
		return s3ErrInvalidPartOrder
	case errors.Is(err, service.ErrInvalidCompleteRequest):
		return s3ErrMalformedXML
	case errors.Is(err, service.ErrEntityTooSmall):
		return s3ErrEntityTooSmall
	// Authentication/authorization errors.
	case errors.Is(err, auth.ErrAccessDenied):
		return s3ErrAccessDenied
	case errors.Is(err, auth.ErrInvalidAccessKeyID):
		return s3ErrInvalidAccessKeyID
	case errors.Is(err, auth.ErrSignatureDoesNotMatch):
		return s3ErrSignatureDoesNotMatch
	case errors.Is(err, auth.ErrAuthorizationHeaderMalformed):
		return s3ErrAuthorizationHeaderMalformed
	case errors.Is(err, auth.ErrRequestTimeTooSkewed):
		return s3ErrRequestTimeTooSkewed
	case errors.Is(err, auth.ErrExpiredToken):
		return s3ErrExpiredToken
	// Disabled or missing credentials are deliberately indistinguishable
	// from a plain access denial.
	case errors.Is(err, auth.ErrCredentialDisabled):
		return s3ErrAccessDenied
	case errors.Is(err, auth.ErrNoAuthCredentials):
		return s3ErrAccessDenied
	case errors.Is(err, auth.ErrUnsupportedAuthScheme):
		return s3ErrAuthorizationHeaderMalformed
	case errors.Is(err, auth.ErrInvalidPresign):
		return s3ErrInvalidPresign
	default:
		return s3ErrInternal
	}
}
// writeS3Error writes an S3-style XML error response and records an error
// metric for the normalized operation. r may be nil (internal call sites);
// the metric is then labeled "other" and no request ID is set. For HEAD
// requests only the status and headers are written, matching S3's bodyless
// HEAD error behavior.
func writeS3Error(w http.ResponseWriter, r *http.Request, apiErr s3APIError, resource string) {
	requestID := ""
	op := "other"
	if r != nil {
		requestID = middleware.GetReqID(r.Context())
		// POST /{bucket}?delete is multi-object delete; flag it so metrics
		// distinguish it from other POST operations.
		isDeletePost := false
		if r.Method == http.MethodPost {
			_, isDeletePost = r.URL.Query()["delete"]
		}
		op = metrics.NormalizeHTTPOperation(r.Method, isDeletePost)
		if requestID != "" {
			w.Header().Set("x-amz-request-id", requestID)
		}
	}
	metrics.Default.ObserveError(op, apiErr.Code)
	w.Header().Set("Content-Type", "application/xml; charset=utf-8")
	w.WriteHeader(apiErr.Status)
	// S3 returns no body for HEAD errors.
	if r != nil && r.Method == http.MethodHead {
		return
	}
	payload := models.S3ErrorResponse{
		Code:      apiErr.Code,
		Message:   apiErr.Message,
		Resource:  resource,
		RequestID: requestID,
	}
	out, err := xml.MarshalIndent(payload, "", " ")
	if err != nil {
		// Status line is already written; nothing useful can be sent now.
		return
	}
	_, _ = w.Write([]byte(xml.Header))
	_, _ = w.Write(out)
}
// writeMappedS3Error maps a domain error to its S3 equivalent and writes it,
// using the request path as the error's Resource. Unlike writeS3Error, r must
// be non-nil here since r.URL is dereferenced.
func writeMappedS3Error(w http.ResponseWriter, r *http.Request, err error) {
	writeS3Error(w, r, mapToS3Error(err), r.URL.Path)
}

90
app/server.go Normal file
View File

@@ -0,0 +1,90 @@
package app
import (
"context"
"fs/api"
"fs/auth"
"fs/logging"
"fs/metadata"
"fs/service"
"fs/storage"
"fs/utils"
"os"
"path/filepath"
"strconv"
"time"
)
// RunServer wires configuration, logging, metadata, blob storage, the object
// service, authentication and the HTTP API together, then blocks in
// handler.Start until the server stops or a fatal init error occurs.
//
// NOTE(review): metadataHandler is closed on the auth/bootstrap failure paths
// but not when handler.Start returns, and blobHandler is never explicitly
// closed — confirm whether these own resources that need shutdown cleanup.
func RunServer(ctx context.Context) error {
	// Tolerate a nil context from legacy callers.
	if ctx == nil {
		ctx = context.Background()
	}
	config := utils.NewConfig()
	logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog)
	authConfig := auth.ConfigFromValues(
		config.AuthEnabled,
		config.AuthRegion,
		config.AuthSkew,
		config.AuthMaxPresign,
		config.AuthMasterKey,
		config.AuthBootstrapAccessKey,
		config.AuthBootstrapSecretKey,
		config.AuthBootstrapPolicy,
	)
	logger := logging.NewLogger(logConfig)
	// One structured boot line summarizing the effective configuration.
	logger.Info("boot",
		"log_level", logConfig.LevelName,
		"log_format", logConfig.Format,
		"audit_log", logConfig.Audit,
		"data_path", config.DataPath,
		"multipart_retention_hours", int(config.MultipartCleanupRetention/time.Hour),
		"auth_enabled", authConfig.Enabled,
		"auth_region", authConfig.Region,
		"admin_api_enabled", config.AdminAPIEnabled,
	)
	if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
		logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
		return err
	}
	// bbolt metadata store lives alongside the blobs under DataPath.
	dbPath := filepath.Join(config.DataPath, "metadata.db")
	metadataHandler, err := metadata.NewMetadataHandler(dbPath)
	if err != nil {
		logger.Error("failed_to_initialize_metadata_handler", "error", err)
		return err
	}
	blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
	if err != nil {
		_ = metadataHandler.Close()
		logger.Error("failed_to_initialize_blob_store", "error", err)
		return err
	}
	objectService := service.NewObjectService(metadataHandler, blobHandler, config.MultipartCleanupRetention)
	authService, err := auth.NewService(authConfig, metadataHandler)
	if err != nil {
		_ = metadataHandler.Close()
		logger.Error("failed_to_initialize_auth_service", "error", err)
		return err
	}
	// Creates the bootstrap (root) identity if it does not exist yet.
	if err := authService.EnsureBootstrap(); err != nil {
		_ = metadataHandler.Close()
		logger.Error("failed_to_ensure_bootstrap_auth_identity", "error", err)
		return err
	}
	handler := api.NewHandler(objectService, logger, logConfig, authService, config.AdminAPIEnabled)
	addr := config.Address + ":" + strconv.Itoa(config.Port)
	// Background garbage collector; stops when ctx is canceled.
	if config.GcEnabled {
		go objectService.RunGC(ctx, config.GcInterval)
	}
	if err := handler.Start(ctx, addr); err != nil {
		logger.Error("server_stopped_with_error", "error", err)
		return err
	}
	return nil
}

142
auth/README.md Normal file
View File

@@ -0,0 +1,142 @@
# Authentication Design
This folder implements S3-compatible request authentication using AWS Signature Version 4 (SigV4), with local identity and policy data stored in bbolt.
## Goals
- Keep S3 client compatibility for request signing.
- Avoid external auth databases.
- Store secrets encrypted at rest (not plaintext in bbolt).
- Keep authorization simple and explicit.
## High-Level Architecture
- `auth/middleware.go`
- HTTP middleware that enforces auth before API handlers.
- Exempts `/healthz`.
- Calls auth service and writes mapped S3 XML errors on failure.
- `auth/service.go`
- Main auth orchestration:
- parse SigV4 from request
- validate timestamp/scope/service/region
- load identity from metadata
- decrypt secret
- verify signature
- evaluate policy against requested S3 action
- `auth/sigv4.go`
- Canonical SigV4 parsing and verification helpers.
- Supports header auth and presigned query auth.
- `auth/policy.go`
- Authorization evaluator (deny overrides allow).
- `auth/action.go`
- Maps HTTP method/path/query to logical S3 action + resource target.
- `auth/crypto.go`
- AES-256-GCM encryption/decryption for stored secret keys.
- `auth/context.go`
- Carries authentication result in request context for downstream logic.
- `auth/config.go`
- Normalized auth configuration.
- `auth/errors.go`
- Domain auth errors used by API S3 error mapping.
## Config Model
Auth is configured through env (read in `utils/config.go`, converted in `auth/config.go`):
- `FS_AUTH_ENABLED`
- `FS_AUTH_REGION`
- `FS_AUTH_CLOCK_SKEW_SECONDS`
- `FS_AUTH_MAX_PRESIGN_SECONDS`
- `FS_MASTER_KEY`
- `FS_ROOT_USER`
- `FS_ROOT_PASSWORD`
- `FS_ROOT_POLICY_JSON` (optional JSON)
Important:
- If `FS_AUTH_ENABLED=true`, `FS_MASTER_KEY` is required.
- `FS_MASTER_KEY` must be base64 that decodes to exactly 32 bytes (AES-256 key).
## Persistence Model (bbolt)
Implemented in metadata layer:
- `__AUTH_IDENTITIES__` bucket stores `models.AuthIdentity`
- `access_key_id`
- encrypted secret (`secret_enc`, `secret_nonce`)
status (`active`/`disabled`)
- timestamps
- `__AUTH_POLICIES__` bucket stores `models.AuthPolicy`
- `principal`
- statements (`effect`, `actions`, `bucket`, `prefix`)
## Bootstrap Identity
On startup (`main.go`):
1. Build auth config.
2. Create auth service with metadata store.
3. Call `EnsureBootstrap()`.
If bootstrap env key/secret are set:
- identity is created/updated
- secret is encrypted with AES-GCM and stored
- policy is created:
- default: full access (`s3:*`, `bucket=*`, `prefix=*`)
- or overridden by `FS_ROOT_POLICY_JSON`
## Request Authentication Flow
For each non-health request:
1. Parse SigV4 input (header or presigned query).
2. Validate structural fields:
- algorithm
- credential scope
- service must be `s3`
- region must match config
3. Validate time:
- `x-amz-date` format
- skew within `FS_AUTH_CLOCK_SKEW_SECONDS`
- presigned expiry within `FS_AUTH_MAX_PRESIGN_SECONDS`
4. Load identity by access key id.
5. Ensure identity status is active.
6. Decrypt stored secret using master key.
7. Recompute canonical request and expected signature.
8. Compare signatures.
9. Resolve target action from request.
10. Evaluate policy; deny overrides allow.
11. Store auth result in request context and continue.
## Authorization Semantics
Policy evaluator rules:
- No matching allow => denied.
- Any matching deny => denied (even if allow also matches).
- Wildcards supported:
- action: `*` or `s3:*`
- bucket: `*`
- prefix: `*`
Action resolution includes:
- bucket APIs (`CreateBucket`, `ListBucket`, `HeadBucket`, `DeleteBucket`)
- object APIs (`GetObject`, `PutObject`, `DeleteObject`)
- multipart APIs (`CreateMultipartUpload`, `UploadPart`, `ListMultipartUploadParts`, `CompleteMultipartUpload`, `AbortMultipartUpload`)
## Error Behavior
Auth errors are mapped to S3-style XML errors in `api/s3_errors.go`, including:
- `AccessDenied`
- `InvalidAccessKeyId`
- `SignatureDoesNotMatch`
- `AuthorizationHeaderMalformed`
- `RequestTimeTooSkewed`
- `ExpiredToken`
- `AuthorizationQueryParametersError`
## Audit Logging
When `AUDIT_LOG=true` and auth is enabled:
- successful auth attempts emit `auth_success`
- failed auth attempts emit `auth_failed`
Each audit entry includes method, path, remote IP, and request ID (if present). Success logs also include access key ID and auth type.
## Security Notes
- Secret keys are recoverable by the server by design (SigV4 verification requires the plaintext secret).
- They are encrypted at rest, not hashed.
- Master key rotation is not implemented yet.
- Keep `FS_MASTER_KEY` protected (secret manager/systemd env file/etc.).
## Current Scope / Limitations
- No STS/session-token auth yet.
- Policy language is intentionally minimal, not full IAM.
- No automatic key rotation workflows.
- No key rotation endpoint for existing users yet.

93
auth/action.go Normal file
View File

@@ -0,0 +1,93 @@
package auth
import (
"net/http"
"strings"
)
type Action string
const (
ActionListAllMyBuckets Action = "s3:ListAllMyBuckets"
ActionCreateBucket Action = "s3:CreateBucket"
ActionHeadBucket Action = "s3:HeadBucket"
ActionDeleteBucket Action = "s3:DeleteBucket"
ActionListBucket Action = "s3:ListBucket"
ActionGetObject Action = "s3:GetObject"
ActionPutObject Action = "s3:PutObject"
ActionDeleteObject Action = "s3:DeleteObject"
ActionCreateMultipartUpload Action = "s3:CreateMultipartUpload"
ActionUploadPart Action = "s3:UploadPart"
ActionListMultipartParts Action = "s3:ListMultipartUploadParts"
ActionCompleteMultipart Action = "s3:CompleteMultipartUpload"
ActionAbortMultipartUpload Action = "s3:AbortMultipartUpload"
)
type RequestTarget struct {
Action Action
Bucket string
Key string
}
func resolveTarget(r *http.Request) RequestTarget {
path := strings.TrimPrefix(r.URL.Path, "/")
if path == "" {
return RequestTarget{Action: ActionListAllMyBuckets}
}
parts := strings.SplitN(path, "/", 2)
bucket := parts[0]
key := ""
if len(parts) > 1 {
key = parts[1]
}
if key == "" {
switch r.Method {
case http.MethodPut:
return RequestTarget{Action: ActionCreateBucket, Bucket: bucket}
case http.MethodHead:
return RequestTarget{Action: ActionHeadBucket, Bucket: bucket}
case http.MethodDelete:
return RequestTarget{Action: ActionDeleteBucket, Bucket: bucket}
case http.MethodGet:
return RequestTarget{Action: ActionListBucket, Bucket: bucket}
case http.MethodPost:
if _, ok := r.URL.Query()["delete"]; ok {
return RequestTarget{Action: ActionDeleteObject, Bucket: bucket}
}
}
return RequestTarget{Bucket: bucket}
}
uploadID := r.URL.Query().Get("uploadId")
partNumber := r.URL.Query().Get("partNumber")
if _, ok := r.URL.Query()["uploads"]; ok && r.Method == http.MethodPost {
return RequestTarget{Action: ActionCreateMultipartUpload, Bucket: bucket, Key: key}
}
if uploadID != "" {
switch r.Method {
case http.MethodPut:
if partNumber != "" {
return RequestTarget{Action: ActionUploadPart, Bucket: bucket, Key: key}
}
case http.MethodGet:
return RequestTarget{Action: ActionListMultipartParts, Bucket: bucket, Key: key}
case http.MethodPost:
return RequestTarget{Action: ActionCompleteMultipart, Bucket: bucket, Key: key}
case http.MethodDelete:
return RequestTarget{Action: ActionAbortMultipartUpload, Bucket: bucket, Key: key}
}
}
switch r.Method {
case http.MethodGet, http.MethodHead:
return RequestTarget{Action: ActionGetObject, Bucket: bucket, Key: key}
case http.MethodPut:
return RequestTarget{Action: ActionPutObject, Bucket: bucket, Key: key}
case http.MethodDelete:
return RequestTarget{Action: ActionDeleteObject, Bucket: bucket, Key: key}
}
return RequestTarget{Bucket: bucket, Key: key}
}

50
auth/config.go Normal file
View File

@@ -0,0 +1,50 @@
package auth
import (
"strings"
"time"
)
type Config struct {
Enabled bool
Region string
ClockSkew time.Duration
MaxPresignDuration time.Duration
MasterKey string
BootstrapAccessKey string
BootstrapSecretKey string
BootstrapPolicy string
}
func ConfigFromValues(
enabled bool,
region string,
skew time.Duration,
maxPresign time.Duration,
masterKey string,
bootstrapAccessKey string,
bootstrapSecretKey string,
bootstrapPolicy string,
) Config {
region = strings.TrimSpace(region)
if region == "" {
region = "us-east-1"
}
if skew <= 0 {
skew = 5 * time.Minute
}
if maxPresign <= 0 {
maxPresign = 24 * time.Hour
}
return Config{
Enabled: enabled,
Region: region,
ClockSkew: skew,
MaxPresignDuration: maxPresign,
MasterKey: strings.TrimSpace(masterKey),
BootstrapAccessKey: strings.TrimSpace(bootstrapAccessKey),
BootstrapSecretKey: strings.TrimSpace(bootstrapSecretKey),
BootstrapPolicy: strings.TrimSpace(bootstrapPolicy),
}
}

23
auth/context.go Normal file
View File

@@ -0,0 +1,23 @@
package auth
import "context"
type RequestContext struct {
Authenticated bool
AccessKeyID string
AuthType string
}
type contextKey int
const requestContextKey contextKey = iota
func WithRequestContext(ctx context.Context, authCtx RequestContext) context.Context {
return context.WithValue(ctx, requestContextKey, authCtx)
}
func GetRequestContext(ctx context.Context) (RequestContext, bool) {
value := ctx.Value(requestContextKey)
authCtx, ok := value.(RequestContext)
return authCtx, ok
}

74
auth/crypto.go Normal file
View File

@@ -0,0 +1,74 @@
package auth
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
)
const (
	masterKeyLength = 32 // AES-256 key size in bytes
	gcmNonceLength  = 12 // standard GCM nonce size in bytes
)
// decodeMasterKey base64-decodes the configured master key and requires
// exactly a 32-byte (AES-256) result; failures wrap ErrInvalidMasterKey.
func decodeMasterKey(raw string) ([]byte, error) {
	key, err := base64.StdEncoding.DecodeString(raw)
	switch {
	case err != nil:
		return nil, fmt.Errorf("%w: %v", ErrInvalidMasterKey, err)
	case len(key) != masterKeyLength:
		return nil, fmt.Errorf("%w: expected %d-byte decoded key", ErrInvalidMasterKey, masterKeyLength)
	}
	return key, nil
}
// encryptSecret seals a secret key with AES-256-GCM, binding it to the
// access key id as additional authenticated data. It returns the
// ciphertext and the freshly generated nonce, both base64 (std) encoded.
func encryptSecret(masterKey []byte, accessKeyID, secret string) (ciphertextB64 string, nonceB64 string, err error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return "", "", err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return "", "", err
	}
	nonce := make([]byte, gcmNonceLength)
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return "", "", err
	}
	sealed := aead.Seal(nil, nonce, []byte(secret), []byte(accessKeyID))
	return base64.StdEncoding.EncodeToString(sealed), base64.StdEncoding.EncodeToString(nonce), nil
}
// decryptSecret reverses encryptSecret: it decodes the base64 inputs
// and opens the AES-256-GCM ciphertext using the access key id as
// additional authenticated data. Any tampering or key mismatch fails
// authentication inside gcm.Open.
func decryptSecret(masterKey []byte, accessKeyID, ciphertextB64, nonceB64 string) (string, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return "", err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	sealed, err := base64.StdEncoding.DecodeString(ciphertextB64)
	if err != nil {
		return "", err
	}
	nonce, err := base64.StdEncoding.DecodeString(nonceB64)
	if err != nil {
		return "", err
	}
	if len(nonce) != gcmNonceLength {
		return "", fmt.Errorf("invalid nonce length: %d", len(nonce))
	}
	plaintext, err := aead.Open(nil, nonce, sealed, []byte(accessKeyID))
	if err != nil {
		return "", err
	}
	return string(plaintext), nil
}

22
auth/errors.go Normal file
View File

@@ -0,0 +1,22 @@
package auth
import "errors"
// Domain auth errors. The API layer maps these onto S3-style XML error
// responses (see api/s3_errors.go); the middleware classifies them for
// metrics via authErrorClass.
var (
	ErrAccessDenied                 = errors.New("access denied")
	ErrInvalidAccessKeyID           = errors.New("invalid access key id")
	ErrUserAlreadyExists            = errors.New("user already exists")
	ErrUserNotFound                 = errors.New("user not found")
	ErrInvalidUserInput             = errors.New("invalid user input")
	ErrSignatureDoesNotMatch        = errors.New("signature does not match")
	ErrAuthorizationHeaderMalformed = errors.New("authorization header malformed")
	ErrRequestTimeTooSkewed         = errors.New("request time too skewed")
	ErrExpiredToken                 = errors.New("expired token")
	ErrCredentialDisabled           = errors.New("credential disabled")
	ErrAuthNotEnabled               = errors.New("authentication is not enabled")
	ErrMasterKeyRequired            = errors.New("auth master key is required")
	ErrInvalidMasterKey             = errors.New("invalid auth master key")
	ErrNoAuthCredentials            = errors.New("no auth credentials found")
	ErrUnsupportedAuthScheme        = errors.New("unsupported auth scheme")
	ErrInvalidPresign               = errors.New("invalid presigned request")
)

111
auth/middleware.go Normal file
View File

@@ -0,0 +1,111 @@
package auth
import (
"errors"
"fs/metrics"
"log/slog"
"net"
"net/http"
"github.com/go-chi/chi/v5/middleware"
)
// Middleware returns HTTP middleware that authenticates every request
// through svc before invoking the next handler.
//
// Behavior:
//   - When svc is nil or auth is disabled, requests pass through with an
//     unauthenticated RequestContext attached.
//   - /healthz is always exempt.
//   - On failure the error is classified into metrics, optionally
//     audit-logged, and rendered via onError (or a plain 403 fallback
//     when onError is nil).
//   - On success the resolved RequestContext is attached to the request
//     context for downstream handlers.
func Middleware(
	svc *Service,
	logger *slog.Logger,
	auditEnabled bool,
	onError func(http.ResponseWriter, *http.Request, error),
) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Default context used on the bypass paths below.
			authCtx := RequestContext{Authenticated: false, AuthType: "none"}
			if svc == nil || !svc.Config().Enabled {
				metrics.Default.ObserveAuth("bypass", "disabled", "auth_disabled")
				next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx)))
				return
			}
			if r.URL.Path == "/healthz" {
				metrics.Default.ObserveAuth("bypass", "none", "public_endpoint")
				next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx)))
				return
			}
			resolvedCtx, err := svc.AuthenticateRequest(r)
			if err != nil {
				metrics.Default.ObserveAuth("error", "sigv4", authErrorClass(err))
				if auditEnabled && logger != nil {
					// Audit the failure; request_id is attached only when
					// chi's RequestID middleware populated one.
					requestID := middleware.GetReqID(r.Context())
					attrs := []any{
						"method", r.Method,
						"path", r.URL.Path,
						"remote_ip", clientIP(r.RemoteAddr),
						"error", err.Error(),
					}
					if requestID != "" {
						attrs = append(attrs, "request_id", requestID)
					}
					logger.Warn("auth_failed", attrs...)
				}
				if onError != nil {
					onError(w, r, err)
					return
				}
				// Fallback response when no error renderer is supplied.
				http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
				return
			}
			metrics.Default.ObserveAuth("ok", resolvedCtx.AuthType, "none")
			if auditEnabled && logger != nil {
				requestID := middleware.GetReqID(r.Context())
				attrs := []any{
					"method", r.Method,
					"path", r.URL.Path,
					"remote_ip", clientIP(r.RemoteAddr),
					"access_key_id", resolvedCtx.AccessKeyID,
					"auth_type", resolvedCtx.AuthType,
				}
				if requestID != "" {
					attrs = append(attrs, "request_id", requestID)
				}
				logger.Info("auth_success", attrs...)
			}
			next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), resolvedCtx)))
		})
	}
}
// authErrorClass maps an auth error to a low-cardinality label for
// metrics; unknown errors collapse to "other". The checks run in a
// fixed order so wrapped errors classify deterministically.
func authErrorClass(err error) string {
	classes := []struct {
		target error
		label  string
	}{
		{ErrInvalidAccessKeyID, "invalid_access_key"},
		{ErrSignatureDoesNotMatch, "signature_mismatch"},
		{ErrAuthorizationHeaderMalformed, "auth_header_malformed"},
		{ErrRequestTimeTooSkewed, "time_skew"},
		{ErrExpiredToken, "expired_token"},
		{ErrNoAuthCredentials, "missing_credentials"},
		{ErrUnsupportedAuthScheme, "unsupported_auth_scheme"},
		{ErrInvalidPresign, "invalid_presign"},
		{ErrCredentialDisabled, "credential_disabled"},
		{ErrAccessDenied, "access_denied"},
	}
	for _, c := range classes {
		if errors.Is(err, c.target) {
			return c.label
		}
	}
	return "other"
}
// clientIP strips the port from a host:port remote address; when the
// address has no parseable port, it is returned unchanged.
func clientIP(remoteAddr string) string {
	if host, _, err := net.SplitHostPort(remoteAddr); err == nil && host != "" {
		return host
	}
	return remoteAddr
}

66
auth/policy.go Normal file
View File

@@ -0,0 +1,66 @@
package auth
import (
"fs/models"
"strings"
)
// isAllowed evaluates a policy against a request target. Deny overrides
// allow: the first matching deny statement short-circuits to false, and
// at least one matching allow is required for access.
func isAllowed(policy *models.AuthPolicy, target RequestTarget) bool {
	if policy == nil {
		return false
	}
	granted := false
	for _, stmt := range policy.Statements {
		if !statementMatches(stmt, target) {
			continue
		}
		switch strings.ToLower(strings.TrimSpace(stmt.Effect)) {
		case "deny":
			return false
		case "allow":
			granted = true
		}
	}
	return granted
}
// statementMatches reports whether a statement applies to the target:
// action and bucket must both match, and for object-level targets the
// key must fall under the statement's prefix (empty or "*" match all).
// Bucket-level targets (empty key) ignore the prefix entirely.
func statementMatches(stmt models.AuthPolicyStatement, target RequestTarget) bool {
	if !actionMatches(stmt.Actions, target.Action) || !bucketMatches(stmt.Bucket, target.Bucket) {
		return false
	}
	if target.Key == "" {
		return true
	}
	switch prefix := strings.TrimSpace(stmt.Prefix); prefix {
	case "", "*":
		return true
	default:
		return strings.HasPrefix(target.Key, prefix)
	}
}
// actionMatches reports whether any listed action covers the requested
// one. "*" and "s3:*" act as wildcards; exact matches are
// case-insensitive. An empty action list matches nothing.
func actionMatches(actions []string, action Action) bool {
	for _, candidate := range actions {
		trimmed := strings.TrimSpace(candidate)
		switch {
		case trimmed == "*", trimmed == "s3:*":
			return true
		case strings.EqualFold(trimmed, string(action)):
			return true
		}
	}
	return false
}
// bucketMatches reports whether a statement's bucket pattern covers the
// given bucket; an empty or "*" pattern matches any bucket, otherwise
// the comparison is an exact match on the trimmed pattern.
func bucketMatches(pattern, bucket string) bool {
	switch p := strings.TrimSpace(pattern); p {
	case "", "*":
		return true
	default:
		return p == bucket
	}
}

554
auth/service.go Normal file
View File

@@ -0,0 +1,554 @@
package auth
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"fs/metadata"
"fs/models"
"net/http"
"regexp"
"strings"
"time"
)
// Store is the persistence interface the auth service depends on; the
// metadata layer provides the implementation.
type Store interface {
	GetAuthIdentity(accessKeyID string) (*models.AuthIdentity, error)
	PutAuthIdentity(identity *models.AuthIdentity) error
	DeleteAuthIdentity(accessKeyID string) error
	// ListAuthIdentities returns up to limit identities after the given
	// cursor, plus the cursor for the next page (empty when exhausted).
	ListAuthIdentities(limit int, after string) ([]models.AuthIdentity, string, error)
	GetAuthPolicy(accessKeyID string) (*models.AuthPolicy, error)
	PutAuthPolicy(policy *models.AuthPolicy) error
	DeleteAuthPolicy(accessKeyID string) error
}
// CreateUserInput is the request payload for Service.CreateUser.
// SecretKey may be left empty to have one generated.
type CreateUserInput struct {
	AccessKeyID string
	SecretKey   string
	Status      string // "active" or "disabled"; empty defaults to active
	Policy      models.AuthPolicy
}

// UserSummary is the list-view projection of an identity (no policy).
type UserSummary struct {
	AccessKeyID string
	Status      string
	CreatedAt   int64 // unix seconds
	UpdatedAt   int64 // unix seconds
}

// UserDetails is the full view of an identity plus its attached policy.
type UserDetails struct {
	AccessKeyID string
	Status      string
	CreatedAt   int64
	UpdatedAt   int64
	Policy      models.AuthPolicy
}

// CreateUserResult returns the created user's details together with the
// plaintext secret key — the only time the secret is exposed.
type CreateUserResult struct {
	UserDetails
	SecretKey string
}
// validAccessKeyID constrains access key ids to 3–128 characters: a
// leading alphanumeric followed by alphanumerics, dot, underscore, or
// hyphen.
var validAccessKeyID = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._-]{2,127}$`)

// Service orchestrates SigV4 request authentication, policy
// evaluation, and user administration on top of a Store.
type Service struct {
	cfg       Config
	store     Store
	masterKey []byte           // decoded AES-256 key; nil when auth is disabled
	now       func() time.Time // injectable clock; defaults to UTC time.Now
}
// NewService constructs the auth Service. A store is always required.
// When auth is enabled, the configured master key must be present and
// decode to a 32-byte AES-256 key; when disabled, the service is
// returned without key material.
func NewService(cfg Config, store Store) (*Service, error) {
	if store == nil {
		return nil, errors.New("auth store is required")
	}
	svc := &Service{
		cfg:   cfg,
		store: store,
		// UTC clock; the field exists so tests can inject a fixed time.
		now: func() time.Time { return time.Now().UTC() },
	}
	if !cfg.Enabled {
		return svc, nil
	}
	if strings.TrimSpace(cfg.MasterKey) == "" {
		return nil, ErrMasterKeyRequired
	}
	key, err := decodeMasterKey(cfg.MasterKey)
	if err != nil {
		return nil, err
	}
	svc.masterKey = key
	return svc, nil
}
// Config returns the service's normalized configuration.
func (s *Service) Config() Config {
	return s.cfg
}
// EnsureBootstrap creates or refreshes the bootstrap identity and its
// policy from configuration. It is a no-op when auth is disabled or
// when no bootstrap access/secret key pair is configured.
func (s *Service) EnsureBootstrap() error {
	if !s.cfg.Enabled {
		return nil
	}
	accessKey := strings.TrimSpace(s.cfg.BootstrapAccessKey)
	secret := strings.TrimSpace(s.cfg.BootstrapSecretKey)
	if accessKey == "" || secret == "" {
		return nil
	}
	if len(accessKey) < 3 {
		return errors.New("bootstrap access key must be at least 3 characters")
	}
	if len(secret) < 8 {
		return errors.New("bootstrap secret key must be at least 8 characters")
	}
	now := s.now().Unix()
	// The secret is stored AES-256-GCM encrypted, never in plaintext.
	ciphertext, nonce, err := encryptSecret(s.masterKey, accessKey, secret)
	if err != nil {
		return err
	}
	identity := &models.AuthIdentity{
		AccessKeyID: accessKey,
		SecretEnc:   ciphertext,
		SecretNonce: nonce,
		EncAlg:      "AES-256-GCM",
		KeyVersion:  "v1",
		Status:      "active",
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	// Preserve the original CreatedAt when re-bootstrapping an existing
	// identity. Lookup errors are deliberately ignored here — any real
	// store failure will surface on the Put below.
	if existing, err := s.store.GetAuthIdentity(accessKey); err == nil && existing != nil {
		identity.CreatedAt = existing.CreatedAt
	}
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return err
	}
	// Full access by default; an explicit JSON policy override replaces
	// it, with the principal forced back to the bootstrap access key.
	policy := defaultBootstrapPolicy(accessKey)
	if strings.TrimSpace(s.cfg.BootstrapPolicy) != "" {
		parsed, err := parsePolicyJSON(s.cfg.BootstrapPolicy)
		if err != nil {
			return err
		}
		policy = parsed
		policy.Principal = accessKey
	}
	return s.store.PutAuthPolicy(policy)
}
// AuthenticateRequest verifies a SigV4-signed request and resolves its
// authorization, returning the RequestContext to attach downstream.
//
// Flow: parse SigV4 (header or presigned query), validate scope and
// timestamps against config, load the identity, decrypt its secret with
// the master key, recompute and compare the signature, then — except
// for /_admin/ paths, which skip S3 policy checks — resolve the S3
// action and evaluate the principal's policy.
func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
	if !s.cfg.Enabled {
		return RequestContext{Authenticated: false, AuthType: "disabled"}, nil
	}
	input, err := parseSigV4(r)
	if err != nil {
		return RequestContext{}, err
	}
	if err := validateSigV4Input(s.now(), s.cfg, input); err != nil {
		return RequestContext{}, err
	}
	// NOTE(review): any store failure is reported as an invalid access
	// key, which also masks infrastructure errors — confirm intended.
	identity, err := s.store.GetAuthIdentity(input.AccessKeyID)
	if err != nil {
		return RequestContext{}, ErrInvalidAccessKeyID
	}
	if !strings.EqualFold(identity.Status, "active") {
		return RequestContext{}, ErrCredentialDisabled
	}
	// Decrypt failures are surfaced as a signature mismatch so clients
	// learn nothing about server-side key management.
	secret, err := decryptSecret(s.masterKey, identity.AccessKeyID, identity.SecretEnc, identity.SecretNonce)
	if err != nil {
		return RequestContext{}, ErrSignatureDoesNotMatch
	}
	ok, err := signatureMatches(secret, r, input)
	if err != nil {
		return RequestContext{}, err
	}
	if !ok {
		return RequestContext{}, ErrSignatureDoesNotMatch
	}
	authType := "sigv4-header"
	if input.Presigned {
		authType = "sigv4-presign"
	}
	// Admin routes only require a valid signature; S3 policy evaluation
	// does not apply to them.
	if strings.HasPrefix(r.URL.Path, "/_admin/") {
		return RequestContext{
			Authenticated: true,
			AccessKeyID:   identity.AccessKeyID,
			AuthType:      authType,
		}, nil
	}
	policy, err := s.store.GetAuthPolicy(identity.AccessKeyID)
	if err != nil {
		return RequestContext{}, ErrAccessDenied
	}
	target := resolveTarget(r)
	// An unresolvable action is denied rather than allowed by default.
	if target.Action == "" {
		return RequestContext{}, ErrAccessDenied
	}
	if !isAllowed(policy, target) {
		return RequestContext{}, ErrAccessDenied
	}
	return RequestContext{
		Authenticated: true,
		AccessKeyID:   identity.AccessKeyID,
		AuthType:      authType,
	}, nil
}
// CreateUser provisions a new access key with an encrypted secret and
// an attached policy.
//
// The access key id must satisfy validAccessKeyID. A secret key is
// generated when none is supplied; a supplied one must be at least 8
// characters. Status defaults to "active". The plaintext secret is
// returned exactly once, in the result.
//
// Returns ErrUserAlreadyExists for duplicate keys and wrapped
// ErrInvalidUserInput for validation failures.
func (s *Service) CreateUser(input CreateUserInput) (*CreateUserResult, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	accessKeyID := strings.TrimSpace(input.AccessKeyID)
	if !validAccessKeyID.MatchString(accessKeyID) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	secretKey := strings.TrimSpace(input.SecretKey)
	if secretKey == "" {
		generated, err := generateSecretKey(32)
		if err != nil {
			return nil, err
		}
		secretKey = generated
	}
	if len(secretKey) < 8 {
		return nil, fmt.Errorf("%w: secret key must be at least 8 characters", ErrInvalidUserInput)
	}
	status := normalizeUserStatus(input.Status)
	if status == "" {
		return nil, fmt.Errorf("%w: status must be active or disabled", ErrInvalidUserInput)
	}
	policy, err := normalizePolicy(input.Policy, accessKeyID)
	if err != nil {
		return nil, err
	}
	existing, err := s.store.GetAuthIdentity(accessKeyID)
	if err == nil && existing != nil {
		return nil, ErrUserAlreadyExists
	}
	if err != nil && !errors.Is(err, metadata.ErrAuthIdentityNotFound) {
		return nil, err
	}
	now := s.now().Unix()
	ciphertext, nonce, err := encryptSecret(s.masterKey, accessKeyID, secretKey)
	if err != nil {
		return nil, err
	}
	identity := &models.AuthIdentity{
		AccessKeyID: accessKeyID,
		SecretEnc:   ciphertext,
		SecretNonce: nonce,
		EncAlg:      "AES-256-GCM",
		KeyVersion:  "v1",
		Status:      status,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	if err := s.store.PutAuthPolicy(&policy); err != nil {
		// Best-effort rollback: without a policy the identity would be
		// unusable (auth resolves to AccessDenied) yet still block
		// re-creation with ErrUserAlreadyExists, so remove it again.
		_ = s.store.DeleteAuthIdentity(accessKeyID)
		return nil, err
	}
	return &CreateUserResult{
		UserDetails: UserDetails{
			AccessKeyID: accessKeyID,
			Status:      status,
			CreatedAt:   now,
			UpdatedAt:   now,
			Policy:      policy,
		},
		SecretKey: secretKey,
	}, nil
}
// ListUsers returns one page of user summaries plus the cursor for the
// next page (empty when the listing is exhausted). The page size is
// clamped to at most 1000 and defaults to 100 when non-positive.
func (s *Service) ListUsers(limit int, cursor string) ([]UserSummary, string, error) {
	if !s.cfg.Enabled {
		return nil, "", ErrAuthNotEnabled
	}
	switch {
	case limit <= 0:
		limit = 100
	case limit > 1000:
		limit = 1000
	}
	identities, nextCursor, err := s.store.ListAuthIdentities(limit, cursor)
	if err != nil {
		return nil, "", err
	}
	summaries := make([]UserSummary, len(identities))
	for i, identity := range identities {
		summaries[i] = UserSummary{
			AccessKeyID: identity.AccessKeyID,
			Status:      normalizeUserStatus(identity.Status),
			CreatedAt:   identity.CreatedAt,
			UpdatedAt:   identity.UpdatedAt,
		}
	}
	return summaries, nextCursor, nil
}
// GetUser returns identity metadata and the attached policy for an
// access key. ErrUserNotFound is returned when either the identity or
// its policy is missing from the store.
func (s *Service) GetUser(accessKeyID string) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	key := strings.TrimSpace(accessKeyID)
	if key == "" {
		return nil, fmt.Errorf("%w: access key id is required", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(key)
	switch {
	case errors.Is(err, metadata.ErrAuthIdentityNotFound):
		return nil, ErrUserNotFound
	case err != nil:
		return nil, err
	}
	policy, err := s.store.GetAuthPolicy(key)
	switch {
	case errors.Is(err, metadata.ErrAuthPolicyNotFound):
		return nil, ErrUserNotFound
	case err != nil:
		return nil, err
	}
	return &UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      *policy,
	}, nil
}
// DeleteUser removes an identity and its policy. The configured
// bootstrap user cannot be deleted. A missing policy is tolerated; a
// missing identity yields ErrUserNotFound.
func (s *Service) DeleteUser(accessKeyID string) error {
	if !s.cfg.Enabled {
		return ErrAuthNotEnabled
	}
	key := strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(key) {
		return fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	if bootstrap := strings.TrimSpace(s.cfg.BootstrapAccessKey); bootstrap != "" && key == bootstrap {
		return fmt.Errorf("%w: bootstrap user cannot be deleted", ErrInvalidUserInput)
	}
	if _, err := s.store.GetAuthIdentity(key); err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return ErrUserNotFound
		}
		return err
	}
	if err := s.store.DeleteAuthIdentity(key); err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return ErrUserNotFound
		}
		return err
	}
	// Policy removal is best-effort: absence is not an error.
	if err := s.store.DeleteAuthPolicy(key); err != nil && !errors.Is(err, metadata.ErrAuthPolicyNotFound) {
		return err
	}
	return nil
}
// SetUserPolicy replaces the user's policy with a normalized copy of
// the supplied one and bumps the identity's UpdatedAt timestamp.
func (s *Service) SetUserPolicy(accessKeyID string, policy models.AuthPolicy) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	key := strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(key) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(key)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	normalized, err := normalizePolicy(policy, key)
	if err != nil {
		return nil, err
	}
	if err := s.store.PutAuthPolicy(&normalized); err != nil {
		return nil, err
	}
	// Record the policy change on the identity's timestamp as well.
	identity.UpdatedAt = s.now().Unix()
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	return &UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      normalized,
	}, nil
}
// SetUserStatus sets the identity's status to "active" or "disabled"
// and bumps UpdatedAt, returning the refreshed details including the
// current policy.
func (s *Service) SetUserStatus(accessKeyID, status string) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	accessKeyID = strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(accessKeyID) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	status = strings.TrimSpace(status)
	// Empty status is rejected explicitly here; normalizeUserStatus on
	// its own would have defaulted it to "active".
	if status == "" {
		return nil, fmt.Errorf("%w: status is required", ErrInvalidUserInput)
	}
	normalizedStatus := normalizeUserStatus(status)
	if normalizedStatus == "" {
		return nil, fmt.Errorf("%w: status must be active or disabled", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(accessKeyID)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	identity.Status = normalizedStatus
	identity.UpdatedAt = s.now().Unix()
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	// NOTE(review): the policy is fetched after the identity has already
	// been persisted; a missing policy returns ErrUserNotFound even
	// though the status change took effect — confirm this is intended.
	policy, err := s.store.GetAuthPolicy(accessKeyID)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthPolicyNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	return &UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      *policy,
	}, nil
}
// parsePolicyJSON decodes a bootstrap policy override from JSON and
// requires it to contain at least one statement.
func parsePolicyJSON(raw string) (*models.AuthPolicy, error) {
	var policy models.AuthPolicy
	if err := json.Unmarshal([]byte(raw), &policy); err != nil {
		return nil, fmt.Errorf("invalid bootstrap policy: %w", err)
	}
	if len(policy.Statements) == 0 {
		return nil, errors.New("bootstrap policy must contain at least one statement")
	}
	return &policy, nil
}
// defaultBootstrapPolicy grants the principal full access: every S3
// action on every bucket and key prefix.
func defaultBootstrapPolicy(principal string) *models.AuthPolicy {
	allowAll := models.AuthPolicyStatement{
		Effect:  "allow",
		Actions: []string{"s3:*"},
		Bucket:  "*",
		Prefix:  "*",
	}
	return &models.AuthPolicy{
		Principal:  principal,
		Statements: []models.AuthPolicyStatement{allowAll},
	}
}
// generateSecretKey produces a URL-safe random secret from length
// cryptographically random bytes; non-positive lengths fall back to 32
// bytes (a 43-character encoded string).
func generateSecretKey(length int) (string, error) {
	if length <= 0 {
		length = 32
	}
	raw := make([]byte, length)
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}
	// RawURLEncoding omits padding, keeping the key shell/URL friendly.
	return base64.RawURLEncoding.EncodeToString(raw), nil
}
// normalizeUserStatus canonicalizes a status string: empty defaults to
// "active", "active"/"disabled" are lowercased and kept, and anything
// else returns "" to signal an invalid status.
func normalizeUserStatus(raw string) string {
	switch status := strings.ToLower(strings.TrimSpace(raw)); status {
	case "":
		return "active"
	case "active", "disabled":
		return status
	default:
		return ""
	}
}
// normalizePolicy validates and canonicalizes a caller-supplied policy:
// effects are lowercased and must be allow/deny, blank actions are
// dropped (at least one must remain per statement), empty bucket and
// prefix default to the "*" wildcard, and the principal is forced to
// the given access key.
func normalizePolicy(policy models.AuthPolicy, principal string) (models.AuthPolicy, error) {
	if len(policy.Statements) == 0 {
		return models.AuthPolicy{}, fmt.Errorf("%w: at least one policy statement is required", ErrInvalidUserInput)
	}
	normalized := models.AuthPolicy{
		Principal:  principal,
		Statements: make([]models.AuthPolicyStatement, 0, len(policy.Statements)),
	}
	for _, stmt := range policy.Statements {
		effect := strings.ToLower(strings.TrimSpace(stmt.Effect))
		if effect != "allow" && effect != "deny" {
			return models.AuthPolicy{}, fmt.Errorf("%w: invalid policy effect %q", ErrInvalidUserInput, stmt.Effect)
		}
		var actions []string
		for _, raw := range stmt.Actions {
			if action := strings.TrimSpace(raw); action != "" {
				actions = append(actions, action)
			}
		}
		if len(actions) == 0 {
			return models.AuthPolicy{}, fmt.Errorf("%w: policy statement must include at least one action", ErrInvalidUserInput)
		}
		bucket := strings.TrimSpace(stmt.Bucket)
		if bucket == "" {
			bucket = "*"
		}
		prefix := strings.TrimSpace(stmt.Prefix)
		if prefix == "" {
			prefix = "*"
		}
		normalized.Statements = append(normalized.Statements, models.AuthPolicyStatement{
			Effect:  effect,
			Actions: actions,
			Bucket:  bucket,
			Prefix:  prefix,
		})
	}
	return normalized, nil
}

223
auth/service_admin_test.go Normal file
View File

@@ -0,0 +1,223 @@
package auth
import (
"encoding/base64"
"errors"
"fs/metadata"
"fs/models"
"path/filepath"
"testing"
)
// TestAdminCreateListGetUser exercises the create → list → get flow for
// an admin-provisioned user with a generated secret, and verifies the
// policy principal is forced to the access key id at each step.
func TestAdminCreateListGetUser(t *testing.T) {
	meta, svc := newTestAuthService(t)
	created, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "backup-user",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{
					Effect:  "allow",
					Actions: []string{"s3:GetObject"},
					Bucket:  "backup-bucket",
					Prefix:  "restic/",
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	if created.SecretKey == "" {
		t.Fatalf("CreateUser should return generated secret")
	}
	if created.AccessKeyID != "backup-user" {
		t.Fatalf("CreateUser access key mismatch: got %q", created.AccessKeyID)
	}
	if created.Policy.Principal != "backup-user" {
		t.Fatalf("policy principal mismatch: got %q", created.Policy.Principal)
	}
	users, nextCursor, err := svc.ListUsers(100, "")
	if err != nil {
		t.Fatalf("ListUsers returned error: %v", err)
	}
	if nextCursor != "" {
		t.Fatalf("unexpected next cursor: %q", nextCursor)
	}
	if len(users) != 1 {
		t.Fatalf("ListUsers returned %d users, want 1", len(users))
	}
	if users[0].AccessKeyID != "backup-user" {
		t.Fatalf("ListUsers returned wrong user: %q", users[0].AccessKeyID)
	}
	got, err := svc.GetUser("backup-user")
	if err != nil {
		t.Fatalf("GetUser returned error: %v", err)
	}
	if got.AccessKeyID != "backup-user" {
		t.Fatalf("GetUser access key mismatch: got %q", got.AccessKeyID)
	}
	if got.Policy.Principal != "backup-user" {
		t.Fatalf("GetUser policy principal mismatch: got %q", got.Policy.Principal)
	}
	if len(got.Policy.Statements) != 1 {
		t.Fatalf("GetUser policy statement count = %d, want 1", len(got.Policy.Statements))
	}
	// The metadata handle from the shared helper is unused in this test.
	_ = meta
}
// TestCreateUserDuplicateFails verifies that creating the same access
// key twice yields ErrUserAlreadyExists.
func TestCreateUserDuplicateFails(t *testing.T) {
	_, svc := newTestAuthService(t)
	input := CreateUserInput{
		AccessKeyID: "duplicate-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	}
	if _, err := svc.CreateUser(input); err != nil {
		t.Fatalf("first CreateUser returned error: %v", err)
	}
	if _, err := svc.CreateUser(input); !errors.Is(err, ErrUserAlreadyExists) {
		t.Fatalf("second CreateUser error = %v, want ErrUserAlreadyExists", err)
	}
}
// TestCreateUserRejectsInvalidAccessKey verifies that a too-short access
// key id ("x" fails the 3-char minimum) is rejected as invalid input.
func TestCreateUserRejectsInvalidAccessKey(t *testing.T) {
	_, svc := newTestAuthService(t)
	_, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "x",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	})
	if !errors.Is(err, ErrInvalidUserInput) {
		t.Fatalf("CreateUser error = %v, want ErrInvalidUserInput", err)
	}
}
// TestDeleteUser verifies that a created user can be deleted and that a
// subsequent lookup reports ErrUserNotFound.
func TestDeleteUser(t *testing.T) {
	_, svc := newTestAuthService(t)
	in := CreateUserInput{
		AccessKeyID: "delete-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	}
	if _, err := svc.CreateUser(in); err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	if err := svc.DeleteUser("delete-user"); err != nil {
		t.Fatalf("DeleteUser returned error: %v", err)
	}
	_, err := svc.GetUser("delete-user")
	if !errors.Is(err, ErrUserNotFound) {
		t.Fatalf("GetUser after delete error = %v, want ErrUserNotFound", err)
	}
}
// TestDeleteBootstrapUserRejected ensures the bootstrap (root) user cannot be
// deleted through the user-admin API.
func TestDeleteBootstrapUserRejected(t *testing.T) {
	_, svc := newTestAuthService(t)
	err := svc.DeleteUser("root-user")
	if !errors.Is(err, ErrInvalidUserInput) {
		t.Fatalf("DeleteUser bootstrap error = %v, want ErrInvalidUserInput", err)
	}
}
// TestSetUserPolicy verifies that SetUserPolicy replaces the user's existing
// policy with the new statement set.
func TestSetUserPolicy(t *testing.T) {
	_, svc := newTestAuthService(t)
	if _, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "policy-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:GetObject"}, Bucket: "b1", Prefix: "*"},
			},
		},
	}); err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	replacement := models.AuthPolicy{
		Statements: []models.AuthPolicyStatement{
			{Effect: "allow", Actions: []string{"s3:PutObject"}, Bucket: "b2", Prefix: "p/"},
		},
	}
	updated, err := svc.SetUserPolicy("policy-user", replacement)
	if err != nil {
		t.Fatalf("SetUserPolicy returned error: %v", err)
	}
	if len(updated.Policy.Statements) != 1 || updated.Policy.Statements[0].Actions[0] != "s3:PutObject" {
		t.Fatalf("SetUserPolicy did not apply new policy: %+v", updated.Policy)
	}
}
// TestSetUserStatus verifies that a user's status can be set to "disabled".
func TestSetUserStatus(t *testing.T) {
	_, svc := newTestAuthService(t)
	if _, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "status-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	}); err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	updated, err := svc.SetUserStatus("status-user", "disabled")
	if err != nil {
		t.Fatalf("SetUserStatus returned error: %v", err)
	}
	if updated.Status != "disabled" {
		t.Fatalf("SetUserStatus status = %q, want disabled", updated.Status)
	}
}
// newTestAuthService builds a Service backed by a temporary metadata store.
// The store lives in t.TempDir() and is closed automatically when the test
// finishes.
func newTestAuthService(t *testing.T) (*metadata.MetadataHandler, *Service) {
	t.Helper()
	meta, err := metadata.NewMetadataHandler(filepath.Join(t.TempDir(), "metadata.db"))
	if err != nil {
		t.Fatalf("NewMetadataHandler returned error: %v", err)
	}
	t.Cleanup(func() { _ = meta.Close() })
	// 32 zero bytes is a structurally valid, test-only master key.
	key := base64.StdEncoding.EncodeToString(make([]byte, 32))
	cfg := ConfigFromValues(
		true,
		"us-east-1",
		0,
		0,
		key,
		"root-user",
		"root-secret-123",
		"",
	)
	svc, err := NewService(cfg, meta)
	if err != nil {
		t.Fatalf("NewService returned error: %v", err)
	}
	return meta, svc
}

372
auth/sigv4.go Normal file
View File

@@ -0,0 +1,372 @@
package auth
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
// sigV4Algorithm is the only signing algorithm this package accepts.
const (
	sigV4Algorithm = "AWS4-HMAC-SHA256"
)
// sigV4Input carries the fields extracted from either the Authorization
// header or the presigned-URL query parameters of a SigV4 request.
type sigV4Input struct {
	AccessKeyID      string   // access key from the credential scope
	Date             string   // short date (first scope segment)
	Region           string   // signing region from the credential scope
	Service          string   // signing service from the credential scope
	Scope            string   // date/region/service/aws4_request
	SignedHeaders    []string // lowercased header names covered by the signature
	SignedHeadersRaw string   // lowercased semicolon-joined SignedHeaders value
	SignatureHex     string   // lowercased hex signature supplied by the client
	AmzDate          string   // full x-amz-date / X-Amz-Date timestamp
	ExpiresSeconds   int      // presigned only: parsed X-Amz-Expires
	Presigned        bool     // true when parsed from query parameters
}
// parseSigV4 dispatches to the presigned-URL parser when X-Amz-Algorithm is
// present in the query string, and to the Authorization-header parser
// otherwise.
func parseSigV4(r *http.Request) (*sigV4Input, error) {
	if r == nil {
		return nil, fmt.Errorf("%w: nil request", ErrAuthorizationHeaderMalformed)
	}
	if !strings.EqualFold(r.URL.Query().Get("X-Amz-Algorithm"), sigV4Algorithm) {
		return parseHeaderSigV4(r)
	}
	return parsePresignedSigV4(r)
}
// parseHeaderSigV4 extracts SigV4 signing material from the Authorization
// header ("AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=...").
func parseHeaderSigV4(r *http.Request) (*sigV4Input, error) {
	auth := strings.TrimSpace(r.Header.Get("Authorization"))
	switch {
	case auth == "":
		return nil, ErrNoAuthCredentials
	case !strings.HasPrefix(auth, sigV4Algorithm+" "):
		return nil, fmt.Errorf("%w: unsupported authorization algorithm", ErrUnsupportedAuthScheme)
	}
	fields := parseAuthorizationParams(strings.TrimSpace(strings.TrimPrefix(auth, sigV4Algorithm)))
	credential := fields["Credential"]
	signedHeadersRaw := fields["SignedHeaders"]
	signature := fields["Signature"]
	if credential == "" || signedHeadersRaw == "" || signature == "" {
		return nil, fmt.Errorf("%w: missing required authorization fields", ErrAuthorizationHeaderMalformed)
	}
	accessKeyID, date, region, service, scope, err := parseCredential(credential)
	if err != nil {
		return nil, err
	}
	amzDate := strings.TrimSpace(r.Header.Get("x-amz-date"))
	if amzDate == "" {
		return nil, fmt.Errorf("%w: x-amz-date is required", ErrAuthorizationHeaderMalformed)
	}
	headers := splitSignedHeaders(signedHeadersRaw)
	if len(headers) == 0 {
		return nil, fmt.Errorf("%w: signed headers are required", ErrAuthorizationHeaderMalformed)
	}
	return &sigV4Input{
		AccessKeyID:      accessKeyID,
		Date:             date,
		Region:           region,
		Service:          service,
		Scope:            scope,
		SignedHeaders:    headers,
		SignedHeadersRaw: strings.ToLower(strings.TrimSpace(signedHeadersRaw)),
		SignatureHex:     strings.ToLower(strings.TrimSpace(signature)),
		AmzDate:          amzDate,
		Presigned:        false,
	}, nil
}
// parsePresignedSigV4 extracts SigV4 signing material from the X-Amz-* query
// parameters of a presigned URL.
func parsePresignedSigV4(r *http.Request) (*sigV4Input, error) {
	q := r.URL.Query()
	if !strings.EqualFold(q.Get("X-Amz-Algorithm"), sigV4Algorithm) {
		return nil, fmt.Errorf("%w: invalid X-Amz-Algorithm", ErrInvalidPresign)
	}
	credential := strings.TrimSpace(q.Get("X-Amz-Credential"))
	signedHeadersRaw := strings.TrimSpace(q.Get("X-Amz-SignedHeaders"))
	signature := strings.TrimSpace(q.Get("X-Amz-Signature"))
	amzDate := strings.TrimSpace(q.Get("X-Amz-Date"))
	expiresRaw := strings.TrimSpace(q.Get("X-Amz-Expires"))
	if credential == "" || signedHeadersRaw == "" || signature == "" || amzDate == "" || expiresRaw == "" {
		return nil, fmt.Errorf("%w: missing presigned query fields", ErrInvalidPresign)
	}
	expires, convErr := strconv.Atoi(expiresRaw)
	if convErr != nil || expires < 0 {
		return nil, fmt.Errorf("%w: invalid X-Amz-Expires", ErrInvalidPresign)
	}
	accessKeyID, date, region, service, scope, err := parseCredential(credential)
	if err != nil {
		return nil, err
	}
	headers := splitSignedHeaders(signedHeadersRaw)
	if len(headers) == 0 {
		return nil, fmt.Errorf("%w: signed headers are required", ErrInvalidPresign)
	}
	return &sigV4Input{
		AccessKeyID:      accessKeyID,
		Date:             date,
		Region:           region,
		Service:          service,
		Scope:            scope,
		SignedHeaders:    headers,
		SignedHeadersRaw: strings.ToLower(strings.TrimSpace(signedHeadersRaw)),
		SignatureHex:     strings.ToLower(signature),
		AmzDate:          amzDate,
		ExpiresSeconds:   expires,
		Presigned:        true,
	}, nil
}
// parseCredential splits a credential of the form
// "accessKey/date/region/service/aws4_request" into its parts and returns the
// joined scope (everything after the access key). Any blank segment or a bad
// terminator is rejected.
func parseCredential(raw string) (accessKeyID string, date string, region string, service string, scope string, err error) {
	parts := strings.Split(strings.TrimSpace(raw), "/")
	if len(parts) != 5 {
		return "", "", "", "", "", fmt.Errorf("%w: invalid credential scope", ErrAuthorizationHeaderMalformed)
	}
	for i := range parts {
		parts[i] = strings.TrimSpace(parts[i])
	}
	accessKeyID, date, region, service = parts[0], parts[1], parts[2], parts[3]
	if accessKeyID == "" || date == "" || region == "" || service == "" || parts[4] != "aws4_request" {
		return "", "", "", "", "", fmt.Errorf("%w: invalid credential scope", ErrAuthorizationHeaderMalformed)
	}
	return accessKeyID, date, region, service, strings.Join(parts[1:], "/"), nil
}
// splitSignedHeaders normalizes a semicolon-separated SignedHeaders value
// into lowercased, trimmed, non-empty header names. A blank input yields nil.
func splitSignedHeaders(raw string) []string {
	raw = strings.ToLower(strings.TrimSpace(raw))
	if raw == "" {
		return nil
	}
	fields := strings.Split(raw, ";")
	out := make([]string, 0, len(fields))
	for _, field := range fields {
		if trimmed := strings.TrimSpace(field); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}
// parseAuthorizationParams parses the comma-separated "Key=Value" list that
// follows the algorithm token in a SigV4 Authorization header, e.g.
// "Credential=..., SignedHeaders=..., Signature=...". Tokens without an '='
// are skipped; values keep any '=' beyond the first (strings.Cut splits on
// the first occurrence only).
//
// The original additionally called strings.TrimPrefix(raw, " ") after
// strings.TrimSpace — dead code, since TrimSpace already removed any leading
// space; that redundant step is dropped here.
func parseAuthorizationParams(raw string) map[string]string {
	params := make(map[string]string)
	for _, token := range strings.Split(strings.TrimSpace(raw), ",") {
		key, value, found := strings.Cut(strings.TrimSpace(token), "=")
		if !found {
			continue
		}
		params[strings.TrimSpace(key)] = strings.TrimSpace(value)
	}
	return params
}
// validateSigV4Input checks the parsed signature input against the service
// configuration: service must be s3, region must match, the timestamp must be
// parseable and within the clock-skew window, and presigned requests must not
// exceed the maximum presign duration nor be past their expiry.
func validateSigV4Input(now time.Time, cfg Config, input *sigV4Input) error {
	if input == nil {
		return fmt.Errorf("%w: empty signature input", ErrAuthorizationHeaderMalformed)
	}
	if !strings.EqualFold(input.Service, "s3") {
		return fmt.Errorf("%w: unsupported service", ErrAuthorizationHeaderMalformed)
	}
	if !strings.EqualFold(input.Region, cfg.Region) {
		return fmt.Errorf("%w: region mismatch", ErrAuthorizationHeaderMalformed)
	}
	requestTime, err := time.Parse("20060102T150405Z", input.AmzDate)
	if err != nil {
		return fmt.Errorf("%w: invalid x-amz-date", ErrAuthorizationHeaderMalformed)
	}
	// Reject timestamps outside the skew window in either direction.
	if skew := now.Sub(requestTime); skew > cfg.ClockSkew || skew < -cfg.ClockSkew {
		return ErrRequestTimeTooSkewed
	}
	if !input.Presigned {
		return nil
	}
	if input.ExpiresSeconds > int(cfg.MaxPresignDuration.Seconds()) {
		return fmt.Errorf("%w: presign expires too large", ErrInvalidPresign)
	}
	if now.After(requestTime.Add(time.Duration(input.ExpiresSeconds) * time.Second)) {
		return ErrExpiredToken
	}
	return nil
}
// signatureMatches recomputes the SigV4 signature for the request with the
// user's secret and compares it to the client-supplied signature using a
// constant-time comparison.
func signatureMatches(secret string, r *http.Request, input *sigV4Input) (bool, error) {
	payloadHash := resolvePayloadHash(r, input.Presigned)
	canonical, err := buildCanonicalRequest(r, input.SignedHeaders, payloadHash, input.Presigned)
	if err != nil {
		return false, err
	}
	key := deriveSigningKey(secret, input.Date, input.Region, input.Service)
	want := hex.EncodeToString(hmacSHA256(key, buildStringToSign(input.AmzDate, input.Scope, canonical)))
	return hmac.Equal([]byte(want), []byte(input.SignatureHex)), nil
}
func resolvePayloadHash(r *http.Request, presigned bool) string {
if presigned {
return "UNSIGNED-PAYLOAD"
}
hash := strings.TrimSpace(r.Header.Get("x-amz-content-sha256"))
if hash == "" {
return "UNSIGNED-PAYLOAD"
}
return hash
}
func buildCanonicalRequest(r *http.Request, signedHeaders []string, payloadHash string, presigned bool) (string, error) {
canonicalURI := canonicalPath(r.URL)
canonicalQuery := canonicalQueryString(r.URL.RawQuery, presigned)
canonicalHeaders, signedHeadersRaw, err := canonicalHeadersForRequest(r, signedHeaders)
if err != nil {
return "", err
}
return strings.Join([]string{
r.Method,
canonicalURI,
canonicalQuery,
canonicalHeaders,
signedHeadersRaw,
payloadHash,
}, "\n"), nil
}
func canonicalPath(u *url.URL) string {
if u == nil {
return "/"
}
path := u.EscapedPath()
if path == "" {
return "/"
}
return path
}
// queryPair is a single query-string key/value used while building the
// sorted canonical query string.
type queryPair struct {
	Key string
	Value string
}
// canonicalQueryString re-encodes the raw query in SigV4 canonical form:
// pairs sorted by key then value, AWS-style percent encoding. For presigned
// requests X-Amz-Signature is excluded, since the signature is computed over
// the remaining parameters.
func canonicalQueryString(rawQuery string, presigned bool) string {
	if rawQuery == "" {
		return ""
	}
	values, _ := url.ParseQuery(rawQuery)
	var pairs []queryPair
	for key, list := range values {
		if presigned && strings.EqualFold(key, "X-Amz-Signature") {
			continue
		}
		if len(list) == 0 {
			pairs = append(pairs, queryPair{Key: key})
			continue
		}
		for _, value := range list {
			pairs = append(pairs, queryPair{Key: key, Value: value})
		}
	}
	sort.Slice(pairs, func(i, j int) bool {
		if pairs[i].Key != pairs[j].Key {
			return pairs[i].Key < pairs[j].Key
		}
		return pairs[i].Value < pairs[j].Value
	})
	parts := make([]string, 0, len(pairs))
	for _, p := range pairs {
		parts = append(parts, awsEncodeQuery(p.Key)+"="+awsEncodeQuery(p.Value))
	}
	return strings.Join(parts, "&")
}
// awsEncodeQuery percent-encodes a query component per the SigV4 rules:
// spaces become %20 (never '+'), '*' is encoded as %2A, and '~' stays bare.
func awsEncodeQuery(value string) string {
	replacer := strings.NewReplacer("+", "%20", "*", "%2A", "%7E", "~")
	return replacer.Replace(url.QueryEscape(value))
}
// canonicalHeadersForRequest renders the client's signed headers in canonical
// "name:value\n" form and returns the block plus the semicolon-joined name
// list. "host" is read from r.Host; every other signed header must exist on
// the request or the header is reported as malformed.
func canonicalHeadersForRequest(r *http.Request, signedHeaders []string) (string, string, error) {
	if len(signedHeaders) == 0 {
		return "", "", fmt.Errorf("%w: empty signed headers", ErrAuthorizationHeaderMalformed)
	}
	names := make([]string, 0, len(signedHeaders))
	lines := make([]string, 0, len(signedHeaders))
	for _, raw := range signedHeaders {
		name := strings.ToLower(strings.TrimSpace(raw))
		if name == "" {
			continue
		}
		var value string
		if name == "host" {
			// net/http promotes the Host header onto the request struct.
			value = r.Host
		} else {
			vals, ok := r.Header[http.CanonicalHeaderKey(name)]
			if !ok || len(vals) == 0 {
				return "", "", fmt.Errorf("%w: missing signed header %q", ErrAuthorizationHeaderMalformed, name)
			}
			value = strings.Join(vals, ",")
		}
		names = append(names, name)
		lines = append(lines, name+":"+normalizeHeaderValue(value))
	}
	if len(lines) == 0 {
		return "", "", fmt.Errorf("%w: no valid signed headers", ErrAuthorizationHeaderMalformed)
	}
	return strings.Join(lines, "\n") + "\n", strings.Join(names, ";"), nil
}
// normalizeHeaderValue collapses internal whitespace runs to single spaces
// and drops leading/trailing whitespace, per SigV4 header canonicalization.
func normalizeHeaderValue(value string) string {
	return strings.Join(strings.Fields(value), " ")
}
func buildStringToSign(amzDate string, scope string, canonicalRequest string) string {
canonicalHash := sha256.Sum256([]byte(canonicalRequest))
return strings.Join([]string{
sigV4Algorithm,
amzDate,
scope,
hex.EncodeToString(canonicalHash[:]),
}, "\n")
}
// deriveSigningKey performs the four-step SigV4 key derivation, chaining
// HMAC-SHA256 from "AWS4"+secret through date, region, service, and the fixed
// "aws4_request" terminator.
func deriveSigningKey(secret, date, region, service string) []byte {
	key := []byte("AWS4" + secret)
	for _, step := range []string{date, region, service, "aws4_request"} {
		key = hmacSHA256(key, step)
	}
	return key
}
func hmacSHA256(key []byte, value string) []byte {
mac := hmac.New(sha256.New, key)
_, _ = mac.Write([]byte(value))
return mac.Sum(nil)
}

123
cmd/admin.go Normal file
View File

@@ -0,0 +1,123 @@
package cmd
import (
"errors"
"fmt"
"fs/utils"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
)
// Defaults applied when neither flags, environment variables, nor the server
// configuration supply a value.
const (
	defaultAdminEndpoint = "http://localhost:2600"
	defaultAdminRegion = "us-east-1"
)
// adminOptions holds the connection settings shared by all `admin`
// subcommands. Fields are populated from persistent flags and then filled in
// from environment variables and server config by resolve().
type adminOptions struct {
	Endpoint string // base URL of the admin API
	Region string // SigV4 signing region
	AccessKey string // admin access key ID
	SecretKey string // admin secret key
	JSON bool // emit JSON instead of tables
	Timeout time.Duration // HTTP client timeout
}
// newAdminCommand wires the `admin` command tree: shared connection flags
// plus the user, diag, and snapshot subcommands.
func newAdminCommand(build BuildInfo) *cobra.Command {
	opts := &adminOptions{}
	root := &cobra.Command{
		Use:   "admin",
		Short: "Admin operations over the fs admin API",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	flags := root.PersistentFlags()
	flags.StringVar(&opts.Endpoint, "endpoint", "", "Admin API endpoint (env: FSCLI_ENDPOINT, fallback ADDRESS+PORT)")
	flags.StringVar(&opts.Region, "region", "", "SigV4 region (env: FSCLI_REGION or FS_AUTH_REGION)")
	flags.StringVar(&opts.AccessKey, "access-key", "", "Admin access key (env: FS_ROOT_USER, FSCLI_ACCESS_KEY)")
	flags.StringVar(&opts.SecretKey, "secret-key", "", "Admin secret key (env: FS_ROOT_PASSWORD, FSCLI_SECRET_KEY)")
	flags.BoolVar(&opts.JSON, "json", false, "Emit JSON output")
	flags.DurationVar(&opts.Timeout, "timeout", 15*time.Second, "HTTP timeout")
	root.AddCommand(newAdminUserCommand(opts))
	root.AddCommand(newAdminDiagCommand(opts, build))
	root.AddCommand(newAdminSnapshotCommand(opts))
	return root
}
// resolve fills any unset options from environment variables and the server
// configuration (flag > env > server config > default), then validates the
// result. Credentials are only demanded when requireCredentials is true, so
// unsigned diagnostics can still run.
func (o *adminOptions) resolve(requireCredentials bool) error {
	serverCfg := utils.NewConfig()
	o.Endpoint = firstNonEmpty(
		o.Endpoint,
		os.Getenv("FSCLI_ENDPOINT"),
		endpointFromServerConfig(serverCfg.Address, serverCfg.Port),
		defaultAdminEndpoint,
	)
	o.Region = firstNonEmpty(
		o.Region,
		os.Getenv("FSCLI_REGION"),
		os.Getenv("FS_AUTH_REGION"),
		serverCfg.AuthRegion,
		defaultAdminRegion,
	)
	o.AccessKey = firstNonEmpty(
		o.AccessKey,
		os.Getenv("FS_ROOT_USER"),
		os.Getenv("FSCLI_ACCESS_KEY"),
		os.Getenv("AWS_ACCESS_KEY_ID"),
		serverCfg.AuthBootstrapAccessKey,
	)
	o.SecretKey = firstNonEmpty(
		o.SecretKey,
		os.Getenv("FS_ROOT_PASSWORD"),
		os.Getenv("FSCLI_SECRET_KEY"),
		os.Getenv("AWS_SECRET_ACCESS_KEY"),
		serverCfg.AuthBootstrapSecretKey,
	)
	if o.Timeout <= 0 {
		o.Timeout = 15 * time.Second
	}
	if o.Endpoint == "" {
		return errors.New("admin endpoint is required")
	}
	if parsed, err := url.Parse(o.Endpoint); err != nil || parsed.Scheme == "" || parsed.Host == "" {
		return fmt.Errorf("invalid endpoint %q", o.Endpoint)
	}
	if o.Region == "" {
		return errors.New("region is required")
	}
	if requireCredentials && (o.AccessKey == "" || o.SecretKey == "") {
		return errors.New("credentials required: set --access-key/--secret-key or FSCLI_ACCESS_KEY/FSCLI_SECRET_KEY")
	}
	return nil
}
// endpointFromServerConfig derives an http:// endpoint from the server's
// configured bind address and port, mapping wildcard binds to localhost and
// out-of-range ports to the default 2600.
func endpointFromServerConfig(address string, port int) string {
	host := strings.TrimSpace(address)
	switch host {
	case "", "0.0.0.0", "::", "[::]":
		host = "localhost"
	}
	if port <= 0 || port > 65535 {
		port = 2600
	}
	return "http://" + net.JoinHostPort(host, strconv.Itoa(port))
}
// firstNonEmpty returns the first candidate that is non-blank after trimming,
// already trimmed; it returns "" when every candidate is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}

281
cmd/admin_client.go Normal file
View File

@@ -0,0 +1,281 @@
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
)
// adminUserListItem is one row of a paginated user listing.
type adminUserListItem struct {
	AccessKeyID string `json:"accessKeyId"`
	Status string `json:"status"`
	CreatedAt int64 `json:"createdAt"` // Unix seconds (rendered by formatUnix)
	UpdatedAt int64 `json:"updatedAt"` // Unix seconds (rendered by formatUnix)
}
// adminUserListResponse is the payload of GET /_admin/v1/users.
type adminUserListResponse struct {
	Items []adminUserListItem `json:"items"`
	NextCursor string `json:"nextCursor,omitempty"` // opaque pagination cursor; empty on the last page
}
// adminPolicyStatement mirrors one policy statement as exchanged with the
// admin API.
type adminPolicyStatement struct {
	Effect string `json:"effect"`
	Actions []string `json:"actions"`
	Bucket string `json:"bucket"`
	Prefix string `json:"prefix"`
}
// adminPolicy is a user policy document.
type adminPolicy struct {
	Principal string `json:"principal,omitempty"`
	Statements []adminPolicyStatement `json:"statements"`
}
// adminUserResponse is a single user record returned by the admin API.
type adminUserResponse struct {
	AccessKeyID string `json:"accessKeyId"`
	Status string `json:"status"`
	CreatedAt int64 `json:"createdAt"`
	UpdatedAt int64 `json:"updatedAt"`
	Policy *adminPolicy `json:"policy,omitempty"`
	SecretKey string `json:"secretKey,omitempty"` // presumably only echoed on create — confirm against server
}
// createUserRequest is the body for POST /_admin/v1/users.
type createUserRequest struct {
	AccessKeyID string `json:"accessKeyId"`
	SecretKey string `json:"secretKey,omitempty"`
	Status string `json:"status,omitempty"`
	Policy adminPolicy `json:"policy"`
}
// setStatusRequest is the body for PUT /_admin/v1/users/{id}/status.
type setStatusRequest struct {
	Status string `json:"status"`
}
// setPolicyRequest is the body for PUT /_admin/v1/users/{id}/policy.
type setPolicyRequest struct {
	Policy adminPolicy `json:"policy"`
}
// adminErrorResponse is the JSON error envelope returned by the admin API.
type adminErrorResponse struct {
	Code string `json:"code"`
	Message string `json:"message"`
	RequestID string `json:"requestId,omitempty"`
}
// adminAPIError is a structured error for unexpected admin API responses.
type adminAPIError struct {
	StatusCode int
	Code       string
	Message    string
	RequestID  string
}

// Error renders the most specific description available: code and message
// (plus requestId when present), falling back to the bare HTTP status when
// the server sent no error code.
func (e *adminAPIError) Error() string {
	switch {
	case e == nil:
		return ""
	case e.Code == "":
		return fmt.Sprintf("admin API request failed: status=%d", e.StatusCode)
	case e.RequestID != "":
		return fmt.Sprintf("%s: %s (requestId=%s)", e.Code, e.Message, e.RequestID)
	default:
		return fmt.Sprintf("%s: %s", e.Code, e.Message)
	}
}
// adminAPIClient is a SigV4-signing HTTP client for the fs admin API.
type adminAPIClient struct {
	baseURL *url.URL // endpoint base; request paths are appended to its path
	region string // SigV4 signing region
	accessKey string // signing access key ID
	secretKey string // signing secret key
	client *http.Client // configured with the caller's timeout
}
// newAdminAPIClient resolves opts against environment/config fallbacks and
// returns a signing client for the admin API.
func newAdminAPIClient(opts *adminOptions, requireCredentials bool) (*adminAPIClient, error) {
	if opts == nil {
		return nil, errors.New("admin options are required")
	}
	if err := opts.resolve(requireCredentials); err != nil {
		return nil, err
	}
	base, err := url.Parse(opts.Endpoint)
	if err != nil {
		return nil, err
	}
	httpClient := &http.Client{Timeout: opts.Timeout}
	return &adminAPIClient{
		baseURL:   base,
		region:    opts.Region,
		accessKey: opts.AccessKey,
		secretKey: opts.SecretKey,
		client:    httpClient,
	}, nil
}
// CreateUser provisions a new user via POST /_admin/v1/users (expects 201).
func (c *adminAPIClient) CreateUser(ctx context.Context, request createUserRequest) (*adminUserResponse, error) {
	out := new(adminUserResponse)
	if err := c.doJSON(ctx, http.MethodPost, "/_admin/v1/users", nil, request, out, http.StatusCreated); err != nil {
		return nil, err
	}
	return out, nil
}
// ListUsers fetches one page of users. A non-positive limit and a blank
// cursor are omitted from the query so the server defaults apply.
func (c *adminAPIClient) ListUsers(ctx context.Context, limit int, cursor string) (*adminUserListResponse, error) {
	params := url.Values{}
	if limit > 0 {
		params.Set("limit", strconv.Itoa(limit))
	}
	if trimmed := strings.TrimSpace(cursor); trimmed != "" {
		params.Set("cursor", trimmed)
	}
	out := new(adminUserListResponse)
	if err := c.doJSON(ctx, http.MethodGet, "/_admin/v1/users", params, nil, out, http.StatusOK); err != nil {
		return nil, err
	}
	return out, nil
}
// GetUser fetches a single user record by access key ID.
func (c *adminAPIClient) GetUser(ctx context.Context, accessKeyID string) (*adminUserResponse, error) {
	target := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
	out := new(adminUserResponse)
	if err := c.doJSON(ctx, http.MethodGet, target, nil, nil, out, http.StatusOK); err != nil {
		return nil, err
	}
	return out, nil
}
// DeleteUser removes a user by access key ID (expects 204 No Content).
func (c *adminAPIClient) DeleteUser(ctx context.Context, accessKeyID string) error {
	target := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
	return c.doJSON(ctx, http.MethodDelete, target, nil, nil, nil, http.StatusNoContent)
}
// SetUserStatus updates a user's status and returns the updated record.
func (c *adminAPIClient) SetUserStatus(ctx context.Context, accessKeyID, status string) (*adminUserResponse, error) {
	target := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/status"
	out := new(adminUserResponse)
	if err := c.doJSON(ctx, http.MethodPut, target, nil, setStatusRequest{Status: status}, out, http.StatusOK); err != nil {
		return nil, err
	}
	return out, nil
}
// SetUserPolicy replaces a user's policy and returns the updated record.
func (c *adminAPIClient) SetUserPolicy(ctx context.Context, accessKeyID string, policy adminPolicy) (*adminUserResponse, error) {
	target := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/policy"
	out := new(adminUserResponse)
	if err := c.doJSON(ctx, http.MethodPut, target, nil, setPolicyRequest{Policy: policy}, out, http.StatusOK); err != nil {
		return nil, err
	}
	return out, nil
}
// Health performs an unsigned GET /healthz and returns the trimmed response
// body. On a non-200 status the body text (or the status text when the body
// is empty) is returned alongside an error.
func (c *adminAPIClient) Health(ctx context.Context) (string, error) {
	req, err := c.newRequest(ctx, http.MethodGet, "/healthz", nil, nil)
	if err != nil {
		return "", err
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	// Health responses are tiny; cap the read defensively at 8 KiB.
	raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<10))
	text := strings.TrimSpace(string(raw))
	if resp.StatusCode == http.StatusOK {
		if text == "" {
			text = "ok"
		}
		return text, nil
	}
	if text == "" {
		text = http.StatusText(resp.StatusCode)
	}
	return text, fmt.Errorf("health check failed: status=%d", resp.StatusCode)
}
// doJSON performs one signed JSON round-trip against the admin API.
// It marshals body when non-nil, builds the request, signs it with SigV4,
// and decodes the response into out (when out is non-nil and the response
// body is non-empty). Any status other than expectedStatus is returned as
// *adminAPIError, populated from the server's JSON error envelope when that
// envelope parses; otherwise the raw body text is used as the message.
func (c *adminAPIClient) doJSON(
	ctx context.Context,
	method string,
	path string,
	query url.Values,
	body any,
	out any,
	expectedStatus int,
) error {
	var payload []byte
	var err error
	if body != nil {
		payload, err = json.Marshal(body)
		if err != nil {
			return err
		}
	}
	req, err := c.newRequest(ctx, method, path, query, payload)
	if err != nil {
		return err
	}
	if len(payload) > 0 {
		req.Header.Set("Content-Type", "application/json")
	}
	req.Header.Set("Accept", "application/json")
	// Sign last, after all headers are final: the signature covers the
	// payload hash and the signed headers.
	if err := signSigV4Request(req, payload, c.accessKey, c.secretKey, c.region, "s3"); err != nil {
		return err
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Cap response reads at 2 MiB to bound memory on unexpected payloads.
	raw, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
	if readErr != nil {
		return readErr
	}
	if resp.StatusCode != expectedStatus {
		apiErr := &adminAPIError{StatusCode: resp.StatusCode}
		parsed := adminErrorResponse{}
		if len(raw) > 0 && json.Unmarshal(raw, &parsed) == nil {
			apiErr.Code = parsed.Code
			apiErr.Message = parsed.Message
			apiErr.RequestID = parsed.RequestID
		}
		if apiErr.Message == "" {
			// Non-JSON error bodies still surface as plain text.
			apiErr.Message = strings.TrimSpace(string(raw))
		}
		return apiErr
	}
	if out == nil || len(raw) == 0 {
		return nil
	}
	return json.NewDecoder(bytes.NewReader(raw)).Decode(out)
}
// newRequest builds an HTTP request against the client's base URL.
//
// The path argument may contain percent-escaped segments (callers build it
// with url.PathEscape). The original assigned the joined path directly to
// u.Path, where url.URL.String() treats it as *decoded* text and re-encodes
// the '%' characters (e.g. "%2F" became "%252F"). Parsing the joined path as
// a URL reference populates both Path and RawPath, so the original escaping
// is preserved in the final request URL.
func (c *adminAPIClient) newRequest(
	ctx context.Context,
	method string,
	path string,
	query url.Values,
	payload []byte,
) (*http.Request, error) {
	ref, err := url.Parse(strings.TrimRight(c.baseURL.Path, "/") + path)
	if err != nil {
		return nil, err
	}
	u := *c.baseURL
	u.Path = ref.Path
	u.RawPath = ref.RawPath
	u.RawQuery = ""
	if len(query) > 0 {
		u.RawQuery = query.Encode()
	}
	req, err := http.NewRequestWithContext(ctx, method, u.String(), bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	return req, nil
}

68
cmd/admin_diag.go Normal file
View File

@@ -0,0 +1,68 @@
package cmd
import (
"context"
"fmt"
"runtime"
"github.com/spf13/cobra"
)
// newAdminDiagCommand groups the diagnostics subcommands (health, version).
func newAdminDiagCommand(opts *adminOptions, build BuildInfo) *cobra.Command {
	diag := &cobra.Command{
		Use:   "diag",
		Short: "Diagnostics and connectivity checks",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	diag.AddCommand(newAdminDiagHealthCommand(opts))
	diag.AddCommand(newAdminDiagVersionCommand(build, opts))
	return diag
}
// newAdminDiagHealthCommand returns the `diag health` command, which probes
// /healthz without requiring credentials.
func newAdminDiagHealthCommand(opts *adminOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "health",
		Short: "Check server health endpoint",
		RunE: func(cmd *cobra.Command, args []string) error {
			client, err := newAdminAPIClient(opts, false)
			if err != nil {
				return err
			}
			status, err := client.Health(context.Background())
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), map[string]string{"status": status})
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "health: %s\n", status)
			return err
		},
	}
}
// newAdminDiagVersionCommand returns the `diag version` command, which prints
// the CLI build metadata plus the Go runtime version.
func newAdminDiagVersionCommand(build BuildInfo, opts *adminOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Print CLI version metadata",
		RunE: func(cmd *cobra.Command, args []string) error {
			info := map[string]string{
				"version": build.Version,
				"commit":  build.Commit,
				"date":    build.Date,
				"go":      runtime.Version(),
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), info)
			}
			_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n",
				info["version"], info["commit"], info["date"], info["go"])
			return err
		},
	}
}

80
cmd/admin_output.go Normal file
View File

@@ -0,0 +1,80 @@
package cmd
import (
"encoding/json"
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
)
func writeJSON(out io.Writer, value any) error {
encoder := json.NewEncoder(out)
encoder.SetIndent("", " ")
return encoder.Encode(value)
}
// formatUnix renders a Unix-seconds timestamp as RFC 3339 in UTC, or "-" for
// zero/negative (unset) values.
func formatUnix(ts int64) string {
	if ts > 0 {
		return time.Unix(ts, 0).UTC().Format(time.RFC3339)
	}
	return "-"
}
// writeUserListTable renders a user listing as an aligned table, appending a
// NEXT_CURSOR row when the server returned a pagination cursor.
func writeUserListTable(out io.Writer, value *adminUserListResponse) error {
	w := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
	if _, err := fmt.Fprintln(w, "ACCESS_KEY_ID\tSTATUS\tCREATED_AT\tUPDATED_AT"); err != nil {
		return err
	}
	for _, u := range value.Items {
		_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", u.AccessKeyID, u.Status, formatUnix(u.CreatedAt), formatUnix(u.UpdatedAt))
		if err != nil {
			return err
		}
	}
	if strings.TrimSpace(value.NextCursor) != "" {
		if _, err := fmt.Fprintf(w, "\nNEXT_CURSOR\t%s\t\t\n", value.NextCursor); err != nil {
			return err
		}
	}
	return w.Flush()
}
// writeUserTable renders one user record as aligned key/value rows,
// optionally including the secret key, followed by the flattened policy
// statements (policy[N].effect/.actions/.bucket/.prefix).
func writeUserTable(out io.Writer, value *adminUserResponse, includeSecret bool) error {
	w := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
	row := func(key, val string) error {
		_, err := fmt.Fprintf(w, "%s\t%s\n", key, val)
		return err
	}
	for _, kv := range [][2]string{
		{"accessKeyId", value.AccessKeyID},
		{"status", value.Status},
		{"createdAt", formatUnix(value.CreatedAt)},
		{"updatedAt", formatUnix(value.UpdatedAt)},
	} {
		if err := row(kv[0], kv[1]); err != nil {
			return err
		}
	}
	if includeSecret && strings.TrimSpace(value.SecretKey) != "" {
		if err := row("secretKey", value.SecretKey); err != nil {
			return err
		}
	}
	if value.Policy != nil {
		for i, stmt := range value.Policy.Statements {
			prefix := fmt.Sprintf("policy[%d]", i+1)
			for _, kv := range [][2]string{
				{prefix + ".effect", stmt.Effect},
				{prefix + ".actions", strings.Join(stmt.Actions, ",")},
				{prefix + ".bucket", stmt.Bucket},
				{prefix + ".prefix", stmt.Prefix},
			} {
				if err := row(kv[0], kv[1]); err != nil {
					return err
				}
			}
		}
	}
	return w.Flush()
}

47
cmd/admin_policy.go Normal file
View File

@@ -0,0 +1,47 @@
package cmd
import (
"fmt"
"strings"
)
// rolePolicyOptions are the inputs for expanding a named role shorthand into
// a concrete single-statement policy.
type rolePolicyOptions struct {
	Role string // one of: admin, readwrite, readonly (case-insensitive)
	Bucket string // target bucket; treated as "*" when blank
	Prefix string // key prefix; treated as "*" when blank
}
// buildPolicyFromRole expands a role shorthand (admin, readwrite, readonly)
// into a single-statement allow policy scoped to the given bucket and prefix,
// both defaulting to "*" when blank. Unknown roles return an error.
func buildPolicyFromRole(opts rolePolicyOptions) (adminPolicy, error) {
	bucket := strings.TrimSpace(opts.Bucket)
	if bucket == "" {
		bucket = "*"
	}
	prefix := strings.TrimSpace(opts.Prefix)
	if prefix == "" {
		prefix = "*"
	}
	var actions []string
	switch strings.ToLower(strings.TrimSpace(opts.Role)) {
	case "admin":
		actions = []string{"s3:*"}
	case "readwrite":
		actions = []string{"s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"}
	case "readonly":
		actions = []string{"s3:ListBucket", "s3:GetObject"}
	default:
		return adminPolicy{}, fmt.Errorf("invalid role %q (allowed: admin, readwrite, readonly)", opts.Role)
	}
	statement := adminPolicyStatement{
		Effect:  "allow",
		Actions: actions,
		Bucket:  bucket,
		Prefix:  prefix,
	}
	return adminPolicy{Statements: []adminPolicyStatement{statement}}, nil
}

166
cmd/admin_sigv4.go Normal file
View File

@@ -0,0 +1,166 @@
package cmd
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
// sigV4Algorithm is the algorithm token written into the Authorization header.
const sigV4Algorithm = "AWS4-HMAC-SHA256"
// signSigV4Request signs req in place with AWS SigV4 header authentication,
// hashing body for x-amz-content-sha256 and setting x-amz-date and the
// Authorization header. The signed headers are host, x-amz-content-sha256,
// and x-amz-date.
func signSigV4Request(req *http.Request, body []byte, accessKey, secretKey, region, service string) error {
	switch {
	case req == nil:
		return fmt.Errorf("nil request")
	case strings.TrimSpace(accessKey) == "" || strings.TrimSpace(secretKey) == "":
		return fmt.Errorf("missing signing credentials")
	case strings.TrimSpace(region) == "" || strings.TrimSpace(service) == "":
		return fmt.Errorf("missing signing scope")
	}
	now := time.Now().UTC()
	amzDate := now.Format("20060102T150405Z")
	shortDate := now.Format("20060102")
	scope := shortDate + "/" + region + "/" + service + "/aws4_request"
	payloadHash := sha256Hex(body)
	req.Header.Set("x-amz-date", amzDate)
	req.Header.Set("x-amz-content-sha256", payloadHash)
	// Keep req.Host aligned with the URL host used in the canonical headers.
	req.Host = req.URL.Host
	canonical, signedHeadersRaw := buildCanonicalRequest(
		req,
		[]string{"host", "x-amz-content-sha256", "x-amz-date"},
		payloadHash,
	)
	signingKey := deriveSigningKey(secretKey, shortDate, region, service)
	signature := hex.EncodeToString(hmacSHA256(signingKey, buildStringToSign(amzDate, scope, canonical)))
	req.Header.Set("Authorization", fmt.Sprintf(
		"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		sigV4Algorithm, accessKey, scope, signedHeadersRaw, signature,
	))
	return nil
}
// buildCanonicalRequest returns the newline-joined canonical request string
// together with the semicolon-joined signed-header list actually used.
func buildCanonicalRequest(req *http.Request, signedHeaders []string, payloadHash string) (string, string) {
	headerBlock, signedRaw := canonicalHeaders(req, signedHeaders)
	canonical := req.Method + "\n" +
		canonicalPath(req.URL) + "\n" +
		canonicalQuery(req.URL) + "\n" +
		headerBlock + "\n" +
		signedRaw + "\n" +
		payloadHash
	return canonical, signedRaw
}
func canonicalPath(u *url.URL) string {
if u == nil {
return "/"
}
path := u.EscapedPath()
if path == "" {
return "/"
}
return path
}
// canonicalQuery renders u's query parameters in sorted, AWS-encoded
// "key=value" form joined by '&'; a nil URL yields "".
func canonicalQuery(u *url.URL) string {
	if u == nil {
		return ""
	}
	type kv struct{ key, value string }
	var pairs []kv
	for key, list := range u.Query() {
		if len(list) == 0 {
			pairs = append(pairs, kv{key: key})
			continue
		}
		for _, value := range list {
			pairs = append(pairs, kv{key: key, value: value})
		}
	}
	sort.Slice(pairs, func(i, j int) bool {
		if pairs[i].key != pairs[j].key {
			return pairs[i].key < pairs[j].key
		}
		return pairs[i].value < pairs[j].value
	})
	encoded := make([]string, 0, len(pairs))
	for _, p := range pairs {
		encoded = append(encoded, awsEncodeQuery(p.key)+"="+awsEncodeQuery(p.value))
	}
	return strings.Join(encoded, "&")
}
// awsEncodeQuery encodes a query key or value using the AWS canonical rules:
// '%20' for space, '%2A' for '*', and bare '~'.
func awsEncodeQuery(value string) string {
	out := url.QueryEscape(value)
	for _, sub := range [][2]string{{"+", "%20"}, {"*", "%2A"}, {"%7E", "~"}} {
		out = strings.ReplaceAll(out, sub[0], sub[1])
	}
	return out
}
func canonicalHeaders(req *http.Request, headers []string) (string, string) {
names := make([]string, 0, len(headers))
lines := make([]string, 0, len(headers))
for _, h := range headers {
name := strings.ToLower(strings.TrimSpace(h))
if name == "" {
continue
}
var value string
if name == "host" {
value = req.URL.Host
} else {
value = strings.Join(req.Header.Values(http.CanonicalHeaderKey(name)), ",")
}
value = strings.Join(strings.Fields(strings.TrimSpace(value)), " ")
names = append(names, name)
lines = append(lines, name+":"+value)
}
return strings.Join(lines, "\n") + "\n", strings.Join(names, ";")
}
// buildStringToSign produces the SigV4 "string to sign": algorithm tag,
// request timestamp, credential scope, and the hex SHA-256 of the
// canonical request, newline-separated.
func buildStringToSign(amzDate, scope, canonicalRequest string) string {
	digest := sha256.Sum256([]byte(canonicalRequest))
	var b strings.Builder
	b.WriteString(sigV4Algorithm)
	b.WriteByte('\n')
	b.WriteString(amzDate)
	b.WriteByte('\n')
	b.WriteString(scope)
	b.WriteByte('\n')
	b.WriteString(hex.EncodeToString(digest[:]))
	return b.String()
}
// deriveSigningKey derives the SigV4 signing key by chaining HMAC-SHA256
// from "AWS4"+secret over the date, region, service, and the fixed
// "aws4_request" terminator.
func deriveSigningKey(secret, date, region, service string) []byte {
	key := []byte("AWS4" + secret)
	for _, part := range []string{date, region, service, "aws4_request"} {
		key = hmacSHA256(key, part)
	}
	return key
}
func hmacSHA256(key []byte, message string) []byte {
mac := hmac.New(sha256.New, key)
_, _ = mac.Write([]byte(message))
return mac.Sum(nil)
}
// sha256Hex returns the lowercase hex SHA-256 digest of payload.
func sha256Hex(payload []byte) string {
	h := sha256.New()
	h.Write(payload)
	return hex.EncodeToString(h.Sum(nil))
}

728
cmd/admin_snapshot.go Normal file
View File

@@ -0,0 +1,728 @@
package cmd
import (
	"archive/tar"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"
	bolt "go.etcd.io/bbolt"
)
const (
	// snapshotManifestPath is the archive-internal location of the manifest.
	snapshotManifestPath = ".fs-snapshot/manifest.json"
	// snapshotFormat is the manifest format version this build understands.
	snapshotFormat = 1
)

// snapshotFileEntry records one archived file: its slash-separated path
// relative to the data root, its size in bytes, and its hex SHA-256 digest.
type snapshotFileEntry struct {
	Path   string `json:"path"`
	Size   int64  `json:"size"`
	SHA256 string `json:"sha256"`
}

// snapshotManifest is the JSON document stored at snapshotManifestPath
// inside every archive; it lists the format version, creation time, the
// original source path, and every file the archive must contain.
type snapshotManifest struct {
	FormatVersion int                 `json:"formatVersion"`
	CreatedAt     string              `json:"createdAt"`
	SourcePath    string              `json:"sourcePath"`
	Files         []snapshotFileEntry `json:"files"`
}

// snapshotSummary is the user-facing result of create/inspect/restore.
type snapshotSummary struct {
	SnapshotFile string `json:"snapshotFile"`
	CreatedAt    string `json:"createdAt"`
	SourcePath   string `json:"sourcePath"`
	FileCount    int    `json:"fileCount"`
	TotalBytes   int64  `json:"totalBytes"`
}
// newAdminSnapshotCommand groups the offline snapshot subcommands
// (create, inspect, restore) under "fs admin snapshot".
func newAdminSnapshotCommand(opts *adminOptions) *cobra.Command {
	root := &cobra.Command{
		Use:   "snapshot",
		Short: "Offline snapshot and restore utilities",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	for _, sub := range []*cobra.Command{
		newAdminSnapshotCreateCommand(opts),
		newAdminSnapshotInspectCommand(opts),
		newAdminSnapshotRestoreCommand(opts),
	} {
		root.AddCommand(sub)
	}
	return root
}
// newAdminSnapshotCreateCommand builds "fs admin snapshot create", which
// archives an offline data directory into a .tar.gz snapshot.
func newAdminSnapshotCreateCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>"
	var (
		dataPath string
		outFile  string
	)
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create offline snapshot tarball (.tar.gz)",
		RunE: func(cmd *cobra.Command, args []string) error {
			dataPath = strings.TrimSpace(dataPath)
			outFile = strings.TrimSpace(outFile)
			switch {
			case dataPath == "":
				return usageError(usage, "--data-path is required")
			case outFile == "":
				return usageError(usage, "--out is required")
			}
			result, err := createSnapshotArchive(context.Background(), dataPath, outFile)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "snapshot created: %s (files=%d bytes=%d)\n", result.SnapshotFile, result.FileCount, result.TotalBytes)
			return err
		},
	}
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Source data path (must contain metadata.db)")
	cmd.Flags().StringVar(&outFile, "out", "", "Output snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotInspectCommand builds "fs admin snapshot inspect", which
// verifies an archive against its embedded manifest and prints a summary.
func newAdminSnapshotInspectCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin snapshot inspect --file <snapshot.tar.gz>"
	var filePath string
	cmd := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect and verify snapshot archive integrity",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			if filePath == "" {
				return usageError(usage, "--file is required")
			}
			manifest, summary, err := inspectSnapshotArchive(filePath)
			if err != nil {
				return err
			}
			if opts.JSON {
				payload := map[string]any{
					"summary":  summary,
					"manifest": manifest,
				}
				return writeJSON(cmd.OutOrStdout(), payload)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot ok: %s\ncreated_at=%s source=%s files=%d bytes=%d\n",
				summary.SnapshotFile,
				summary.CreatedAt,
				summary.SourcePath,
				summary.FileCount,
				summary.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotRestoreCommand builds "fs admin snapshot restore", which
// extracts a verified snapshot into a destination data path (offline only).
func newAdminSnapshotRestoreCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]"
	var (
		filePath string
		dataPath string
		force    bool
	)
	cmd := &cobra.Command{
		Use:   "restore",
		Short: "Restore snapshot into a data path (offline only)",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			dataPath = strings.TrimSpace(dataPath)
			switch {
			case filePath == "":
				return usageError(usage, "--file is required")
			case dataPath == "":
				return usageError(usage, "--data-path is required")
			}
			result, err := restoreSnapshotArchive(context.Background(), filePath, dataPath, force)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot restored to %s (files=%d bytes=%d)\n",
				result.SourcePath,
				result.FileCount,
				result.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Destination data path")
	cmd.Flags().BoolVar(&force, "force", false, "Overwrite destination data path if it exists")
	return cmd
}
// createSnapshotArchive archives the offline data directory dataPath into
// a gzip-compressed tarball at outFile. The archive begins with a JSON
// manifest (path/size/SHA-256 of every regular file) followed by the files
// themselves. Preconditions: dataPath is a directory containing
// metadata.db, no server holds the database lock, and outFile is not
// inside dataPath. The tarball is written to a unique temp file and
// renamed into place after fsync, so a partial run never leaves a
// plausible-looking snapshot behind.
//
// NOTE(review): ctx is currently ignored; cancellation is not honored
// mid-archive.
func createSnapshotArchive(ctx context.Context, dataPath, outFile string) (*snapshotSummary, error) {
	_ = ctx
	sourceAbs, err := filepath.Abs(filepath.Clean(dataPath))
	if err != nil {
		return nil, err
	}
	outAbs, err := filepath.Abs(filepath.Clean(outFile))
	if err != nil {
		return nil, err
	}
	// Writing the archive into the tree being walked would make it try to
	// include (a growing copy of) itself.
	if isPathWithin(sourceAbs, outAbs) {
		return nil, errors.New("output file cannot be inside --data-path")
	}
	info, err := os.Stat(sourceAbs)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("data path %q is not a directory", sourceAbs)
	}
	if err := ensureMetadataExists(sourceAbs); err != nil {
		return nil, err
	}
	if err := ensureDataPathOffline(sourceAbs); err != nil {
		return nil, err
	}
	// Hash and size every file up front; the manifest then drives exactly
	// which files get archived below.
	manifest, totalBytes, err := buildSnapshotManifest(sourceAbs)
	if err != nil {
		return nil, err
	}
	if err := os.MkdirAll(filepath.Dir(outAbs), 0o755); err != nil {
		return nil, err
	}
	// Unique temp name + O_EXCL guards against concurrent creators.
	tmpPath := outAbs + ".tmp-" + strconvNowNano()
	file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Backstop close for early error returns; the success path closes
		// explicitly, making this a harmless double close.
		_ = file.Close()
	}()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	if err := writeManifestToTar(tw, manifest); err != nil {
		_ = tw.Close()
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}
	for _, entry := range manifest.Files {
		absPath := filepath.Join(sourceAbs, filepath.FromSlash(entry.Path))
		if err := writeFileToTar(tw, absPath, entry.Path); err != nil {
			_ = tw.Close()
			_ = gzw.Close()
			_ = os.Remove(tmpPath)
			return nil, err
		}
	}
	// Close order matters: tar flushes into gzip, gzip flushes into the
	// file, and the file is fsynced before the rename makes it visible.
	if err := tw.Close(); err != nil {
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := gzw.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Sync(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := os.Rename(tmpPath, outAbs); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	// Sync the containing directory so the rename itself is durable.
	if err := syncDir(filepath.Dir(outAbs)); err != nil {
		return nil, err
	}
	return &snapshotSummary{
		SnapshotFile: outAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   sourceAbs,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}, nil
}
// inspectSnapshotArchive opens and fully reads a snapshot, verifying that
// the set of archived files exactly matches the embedded manifest (same
// paths, sizes, and SHA-256 digests). It returns the manifest plus a
// summary; any mismatch is an error.
func inspectSnapshotArchive(filePath string) (*snapshotManifest, *snapshotSummary, error) {
	fileAbs, err := filepath.Abs(filepath.Clean(filePath))
	if err != nil {
		return nil, nil, err
	}
	manifest, actual, err := readSnapshotArchive(fileAbs)
	if err != nil {
		return nil, nil, err
	}
	var totalBytes int64
	expected := make(map[string]snapshotFileEntry, len(manifest.Files))
	for _, want := range manifest.Files {
		expected[want.Path] = want
		totalBytes += want.Size
	}
	if len(actual) != len(expected) {
		return nil, nil, fmt.Errorf("snapshot validation failed: expected %d files, got %d", len(expected), len(actual))
	}
	for path, want := range expected {
		have, ok := actual[path]
		switch {
		case !ok:
			return nil, nil, fmt.Errorf("snapshot validation failed: missing file %s", path)
		case have.Size != want.Size || have.SHA256 != want.SHA256:
			return nil, nil, fmt.Errorf("snapshot validation failed: checksum mismatch for %s", path)
		}
	}
	summary := &snapshotSummary{
		SnapshotFile: fileAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   manifest.SourcePath,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}
	return manifest, summary, nil
}
// restoreSnapshotArchive restores a verified snapshot into destinationPath.
// The archive is first fully validated (inspectSnapshotArchive), extracted
// into a hidden staging directory next to the destination, and then
// swapped into place with a single rename, so a failed restore never
// leaves a half-written data path. An existing non-empty destination
// requires force and must not be in active use (metadata.db unlocked).
//
// NOTE(review): ctx is accepted for interface symmetry but not used;
// cancellation is not honored mid-restore.
func restoreSnapshotArchive(ctx context.Context, filePath, destinationPath string, force bool) (*snapshotSummary, error) {
	_ = ctx
	manifest, summary, err := inspectSnapshotArchive(filePath)
	if err != nil {
		return nil, err
	}
	destAbs, err := filepath.Abs(filepath.Clean(destinationPath))
	if err != nil {
		return nil, err
	}
	// If the destination already exists as a directory, refuse to touch it
	// while a server holds the metadata.db lock, and refuse to overwrite
	// non-empty contents unless force was requested.
	if fi, statErr := os.Stat(destAbs); statErr == nil && fi.IsDir() {
		if err := ensureDataPathOffline(destAbs); err != nil {
			return nil, err
		}
		entries, err := os.ReadDir(destAbs)
		if err == nil && len(entries) > 0 && !force {
			return nil, errors.New("destination data path is not empty; use --force to overwrite")
		}
	}
	parent := filepath.Dir(destAbs)
	if err := os.MkdirAll(parent, 0o755); err != nil {
		return nil, err
	}
	// Stage the extraction in a sibling dot-directory so the final step
	// can be a same-filesystem rename.
	stage := filepath.Join(parent, "."+filepath.Base(destAbs)+".restore-"+strconvNowNano())
	if err := os.MkdirAll(stage, 0o755); err != nil {
		return nil, err
	}
	cleanupStage := true
	defer func() {
		// Remove the staging directory on any failure path; disabled once
		// the rename has succeeded (after which stage no longer exists).
		if cleanupStage {
			_ = os.RemoveAll(stage)
		}
	}()
	if err := extractSnapshotArchive(filePath, stage, manifest); err != nil {
		return nil, err
	}
	if err := syncDir(stage); err != nil {
		return nil, err
	}
	// Re-check the destination just before the swap: a bare existing path
	// (file or directory) still requires force and is removed wholesale.
	if _, err := os.Stat(destAbs); err == nil {
		if !force {
			return nil, errors.New("destination data path exists; use --force to overwrite")
		}
		if err := os.RemoveAll(destAbs); err != nil {
			return nil, err
		}
	}
	if err := os.Rename(stage, destAbs); err != nil {
		return nil, err
	}
	// Sync the parent so the rename itself is durable.
	if err := syncDir(parent); err != nil {
		return nil, err
	}
	cleanupStage = false
	// Report the actual destination rather than the archive's recorded
	// original source path.
	summary.SourcePath = destAbs
	return summary, nil
}
func ensureMetadataExists(dataPath string) error {
dbPath := filepath.Join(dataPath, "metadata.db")
info, err := os.Stat(dbPath)
if err != nil {
return fmt.Errorf("metadata.db not found in %s", dataPath)
}
if !info.Mode().IsRegular() {
return fmt.Errorf("metadata.db in %s is not a regular file", dataPath)
}
return nil
}
// ensureDataPathOffline verifies no running server holds dataPath's
// metadata.db. A missing database means the path is unused and therefore
// safe. Otherwise it attempts a short, read-only bbolt open: bbolt takes a
// file lock, so a live server makes the open fail within the timeout.
func ensureDataPathOffline(dataPath string) error {
	dbPath := filepath.Join(dataPath, "metadata.db")
	if _, err := os.Stat(dbPath); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// No database yet: nothing can be holding it.
			return nil
		}
		return err
	}
	db, err := bolt.Open(dbPath, 0o600, &bolt.Options{
		Timeout:  100 * time.Millisecond,
		ReadOnly: true,
	})
	if err != nil {
		return fmt.Errorf("data path appears in use (metadata.db locked): %w", err)
	}
	return db.Close()
}
// buildSnapshotManifest walks dataPath and records every regular file
// (slash-separated path relative to the root, size, SHA-256). Directories
// and non-regular entries are skipped; an empty tree is an error. It also
// returns the total byte count of all recorded files.
func buildSnapshotManifest(dataPath string) (*snapshotManifest, int64, error) {
	files := make([]snapshotFileEntry, 0, 128)
	var total int64
	walk := func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			// Skip symlinks, sockets, devices, etc.
			return nil
		}
		rel, err := filepath.Rel(dataPath, path)
		if err != nil {
			return err
		}
		rel = filepath.ToSlash(filepath.Clean(rel))
		if rel == "" || rel == "." {
			return nil
		}
		sum, err := sha256File(path)
		if err != nil {
			return err
		}
		total += info.Size()
		files = append(files, snapshotFileEntry{Path: rel, Size: info.Size(), SHA256: sum})
		return nil
	}
	if err := filepath.WalkDir(dataPath, walk); err != nil {
		return nil, 0, err
	}
	if len(files) == 0 {
		return nil, 0, errors.New("data path contains no regular files to snapshot")
	}
	manifest := &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     time.Now().UTC().Format(time.RFC3339Nano),
		SourcePath:    dataPath,
		Files:         files,
	}
	return manifest, total, nil
}
// writeManifestToTar serializes the manifest as JSON and stores it in the
// tar stream at snapshotManifestPath with restrictive (0600) permissions.
func writeManifestToTar(tw *tar.Writer, manifest *snapshotManifest) error {
	if tw == nil || manifest == nil {
		return errors.New("invalid manifest writer input")
	}
	raw, err := json.Marshal(manifest)
	if err != nil {
		return err
	}
	hdr := tar.Header{
		Name:    snapshotManifestPath,
		Mode:    0o600,
		Size:    int64(len(raw)),
		ModTime: time.Now(),
	}
	if err := tw.WriteHeader(&hdr); err != nil {
		return err
	}
	_, err = tw.Write(raw)
	return err
}
func writeFileToTar(tw *tar.Writer, absPath, relPath string) error {
file, err := os.Open(absPath)
if err != nil {
return err
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: relPath,
Mode: int64(info.Mode().Perm()),
Size: info.Size(),
ModTime: info.ModTime(),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err = io.Copy(tw, file)
return err
}
// readSnapshotArchive streams through the .tar.gz at filePath, returning
// the embedded manifest plus a map of every other regular entry to its
// observed size and SHA-256 (keyed by cleaned slash path). Entry names are
// validated against path traversal, directory entries are skipped, and any
// other entry type is rejected. Fails if the manifest is missing or its
// format version is unsupported.
func readSnapshotArchive(filePath string) (*snapshotManifest, map[string]snapshotFileEntry, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return nil, nil, err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	actual := make(map[string]snapshotFileEntry)
	var manifest *snapshotManifest
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		// Reject absolute or parent-escaping entry names up front.
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return nil, nil, err
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		// TypeRegA is the legacy pre-POSIX regular-file flag (deprecated
		// in archive/tar but still emitted by old writers).
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return nil, nil, fmt.Errorf("unsupported tar entry type for %s", name)
		}
		if name == snapshotManifestPath {
			// Decode the manifest in place; a later duplicate entry would
			// overwrite an earlier one.
			raw, err := io.ReadAll(tr)
			if err != nil {
				return nil, nil, err
			}
			current := &snapshotManifest{}
			if err := json.Unmarshal(raw, current); err != nil {
				return nil, nil, err
			}
			manifest = current
			continue
		}
		// Hash every data entry while draining it from the stream.
		size, hashHex, err := digestReader(tr)
		if err != nil {
			return nil, nil, err
		}
		actual[name] = snapshotFileEntry{
			Path:   name,
			Size:   size,
			SHA256: hashHex,
		}
	}
	if manifest == nil {
		return nil, nil, errors.New("snapshot manifest.json not found")
	}
	if manifest.FormatVersion != snapshotFormat {
		return nil, nil, fmt.Errorf("unsupported snapshot format version %d", manifest.FormatVersion)
	}
	return manifest, actual, nil
}
// extractSnapshotArchive unpacks the archive at filePath into destination,
// admitting only files listed in the manifest. Every extracted file is
// re-hashed while being written and must match the manifest's size and
// SHA-256; afterwards the function verifies that every manifest file was
// actually present. Files are fsynced (and their directories synced) as
// they land so a completed extraction is durable.
func extractSnapshotArchive(filePath, destination string, manifest *snapshotManifest) error {
	expected := make(map[string]snapshotFileEntry, len(manifest.Files))
	for _, entry := range manifest.Files {
		expected[entry.Path] = entry
	}
	seen := make(map[string]struct{}, len(expected))
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return err
		}
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return err
		}
		if name == snapshotManifestPath {
			// The manifest itself is not extracted; drain it so the tar
			// reader can advance to the next entry.
			if _, err := io.Copy(io.Discard, tr); err != nil {
				return err
			}
			continue
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		// TypeRegA: legacy regular-file flag kept for old archives.
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return fmt.Errorf("unsupported tar entry type for %s", name)
		}
		exp, ok := expected[name]
		if !ok {
			return fmt.Errorf("snapshot contains unexpected file %s", name)
		}
		targetPath := filepath.Join(destination, filepath.FromSlash(name))
		// Defense in depth: cleanArchivePath already rejects traversal,
		// but re-check that the joined path stays under destination.
		if !isPathWithin(destination, targetPath) {
			return fmt.Errorf("invalid archive path %s", name)
		}
		if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
			return err
		}
		out, err := os.OpenFile(targetPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(header.Mode)&0o777)
		if err != nil {
			return err
		}
		// Hash while writing so the content check needs no second read.
		hasher := sha256.New()
		written, copyErr := io.Copy(io.MultiWriter(out, hasher), tr)
		// Always sync and close, then report the first failure in order.
		syncErr := out.Sync()
		closeErr := out.Close()
		if copyErr != nil {
			return copyErr
		}
		if syncErr != nil {
			return syncErr
		}
		if closeErr != nil {
			return closeErr
		}
		if err := syncDir(filepath.Dir(targetPath)); err != nil {
			return err
		}
		sum := hex.EncodeToString(hasher.Sum(nil))
		if written != exp.Size || sum != exp.SHA256 {
			return fmt.Errorf("checksum mismatch while extracting %s", name)
		}
		seen[name] = struct{}{}
	}
	// Both directions: no extra files (checked above) and no missing ones.
	if len(seen) != len(expected) {
		return fmt.Errorf("restore validation failed: extracted %d files, expected %d", len(seen), len(expected))
	}
	for path := range expected {
		if _, ok := seen[path]; !ok {
			return fmt.Errorf("restore validation failed: missing file %s", path)
		}
	}
	return nil
}
// cleanArchivePath normalizes a tar entry name to a slash-separated,
// cleaned path and rejects empty, absolute, or parent-escaping names.
func cleanArchivePath(name string) (string, error) {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" {
		return "", errors.New("empty archive path")
	}
	cleaned := filepath.ToSlash(filepath.Clean(trimmed))
	unsafe := strings.HasPrefix(cleaned, "/") ||
		strings.HasPrefix(cleaned, "../") ||
		strings.Contains(cleaned, "/../") ||
		cleaned == ".."
	if unsafe {
		return "", fmt.Errorf("unsafe archive path %q", cleaned)
	}
	return cleaned, nil
}
func digestReader(r io.Reader) (int64, string, error) {
hasher := sha256.New()
n, err := io.Copy(hasher, r)
if err != nil {
return 0, "", err
}
return n, hex.EncodeToString(hasher.Sum(nil)), nil
}
// sha256File hashes the file at path and returns its hex SHA-256 digest.
func sha256File(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	return sha256FromReader(f)
}
func sha256FromReader(reader io.Reader) (string, error) {
hasher := sha256.New()
if _, err := io.Copy(hasher, reader); err != nil {
return "", err
}
return hex.EncodeToString(hasher.Sum(nil)), nil
}
func isPathWithin(base, candidate string) bool {
base = filepath.Clean(base)
candidate = filepath.Clean(candidate)
rel, err := filepath.Rel(base, candidate)
if err != nil {
return false
}
return rel == "." || (rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)))
}
func syncDir(path string) error {
dir, err := os.Open(path)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}
func strconvNowNano() string {
return fmt.Sprintf("%d", time.Now().UnixNano())
}

240
cmd/admin_snapshot_test.go Normal file
View File

@@ -0,0 +1,240 @@
package cmd
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
bolt "go.etcd.io/bbolt"
)
// snapshotArchiveEntry is a test fixture: one archive member given by its
// tar entry path and raw contents.
type snapshotArchiveEntry struct {
	Path string
	Data []byte
}
// TestInspectSnapshotArchiveRejectsUnsafePath ensures path-traversal entry
// names ("../...") are rejected during inspection.
func TestInspectSnapshotArchiveRejectsUnsafePath(t *testing.T) {
	t.Parallel()
	archive := filepath.Join(t.TempDir(), "bad.tar.gz")
	manifest := manifestForEntries([]snapshotArchiveEntry{{Path: "metadata.db", Data: []byte("db")}})
	stored := []snapshotArchiveEntry{{Path: "../escape", Data: []byte("oops")}}
	if err := writeSnapshotArchiveForTest(archive, manifest, stored, true); err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	_, _, err := inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "unsafe archive path") {
		t.Fatalf("expected unsafe archive path error, got %v", err)
	}
}
func TestInspectSnapshotArchiveChecksumMismatch(t *testing.T) {
t.Parallel()
archive := filepath.Join(t.TempDir(), "mismatch.tar.gz")
manifest := manifestForEntries([]snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("good")},
})
err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("bad")},
}, true)
if err != nil {
t.Fatalf("write test archive: %v", err)
}
_, _, err = inspectSnapshotArchive(archive)
if err == nil || !strings.Contains(err.Error(), "checksum mismatch") {
t.Fatalf("expected checksum mismatch error, got %v", err)
}
}
func TestInspectSnapshotArchiveMissingManifest(t *testing.T) {
t.Parallel()
archive := filepath.Join(t.TempDir(), "no-manifest.tar.gz")
err := writeSnapshotArchiveForTest(archive, nil, []snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
}, false)
if err != nil {
t.Fatalf("write test archive: %v", err)
}
_, _, err = inspectSnapshotArchive(archive)
if err == nil || !strings.Contains(err.Error(), "manifest.json not found") {
t.Fatalf("expected missing manifest error, got %v", err)
}
}
func TestInspectSnapshotArchiveUnsupportedFormat(t *testing.T) {
t.Parallel()
archive := filepath.Join(t.TempDir(), "unsupported-format.tar.gz")
manifest := manifestForEntries([]snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
})
manifest.FormatVersion = 99
err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
}, true)
if err != nil {
t.Fatalf("write test archive: %v", err)
}
_, _, err = inspectSnapshotArchive(archive)
if err == nil || !strings.Contains(err.Error(), "unsupported snapshot format version") {
t.Fatalf("expected unsupported format error, got %v", err)
}
}
// TestRestoreSnapshotArchiveDestinationBehavior covers the restore
// contract around an existing destination: refuse a non-empty destination
// without --force, and with --force replace its contents entirely.
func TestRestoreSnapshotArchiveDestinationBehavior(t *testing.T) {
	t.Parallel()
	root := t.TempDir()
	archive := filepath.Join(root, "ok.tar.gz")
	destination := filepath.Join(root, "dst")
	entries := []snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db-bytes")},
		{Path: "chunks/c1", Data: []byte("chunk-1")},
	}
	if err := writeSnapshotArchiveForTest(archive, manifestForEntries(entries), entries, true); err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	// Seed a non-empty destination.
	if err := os.MkdirAll(destination, 0o755); err != nil {
		t.Fatalf("mkdir destination: %v", err)
	}
	if err := os.WriteFile(filepath.Join(destination, "old.txt"), []byte("old"), 0o600); err != nil {
		t.Fatalf("seed destination: %v", err)
	}
	_, err := restoreSnapshotArchive(context.Background(), archive, destination, false)
	if err == nil || !strings.Contains(err.Error(), "not empty") {
		t.Fatalf("expected non-empty destination error, got %v", err)
	}
	if _, err := restoreSnapshotArchive(context.Background(), archive, destination, true); err != nil {
		t.Fatalf("restore with force: %v", err)
	}
	if _, err := os.Stat(filepath.Join(destination, "old.txt")); !os.IsNotExist(err) {
		t.Fatalf("expected old file to be removed, stat err=%v", err)
	}
	restored, err := os.ReadFile(filepath.Join(destination, "chunks/c1"))
	if err != nil {
		t.Fatalf("read restored chunk: %v", err)
	}
	if string(restored) != "chunk-1" {
		t.Fatalf("restored chunk mismatch: got %q", string(restored))
	}
}
// TestCreateSnapshotArchiveRejectsOutputInsideDataPath ensures the output
// tarball may not live inside the directory being archived.
func TestCreateSnapshotArchiveRejectsOutputInsideDataPath(t *testing.T) {
	t.Parallel()
	root := t.TempDir()
	if err := os.MkdirAll(filepath.Join(root, "chunks"), 0o755); err != nil {
		t.Fatalf("mkdir chunks: %v", err)
	}
	if err := createBoltDBForTest(filepath.Join(root, "metadata.db")); err != nil {
		t.Fatalf("create metadata db: %v", err)
	}
	if err := os.WriteFile(filepath.Join(root, "chunks/c1"), []byte("x"), 0o600); err != nil {
		t.Fatalf("write chunk: %v", err)
	}
	out := filepath.Join(root, "inside.tar.gz")
	_, err := createSnapshotArchive(context.Background(), root, out)
	if err == nil || !strings.Contains(err.Error(), "cannot be inside") {
		t.Fatalf("expected output-inside-data-path error, got %v", err)
	}
}
// writeSnapshotArchiveForTest builds a .tar.gz fixture at path. When
// includeManifest is true the manifest is stored first under
// snapshotManifestPath, followed by every entry in order. The tar and gzip
// writers are closed explicitly (tar before gzip) so buffered data is
// flushed and any close/flush error is returned to the caller instead of
// being silently dropped by a deferred close.
func writeSnapshotArchiveForTest(path string, manifest *snapshotManifest, entries []snapshotArchiveEntry, includeManifest bool) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	// Backstop close for error paths; the success path closes explicitly.
	defer file.Close()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	if includeManifest {
		raw, err := json.Marshal(manifest)
		if err != nil {
			return err
		}
		if err := writeTarEntry(tw, snapshotManifestPath, raw); err != nil {
			return err
		}
	}
	for _, entry := range entries {
		if err := writeTarEntry(tw, entry.Path, entry.Data); err != nil {
			return err
		}
	}
	if err := tw.Close(); err != nil {
		return err
	}
	if err := gzw.Close(); err != nil {
		return err
	}
	return file.Close()
}
func writeTarEntry(tw *tar.Writer, name string, data []byte) error {
header := &tar.Header{
Name: name,
Mode: 0o600,
Size: int64(len(data)),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err := ioCopyBytes(tw, data)
return err
}
// manifestForEntries derives a snapshotManifest describing the given test
// entries, with a fixed creation time and source path for determinism.
func manifestForEntries(entries []snapshotArchiveEntry) *snapshotManifest {
	files := make([]snapshotFileEntry, 0, len(entries))
	for _, entry := range entries {
		digest := sha256.Sum256(entry.Data)
		files = append(files, snapshotFileEntry{
			Path:   filepath.ToSlash(filepath.Clean(entry.Path)),
			Size:   int64(len(entry.Data)),
			SHA256: hex.EncodeToString(digest[:]),
		})
	}
	return &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     "2026-03-11T00:00:00Z",
		SourcePath:    "/tmp/source",
		Files:         files,
	}
}
// createBoltDBForTest initializes a minimal bbolt database at path with a
// single bucket so the path passes metadata checks.
func createBoltDBForTest(path string) error {
	db, err := bolt.Open(path, 0o600, nil)
	if err != nil {
		return err
	}
	defer db.Close()
	seed := func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("x"))
		return err
	}
	return db.Update(seed)
}
func ioCopyBytes(w *tar.Writer, data []byte) (int64, error) {
n, err := bytes.NewReader(data).WriteTo(w)
return n, err
}

388
cmd/admin_user.go Normal file
View File

@@ -0,0 +1,388 @@
package cmd
import (
"context"
"fmt"
"strings"
"github.com/spf13/cobra"
)
// newAdminUserCommand groups all "fs admin user" subcommands.
func newAdminUserCommand(opts *adminOptions) *cobra.Command {
	root := &cobra.Command{
		Use:   "user",
		Short: "Manage auth users",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	for _, sub := range []*cobra.Command{
		newAdminUserCreateCommand(opts),
		newAdminUserListCommand(opts),
		newAdminUserGetCommand(opts),
		newAdminUserDeleteCommand(opts),
		newAdminUserSetStatusCommand(opts),
		newAdminUserSetRoleCommand(opts),
		newAdminUserRemoveRoleCommand(opts),
	} {
		root.AddCommand(sub)
	}
	return root
}
// newAdminUserCreateCommand builds "fs admin user create": registers a new
// auth user with a role-derived policy and prints the (one-time) secret.
func newAdminUserCreateCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user create --access-key <id> --role admin|readwrite|readonly [--status active|disabled] [--bucket <name>] [--prefix <path>]"
	var (
		accessKey string
		secretKey string
		status    string
		role      string
		bucket    string
		prefix    string
	)
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a user",
		RunE: func(cmd *cobra.Command, args []string) error {
			accessKey = strings.TrimSpace(accessKey)
			if accessKey == "" {
				return usageError(usage, "--access-key is required")
			}
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError(usage, err.Error())
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			req := createUserRequest{
				AccessKeyID: accessKey,
				SecretKey:   strings.TrimSpace(secretKey),
				Status:      strings.TrimSpace(status),
				Policy:      policy,
			}
			out, err := client.CreateUser(context.Background(), req)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			if err := writeUserTable(cmd.OutOrStdout(), out, true); err != nil {
				return err
			}
			if strings.TrimSpace(out.SecretKey) != "" {
				// The API only returns the secret on create; warn the operator.
				_, _ = fmt.Fprintln(cmd.OutOrStdout(), "\nsecretKey is only returned once during create; store it securely.")
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&accessKey, "access-key", "", "User access key ID")
	cmd.Flags().StringVar(&secretKey, "secret-key", "", "User secret key (optional; auto-generated when omitted)")
	cmd.Flags().StringVar(&status, "status", "active", "User status: active|disabled")
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
// newAdminUserListCommand builds "fs admin user list" with cursor-based
// pagination (page size 1-1000).
func newAdminUserListCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user list [--limit 1-1000] [--cursor <token>]"
	var (
		limit  int
		cursor string
	)
	cmd := &cobra.Command{
		Use:   "list",
		Short: "List users",
		RunE: func(cmd *cobra.Command, args []string) error {
			if limit < 1 || limit > 1000 {
				return usageError(usage, "--limit must be between 1 and 1000")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			out, err := client.ListUsers(context.Background(), limit, cursor)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserListTable(cmd.OutOrStdout(), out)
		},
	}
	cmd.Flags().IntVar(&limit, "limit", 100, "List page size (1-1000)")
	cmd.Flags().StringVar(&cursor, "cursor", "", "Pagination cursor from previous list call")
	return cmd
}
// newAdminUserGetCommand builds "fs admin user get", fetching one user by
// access key ID.
func newAdminUserGetCommand(opts *adminOptions) *cobra.Command {
	c := &cobra.Command{
		Use:   "get <access-key-id>",
		Short: "Get one user",
		Args:  requireAccessKeyArg("fs admin user get <access-key-id>"),
		RunE: func(cmd *cobra.Command, args []string) error {
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			user, err := client.GetUser(context.Background(), args[0])
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), user)
			}
			return writeUserTable(cmd.OutOrStdout(), user, false)
		},
	}
	return c
}
// newAdminUserDeleteCommand builds "fs admin user delete", removing one
// user by access key ID.
func newAdminUserDeleteCommand(opts *adminOptions) *cobra.Command {
	c := &cobra.Command{
		Use:   "delete <access-key-id>",
		Short: "Delete one user",
		Args:  requireAccessKeyArg("fs admin user delete <access-key-id>"),
		RunE: func(cmd *cobra.Command, args []string) error {
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			if err := client.DeleteUser(context.Background(), args[0]); err != nil {
				return err
			}
			if opts.JSON {
				payload := map[string]string{
					"status":      "deleted",
					"accessKeyId": args[0],
				}
				return writeJSON(cmd.OutOrStdout(), payload)
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "deleted user %s\n", args[0])
			return err
		},
	}
	return c
}
// newAdminUserSetStatusCommand builds "fs admin user set-status", toggling
// a user between active and disabled.
func newAdminUserSetStatusCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user set-status <access-key-id> --status active|disabled"
	var status string
	cmd := &cobra.Command{
		Use:   "set-status <access-key-id>",
		Short: "Set user status",
		Args:  requireAccessKeyArg(usage),
		RunE: func(cmd *cobra.Command, args []string) error {
			normalized := strings.ToLower(strings.TrimSpace(status))
			switch normalized {
			case "":
				return usageError(usage, "--status is required")
			case "active", "disabled":
				// Valid values fall through to the API call.
			default:
				return usageError(usage, "--status must be active or disabled")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			out, err := client.SetUserStatus(context.Background(), args[0], normalized)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&status, "status", "", "User status: active|disabled")
	return cmd
}
// newAdminUserSetRoleCommand builds "fs admin user set-role": derives a
// policy statement from --role/--bucket/--prefix and either appends it to
// the user's existing policy or, with --replace, substitutes it entirely.
func newAdminUserSetRoleCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user set-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>] [--replace]"
	var (
		role    string
		bucket  string
		prefix  string
		replace bool
	)
	cmd := &cobra.Command{
		Use:   "set-role <access-key-id>",
		Short: "Add or replace user role policy statement",
		Args:  requireAccessKeyArg(usage),
		RunE: func(cmd *cobra.Command, args []string) error {
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError(usage, err.Error())
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			finalPolicy := policy
			if !replace {
				// Append semantics: merge the new statement into the
				// user's current policy instead of overwriting it.
				existing, err := client.GetUser(context.Background(), args[0])
				if err != nil {
					return err
				}
				finalPolicy = mergePolicyStatements(existing.Policy, policy)
			}
			out, err := client.SetUserPolicy(context.Background(), args[0], finalPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	cmd.Flags().BoolVar(&replace, "replace", false, "Replace all existing policy statements instead of appending")
	return cmd
}
// newAdminUserRemoveRoleCommand builds "fs admin user remove-role": drops
// the policy statement matching --role/--bucket/--prefix from the user's
// policy, refusing to remove the final remaining statement.
func newAdminUserRemoveRoleCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]"
	var (
		role   string
		bucket string
		prefix string
	)
	cmd := &cobra.Command{
		Use:   "remove-role <access-key-id>",
		Short: "Remove one role policy statement from user",
		Args:  requireAccessKeyArg(usage),
		RunE: func(cmd *cobra.Command, args []string) error {
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError(usage, err.Error())
			}
			if len(policy.Statements) == 0 {
				return usageError(usage, "no statement to remove")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			existing, err := client.GetUser(context.Background(), args[0])
			if err != nil {
				return err
			}
			if existing.Policy == nil || len(existing.Policy.Statements) == 0 {
				return fmt.Errorf("user %q has no policy statements", args[0])
			}
			target := policy.Statements[0]
			nextPolicy, removed := removePolicyStatements(existing.Policy, target)
			if removed == 0 {
				return fmt.Errorf("no matching statement found for role=%s bucket=%s prefix=%s", role, bucket, prefix)
			}
			if len(nextPolicy.Statements) == 0 {
				// Never leave a user with zero statements.
				return fmt.Errorf("cannot remove the last policy statement; add another role first or use set-role --replace")
			}
			out, err := client.SetUserPolicy(context.Background(), args[0], nextPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
// mergePolicyStatements returns a copy of the existing policy with the first
// statement of addition appended, unless an identical statement is already
// present. A nil existing policy yields a policy holding only the addition.
func mergePolicyStatements(existing *adminPolicy, addition adminPolicy) adminPolicy {
	var merged adminPolicy
	if existing != nil {
		merged.Principal = existing.Principal
		merged.Statements = append(merged.Statements, existing.Statements...)
	}
	if len(addition.Statements) == 0 {
		return merged
	}
	candidate := addition.Statements[0]
	for _, have := range merged.Statements {
		if policyStatementsEqual(have, candidate) {
			// Already present; nothing to add.
			return merged
		}
	}
	merged.Statements = append(merged.Statements, candidate)
	return merged
}
// policyStatementsEqual reports whether two policy statements are identical:
// same effect, bucket, prefix, and the same actions in the same order.
func policyStatementsEqual(a, b adminPolicyStatement) bool {
	switch {
	case a.Effect != b.Effect:
		return false
	case a.Bucket != b.Bucket:
		return false
	case a.Prefix != b.Prefix:
		return false
	case len(a.Actions) != len(b.Actions):
		return false
	}
	for i, action := range a.Actions {
		if b.Actions[i] != action {
			return false
		}
	}
	return true
}
// removePolicyStatements returns a copy of existing with every statement equal
// to target removed, together with the number of statements dropped. A nil
// existing policy yields an empty policy and a zero count.
func removePolicyStatements(existing *adminPolicy, target adminPolicyStatement) (adminPolicy, int) {
	var next adminPolicy
	if existing == nil {
		return next, 0
	}
	next.Principal = existing.Principal
	next.Statements = make([]adminPolicyStatement, 0, len(existing.Statements))
	dropped := 0
	for _, candidate := range existing.Statements {
		if policyStatementsEqual(candidate, target) {
			dropped++
		} else {
			next.Statements = append(next.Statements, candidate)
		}
	}
	return next, dropped
}
// requireAccessKeyArg returns a cobra argument validator that demands exactly
// one non-blank <access-key-id> positional argument, yielding a usage error
// (built from the supplied usage line) otherwise.
func requireAccessKeyArg(usage string) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		switch {
		case len(args) != 1:
			return usageError(usage, "missing or invalid <access-key-id> argument")
		case strings.TrimSpace(args[0]) == "":
			return usageError(usage, "access key id cannot be empty")
		default:
			return nil
		}
	}
}
// usageError builds an error combining a validation message with the
// command's usage line on a second line.
func usageError(usage, message string) error {
	text := message + "\nusage: " + usage
	return fmt.Errorf("%s", text)
}

20
cmd/buildinfo.go Normal file
View File

@@ -0,0 +1,20 @@
package cmd
// BuildInfo carries version metadata stamped into the binary at build time.
type BuildInfo struct {
	Version string
	Commit  string
	Date    string
}

// normalized returns a copy of b with every empty field replaced by its
// conventional placeholder ("dev", "none", "unknown").
func (b BuildInfo) normalized() BuildInfo {
	fill := func(field *string, placeholder string) {
		if *field == "" {
			*field = placeholder
		}
	}
	fill(&b.Version, "dev")
	fill(&b.Commit, "none")
	fill(&b.Date, "unknown")
	return b
}

26
cmd/execute.go Normal file
View File

@@ -0,0 +1,26 @@
package cmd
import (
"context"
"fs/app"
"os"
"os/signal"
"syscall"
)
// Execute is the CLI entry point. Invoked with no arguments it behaves like a
// plain server binary; otherwise it dispatches through the cobra command tree.
func Execute(build BuildInfo) error {
	build = build.normalized()
	if len(os.Args) == 1 {
		// Bare invocation: run the server directly, skipping CLI parsing.
		return runServerWithSignals()
	}
	return newRootCommand(build).Execute()
}

// runServerWithSignals runs the server until SIGINT or SIGTERM cancels it.
func runServerWithSignals() error {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	return app.RunServer(ctx)
}

36
cmd/root.go Normal file
View File

@@ -0,0 +1,36 @@
package cmd
import (
"fmt"
"runtime"
"github.com/spf13/cobra"
)
// newRootCommand assembles the top-level `fs` command tree: server, admin,
// and a version subcommand that also reports the Go runtime version.
func newRootCommand(build BuildInfo) *cobra.Command {
	root := &cobra.Command{
		Use:           "fs",
		Short:         "fs object storage server and admin CLI",
		SilenceUsage:  true,
		SilenceErrors: true,
		Version:       build.Version,
	}
	root.SetVersionTemplate(versionTemplate(build))
	versionCmd := &cobra.Command{
		Use:   "version",
		Short: "Print build and runtime version information",
		RunE: func(cmd *cobra.Command, args []string) error {
			_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n", build.Version, build.Commit, build.Date, runtime.Version())
			return err
		},
	}
	root.AddCommand(newServerCommand(), newAdminCommand(build), versionCmd)
	return root
}

// versionTemplate renders the --version output for the root command.
func versionTemplate(build BuildInfo) string {
	return fmt.Sprintf("version=%s commit=%s date=%s\n", build.Version, build.Commit, build.Date)
}

15
cmd/server.go Normal file
View File

@@ -0,0 +1,15 @@
package cmd
import (
"github.com/spf13/cobra"
)
// newServerCommand builds the `fs server` subcommand, which runs the object
// storage server until an interrupt or termination signal arrives.
func newServerCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "server",
		Short: "Run fs object storage server",
	}
	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		return runServerWithSignals()
	}
	return cmd
}

336
docs/admin-api-openapi.yaml Normal file
View File

@@ -0,0 +1,336 @@
openapi: 3.1.0
info:
title: fs Admin API
version: 1.0.0
description: |
JSON admin API for managing local users and policies.
Notes:
- Base path is `/_admin/v1`.
- Requests must be AWS SigV4 signed.
- Only the bootstrap access key is authorized for admin endpoints.
servers:
- url: http://localhost:2600
description: Local development
security:
- AwsSigV4: []
paths:
/_admin/v1/users:
post:
summary: Create user
operationId: createUser
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/CreateUserRequest'
responses:
'201':
description: User created
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'409':
$ref: '#/components/responses/UserAlreadyExists'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
get:
summary: List users
operationId: listUsers
parameters:
- name: limit
in: query
required: false
schema:
type: integer
minimum: 1
maximum: 1000
default: 100
- name: cursor
in: query
required: false
schema:
type: string
responses:
'200':
description: User summaries
content:
application/json:
schema:
$ref: '#/components/schemas/UserListResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}:
get:
summary: Get user with policy
operationId: getUser
parameters:
- $ref: '#/components/parameters/AccessKeyId'
responses:
'200':
description: User details
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
delete:
summary: Delete user
operationId: deleteUser
parameters:
- $ref: '#/components/parameters/AccessKeyId'
responses:
'204':
description: User deleted
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}/policy:
put:
summary: Replace user policy
operationId: setUserPolicy
parameters:
- $ref: '#/components/parameters/AccessKeyId'
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/SetPolicyRequest'
responses:
'200':
description: User details with updated policy
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}/status:
put:
summary: Set user status
operationId: setUserStatus
parameters:
- $ref: '#/components/parameters/AccessKeyId'
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/SetStatusRequest'
responses:
'200':
description: User details with updated status
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
components:
securitySchemes:
AwsSigV4:
type: apiKey
in: header
name: Authorization
description: |
AWS Signature Version 4 headers are required (`Authorization`, `x-amz-date`,
and for payload-signed requests `x-amz-content-sha256`).
Only bootstrap credential is authorized for admin endpoints.
parameters:
AccessKeyId:
name: accessKeyId
in: path
required: true
schema:
type: string
description: User access key ID
responses:
InvalidRequest:
description: Invalid request input
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
Forbidden:
description: Authenticated but not allowed
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
UserAlreadyExists:
description: User already exists
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
UserNotFound:
description: User not found
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
AuthDisabled:
description: Authentication subsystem disabled
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
InternalError:
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
schemas:
AdminError:
type: object
properties:
code:
type: string
message:
type: string
requestId:
type: string
required: [code, message]
PolicyStatement:
type: object
properties:
effect:
type: string
enum: [allow, deny]
actions:
type: array
items:
type: string
minItems: 1
bucket:
type: string
default: "*"
prefix:
type: string
default: "*"
required: [effect, actions]
Policy:
type: object
properties:
principal:
type: string
description: Server-managed; overwritten with target access key ID.
statements:
type: array
items:
$ref: '#/components/schemas/PolicyStatement'
minItems: 1
required: [statements]
CreateUserRequest:
type: object
properties:
accessKeyId:
type: string
secretKey:
type: string
description: If omitted, server generates one.
status:
type: string
enum: [active, disabled]
default: active
policy:
$ref: '#/components/schemas/Policy'
required: [accessKeyId, policy]
SetPolicyRequest:
type: object
properties:
policy:
$ref: '#/components/schemas/Policy'
required: [policy]
SetStatusRequest:
type: object
properties:
status:
type: string
enum: [active, disabled]
required: [status]
UserListItem:
type: object
properties:
accessKeyId:
type: string
status:
type: string
enum: [active, disabled]
createdAt:
type: integer
format: int64
updatedAt:
type: integer
format: int64
required: [accessKeyId, status, createdAt, updatedAt]
UserListResponse:
type: object
properties:
items:
type: array
items:
$ref: '#/components/schemas/UserListItem'
nextCursor:
type: string
required: [items]
UserResponse:
allOf:
- $ref: '#/components/schemas/UserListItem'
- type: object
properties:
policy:
$ref: '#/components/schemas/Policy'
secretKey:
type: string
description: Returned only on create.

53
docs/s3-compatibility.md Normal file
View File

@@ -0,0 +1,53 @@
# S3 Compatibility Matrix
This project is S3-compatible for a focused subset of operations.
## Implemented
### Service and account
- `GET /` list buckets
### Bucket
- `PUT /{bucket}` create bucket
- `HEAD /{bucket}` head bucket
- `DELETE /{bucket}` delete bucket (must be empty)
- `GET /{bucket}?list-type=2...` list objects v2
- `GET /{bucket}?location` get bucket location
- `POST /{bucket}?delete` delete multiple objects
### Object
- `PUT /{bucket}/{key}` put object
- `GET /{bucket}/{key}` get object
- `HEAD /{bucket}/{key}` head object
- `DELETE /{bucket}/{key}` delete object
- `GET /{bucket}/{key}` supports single-range requests
### Multipart upload
- `POST /{bucket}/{key}?uploads` initiate
- `PUT /{bucket}/{key}?uploadId=...&partNumber=N` upload part
- `GET /{bucket}/{key}?uploadId=...` list parts
- `POST /{bucket}/{key}?uploadId=...` complete
- `DELETE /{bucket}/{key}?uploadId=...` abort
### Authentication
- AWS SigV4 header auth
- AWS SigV4 presigned query auth
- `aws-chunked` payload decode for streaming uploads
## Partially Implemented / Differences
- Exact parity with AWS S3 error codes/headers is still evolving.
- Some S3 edge-case behaviors may differ (especially uncommon query/header combinations).
- Admin API is custom JSON (`/_admin/v1/*`).
## Not Implemented (Current)
- Bucket versioning
- Lifecycle rules
- Replication
- Object lock / legal hold / retention
- SSE-S3 / SSE-KMS / SSE-C
- ACL APIs and IAM-compatible policy APIs
- STS / temporary credentials
- Event notifications
- Tagging APIs
- CORS APIs
- Website hosting APIs

13
go.mod
View File

@@ -3,8 +3,15 @@ module fs
go 1.25.7
require (
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/klauspost/reedsolomon v1.13.2 // indirect
go.etcd.io/bbolt v1.4.3 // indirect
github.com/go-chi/chi/v5 v5.2.5
github.com/google/uuid v1.6.0
github.com/joho/godotenv v1.5.1
github.com/spf13/cobra v1.10.1
go.etcd.io/bbolt v1.4.3
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
golang.org/x/sys v0.41.0 // indirect
)

31
go.sum
View File

@@ -1,10 +1,29 @@
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/reedsolomon v1.13.2 h1:9qtQy2tKEVpVB8Pfq87ZljHZb60/LbeTQ1OxV8EGzdE=
github.com/klauspost/reedsolomon v1.13.2/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

207
logging/logging.go Normal file
View File

@@ -0,0 +1,207 @@
package logging
import (
"fs/metrics"
"log/slog"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
// Config captures logging behavior resolved from the environment or from
// explicit values (see ConfigFromEnv / ConfigFromValues).
type Config struct {
	Level     slog.Level // minimum level emitted by the handler
	LevelName string     // canonical upper-case name of Level (e.g. "INFO")
	Format    string     // output format: "text" or "json"
	Audit     bool       // when true, every HTTP request is logged at info level
	AddSource bool       // attach source file:line to records (enabled in debug)
	DebugMode bool       // true when Level is debug or lower
}
// ConfigFromEnv builds a logging Config from the LOG_LEVEL, LOG_FORMAT and
// AUDIT_LOG environment variables; audit logging defaults to enabled.
func ConfigFromEnv() Config {
	level := strings.ToLower(strings.TrimSpace(os.Getenv("LOG_LEVEL")))
	format := strings.ToLower(strings.TrimSpace(os.Getenv("LOG_FORMAT")))
	audit := envBool("AUDIT_LOG", true)
	return ConfigFromValues(level, format, audit)
}
// ConfigFromValues normalizes raw level/format strings into a Config.
// Unknown or empty levels fall back to info; unknown or empty formats fall
// back to "text". Debug level additionally enables source locations.
func ConfigFromValues(levelName, format string, audit bool) Config {
	levelName = strings.ToLower(strings.TrimSpace(levelName))
	if levelName == "" {
		levelName = "info"
	}
	level := parseLevel(levelName)
	format = strings.ToLower(strings.TrimSpace(format))
	switch format {
	case "json", "text":
		// recognized formats pass through unchanged
	default:
		format = "text"
	}
	debug := level <= slog.LevelDebug
	return Config{
		Level:     level,
		LevelName: strings.ToUpper(level.String()),
		Format:    format,
		Audit:     audit,
		AddSource: debug,
		DebugMode: debug,
	}
}
// NewLogger constructs a slog.Logger per cfg, installs it as the process-wide
// default, and returns it. Source attributes are rewritten into a compact
// "file.go:line" form under the shorter "src" key.
func NewLogger(cfg Config) *slog.Logger {
	rewriteSource := func(_ []string, attr slog.Attr) slog.Attr {
		if attr.Key != slog.SourceKey {
			return attr
		}
		src, ok := attr.Value.Any().(*slog.Source)
		if !ok || src == nil {
			return attr
		}
		attr.Key = "src"
		attr.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
		return attr
	}
	opts := &slog.HandlerOptions{
		Level:       cfg.Level,
		AddSource:   cfg.AddSource,
		ReplaceAttr: rewriteSource,
	}
	var handler slog.Handler
	if cfg.Format == "json" {
		handler = slog.NewJSONHandler(os.Stdout, opts)
	} else {
		handler = slog.NewTextHandler(os.Stdout, opts)
	}
	logger := slog.New(handler)
	slog.SetDefault(logger)
	return logger
}
// HTTPMiddleware returns a chi-compatible middleware that records per-request
// metrics (per-operation in-flight gauge, request/latency/size observations)
// and, depending on cfg, emits an audit (info) or debug log line per request.
func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			// Wrap the writer so status code and bytes written can be read back
			// after the handler runs.
			ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
			op := metricOperationLabel(r)
			metrics.Default.IncHTTPInFlightOp(op)
			defer func() {
				metrics.Default.DecHTTPInFlightOp(op)
			}()
			requestID := middleware.GetReqID(r.Context())
			if requestID != "" {
				// Surface the request id in the S3-style response header before
				// the handler writes the status line.
				ww.Header().Set("x-amz-request-id", requestID)
			}
			next.ServeHTTP(ww, r)
			elapsed := time.Since(start)
			status := ww.Status()
			if status == 0 {
				// Handler never wrote a header; treat as the implicit 200.
				status = http.StatusOK
			}
			route := metricRouteLabel(r)
			metrics.Default.ObserveHTTPRequestDetailed(r.Method, route, op, status, elapsed, ww.BytesWritten())
			// Skip logging entirely unless audit logging or debug mode is on.
			if !cfg.Audit && !cfg.DebugMode {
				return
			}
			attrs := []any{
				"method", r.Method,
				"path", r.URL.Path,
				"status", status,
				"bytes", ww.BytesWritten(),
				"duration_ms", float64(elapsed.Nanoseconds()) / 1_000_000.0,
				"remote_addr", r.RemoteAddr,
			}
			if requestID != "" {
				attrs = append(attrs, "request_id", requestID)
			}
			if cfg.DebugMode {
				// Debug mode adds extra request detail and logs at debug level.
				attrs = append(attrs,
					"query", r.URL.RawQuery,
					"user_agent", r.UserAgent(),
					"content_length", r.ContentLength,
					"content_type", r.Header.Get("Content-Type"),
					"x_amz_sha256", r.Header.Get("x-amz-content-sha256"),
				)
				logger.Debug("http_request", attrs...)
				return
			}
			logger.Info("http_request", attrs...)
		})
	}
}
// metricRouteLabel derives a low-cardinality route label for metrics. It
// prefers the chi route pattern when present; otherwise it collapses raw
// paths into "/", "/healthz", "/metrics", "/{bucket}" or "/{bucket}/*".
func metricRouteLabel(r *http.Request) string {
	if r == nil || r.URL == nil {
		return "/unknown"
	}
	if rc := chi.RouteContext(r.Context()); rc != nil {
		if pattern := strings.TrimSpace(rc.RoutePattern()); pattern != "" {
			return pattern
		}
	}
	path := strings.TrimSpace(r.URL.Path)
	switch path {
	case "", "/":
		return "/"
	case "/healthz", "/metrics":
		return path
	}
	segments := strings.Trim(path, "/")
	switch {
	case segments == "":
		return "/"
	case strings.Contains(segments, "/"):
		return "/{bucket}/*"
	default:
		return "/{bucket}"
	}
}
// metricOperationLabel classifies a request into a coarse operation label.
// A POST carrying a ?delete query parameter is treated as a delete; other
// methods are mapped by metrics.NormalizeHTTPOperation.
func metricOperationLabel(r *http.Request) string {
	if r == nil {
		return "other"
	}
	deletePost := false
	if r.Method == http.MethodPost && r.URL != nil {
		_, deletePost = r.URL.Query()["delete"]
	}
	return metrics.NormalizeHTTPOperation(r.Method, deletePost)
}
// envBool reads a boolean environment variable, returning defaultValue when
// the variable is unset or does not parse as a boolean.
func envBool(key string, defaultValue bool) bool {
	if raw := os.Getenv(key); raw != "" {
		if parsed, err := strconv.ParseBool(raw); err == nil {
			return parsed
		}
	}
	return defaultValue
}
// parseLevel maps a lower-case level name to a slog.Level, defaulting to
// info for unrecognized names. Both "warn" and "warning" are accepted.
func parseLevel(levelName string) slog.Level {
	switch levelName {
	case "debug":
		return slog.LevelDebug
	case "warn", "warning":
		return slog.LevelWarn
	case "error":
		return slog.LevelError
	}
	return slog.LevelInfo
}

View File

@@ -0,0 +1,30 @@
package logging
import (
"net/http/httptest"
"testing"
)
// TestMetricRouteLabelFallbacks exercises the path-based fallback labels used
// by metricRouteLabel when no chi route pattern is on the request context.
func TestMetricRouteLabelFallbacks(t *testing.T) {
	testCases := []struct {
		name string
		path string
		want string
	}{
		{name: "root", path: "/", want: "/"},
		{name: "health", path: "/healthz", want: "/healthz"},
		{name: "metrics", path: "/metrics", want: "/metrics"},
		{name: "bucket", path: "/some-bucket", want: "/{bucket}"},
		{name: "object", path: "/some-bucket/private/path/file.jpg", want: "/{bucket}/*"},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// httptest requests carry no chi route context, forcing the fallback.
			req := httptest.NewRequest("GET", tc.path, nil)
			got := metricRouteLabel(req)
			if got != tc.want {
				t.Fatalf("metricRouteLabel(%q) = %q, want %q", tc.path, got, tc.want)
			}
		})
	}
}

58
main.go
View File

@@ -1,54 +1,24 @@
package main
import (
"fmt"
"fs/metadata"
"fs/service"
"io"
"fs/cmd"
"os"
)
var (
version = "dev"
commit = "none"
date = "unknown"
)
func main() {
fmt.Println("Hello, World!")
imageStream, err := os.Open("fer.jpg")
if err != nil {
fmt.Printf("Error opening image stream: %v\n", err)
return
build := cmd.BuildInfo{
Version: version,
Commit: commit,
Date: date,
}
defer imageStream.Close()
metadataHandler, err := metadata.NewMetadataHandler("metadata.db")
if err != nil {
fmt.Printf("Error initializing metadata handler: %v\n", err)
return
if err := cmd.Execute(build); err != nil {
_, _ = os.Stderr.WriteString(err.Error() + "\n")
os.Exit(1)
}
objectService := service.NewObjectService(metadataHandler)
manifest, err := objectService.PutObject("test-bucket-ferdzo/fer.jpg", "image/jpeg", imageStream)
if err != nil {
fmt.Printf("Error ingesting stream: %v\n", err)
return
}
fmt.Printf("Manifest: %+v\n", manifest)
objectData, manifest2, err := objectService.GetObject("test-bucket-ferdzo", "fer.jpg")
if err != nil {
fmt.Printf("Error retrieving object: %v\n", err)
return
}
fmt.Printf("Retrieved manifest: %+v\n", manifest2)
recoveredFile, err := os.Create("recovered_" + manifest2.Key)
if err != nil {
fmt.Printf("Error creating file: %v\n", err)
return
}
defer recoveredFile.Close()
bytesWritten, err := io.Copy(recoveredFile, objectData)
if err != nil {
fmt.Printf("Error streaming to recovered file: %v\n", err)
return
}
fmt.Printf("Successfully streamed %d bytes to disk!\n", bytesWritten)
}

File diff suppressed because it is too large Load Diff

795
metrics/metrics.go Normal file
View File

@@ -0,0 +1,795 @@
package metrics
import (
"fmt"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
// defaultBuckets covers request/operation latencies from 0.5ms up to 10s.
var defaultBuckets = []float64{
	0.0005, 0.001, 0.0025, 0.005, 0.01,
	0.025, 0.05, 0.1, 0.25, 0.5,
	1, 2.5, 5, 10,
}

// lockBuckets uses a finer range (1µs up to 1s) for lock wait/hold times.
var lockBuckets = []float64{
	0.000001, 0.000005, 0.00001, 0.00005,
	0.0001, 0.0005, 0.001, 0.005, 0.01,
	0.025, 0.05, 0.1, 0.25, 0.5, 1,
}

// batchBuckets tracks batch sizes (item counts rather than seconds).
var batchBuckets = []float64{1, 2, 4, 8, 16, 32, 64, 100, 128, 256, 512, 1000, 5000}

// Default is the process-wide registry used by the HTTP middleware and services.
var Default = NewRegistry()
// histogram is a fixed-bucket histogram. counts[i] holds observations that
// landed in bucket i; the final extra slot collects values above every bound.
// It is not goroutine-safe on its own; callers synchronize externally.
type histogram struct {
	bounds []float64
	counts []uint64
	sum    float64
	count  uint64
}

// newHistogram builds a histogram over a private copy of bounds.
func newHistogram(bounds []float64) *histogram {
	owned := make([]float64, len(bounds))
	copy(owned, bounds)
	return &histogram{
		bounds: owned,
		counts: make([]uint64, len(owned)+1),
	}
}

// observe records v into the first bucket whose upper bound is >= v, or into
// the overflow slot when v exceeds every bound.
func (h *histogram) observe(v float64) {
	h.count++
	h.sum += v
	bucket := len(h.counts) - 1 // overflow slot by default
	for i, bound := range h.bounds {
		if v <= bound {
			bucket = i
			break
		}
	}
	h.counts[bucket]++
}

// snapshot returns independent copies of the bucket layout and counters.
func (h *histogram) snapshot() (bounds []float64, counts []uint64, sum float64, count uint64) {
	bounds = append([]float64(nil), h.bounds...)
	counts = append([]uint64(nil), h.counts...)
	return bounds, counts, h.sum, h.count
}
// Registry aggregates process metrics: a few atomic gauges for hot paths plus
// mutex-guarded maps of counters and histograms. Map keys are composite,
// "|"-joined strings (e.g. "op|result"); see the Observe* methods for exact
// key layouts.
type Registry struct {
	startedAt              time.Time     // process start time
	httpInFlight           atomic.Int64  // total HTTP requests currently in flight
	connectionPoolActive   atomic.Int64  // connections currently checked out
	connectionPoolMax      atomic.Int64  // configured pool capacity
	connectionPoolWaits    atomic.Uint64 // callers that had to wait for a connection
	requestQueueLength     atomic.Int64  // queued requests gauge
	mu                     sync.Mutex    // guards every field below
	httpRequestsRoute      map[string]uint64     // "method|route|status" -> requests
	httpResponseBytesRoute map[string]uint64     // "method|route|status" -> bytes written
	httpDurationRoute      map[string]*histogram // "method|route" -> latency
	httpRequestsOp         map[string]uint64     // "op|result" -> requests
	httpDurationOp         map[string]*histogram // "op|result" -> latency
	httpInFlightOp         map[string]int64      // op -> in-flight gauge
	authRequests           map[string]uint64     // "result|authType|reason" -> count
	serviceOps             map[string]uint64     // "operation|result" -> count
	serviceDuration        map[string]*histogram // operation -> latency
	dbTxTotal              map[string]uint64     // "txType|result" -> count
	dbTxDuration           map[string]*histogram // txType -> latency
	blobOps                map[string]uint64     // "op|backend|result" -> count
	blobBytes              map[string]uint64     // op -> total bytes moved
	blobDuration           map[string]*histogram // "op|backend|result" -> latency
	lockWait               map[string]*histogram // lock name -> wait time
	lockHold               map[string]*histogram // lock name -> hold time
	cacheHits              map[string]uint64     // cache name -> hits
	cacheMisses            map[string]uint64     // cache name -> misses
	batchSize              *histogram            // batch item counts
	retries                map[string]uint64     // "op|reason" -> retry count
	errors                 map[string]uint64     // "op|reason" -> error count
	gcRuns                 map[string]uint64     // "ok"/"error" -> GC run count
	gcDuration             *histogram            // GC run duration
	gcDeletedChunks        uint64                // chunks deleted across all runs
	gcDeleteErrors         uint64                // chunk delete failures
	gcCleanedUpload        uint64                // stale uploads cleaned up
}
// NewRegistry returns a Registry with its start time stamped and every
// counter/histogram map initialized, ready for concurrent use.
func NewRegistry() *Registry {
	return &Registry{
		startedAt:              time.Now(),
		httpRequestsRoute:      make(map[string]uint64),
		httpResponseBytesRoute: make(map[string]uint64),
		httpDurationRoute:      make(map[string]*histogram),
		httpRequestsOp:         make(map[string]uint64),
		httpDurationOp:         make(map[string]*histogram),
		httpInFlightOp:         make(map[string]int64),
		authRequests:           make(map[string]uint64),
		serviceOps:             make(map[string]uint64),
		serviceDuration:        make(map[string]*histogram),
		dbTxTotal:              make(map[string]uint64),
		dbTxDuration:           make(map[string]*histogram),
		blobOps:                make(map[string]uint64),
		blobBytes:              make(map[string]uint64),
		blobDuration:           make(map[string]*histogram),
		lockWait:               make(map[string]*histogram),
		lockHold:               make(map[string]*histogram),
		cacheHits:              make(map[string]uint64),
		cacheMisses:            make(map[string]uint64),
		// batchSize and gcDuration are singleton histograms, created eagerly.
		batchSize:              newHistogram(batchBuckets),
		retries:                make(map[string]uint64),
		errors:                 make(map[string]uint64),
		gcRuns:                 make(map[string]uint64),
		gcDuration:             newHistogram(defaultBuckets),
	}
}
// NormalizeHTTPOperation maps an HTTP method onto a coarse operation label
// (get/put/delete/head/other). POST counts as "delete" when isDeletePost is
// set (batch delete requests) and as "put" otherwise.
func NormalizeHTTPOperation(method string, isDeletePost bool) string {
	method = strings.ToUpper(strings.TrimSpace(method))
	if method == "POST" {
		if isDeletePost {
			return "delete"
		}
		return "put"
	}
	switch method {
	case "GET", "PUT", "DELETE", "HEAD":
		return strings.ToLower(method)
	}
	return "other"
}

// statusResult collapses an HTTP status into "ok" (2xx/3xx) or "error".
func statusResult(status int) string {
	if status < 200 || status >= 400 {
		return "error"
	}
	return "ok"
}
// normalizeRoute trims whitespace and maps an empty route to "/unknown".
func normalizeRoute(route string) string {
	if trimmed := strings.TrimSpace(route); trimmed != "" {
		return trimmed
	}
	return "/unknown"
}

// normalizeOp lower-cases and trims an operation label, defaulting to "other".
func normalizeOp(op string) string {
	if trimmed := strings.ToLower(strings.TrimSpace(op)); trimmed != "" {
		return trimmed
	}
	return "other"
}
// IncHTTPInFlight bumps the aggregate in-flight request gauge.
func (r *Registry) IncHTTPInFlight() {
	r.httpInFlight.Add(1)
}

// DecHTTPInFlight drops the aggregate in-flight request gauge.
func (r *Registry) DecHTTPInFlight() {
	r.httpInFlight.Add(-1)
}

// IncHTTPInFlightOp bumps both the aggregate gauge and the per-operation one.
func (r *Registry) IncHTTPInFlightOp(op string) {
	r.httpInFlight.Add(1)
	key := normalizeOp(op)
	r.mu.Lock()
	defer r.mu.Unlock()
	r.httpInFlightOp[key]++
}

// DecHTTPInFlightOp drops both gauges; the per-operation gauge is clamped at
// zero so unbalanced inc/dec pairs cannot drive it negative.
func (r *Registry) DecHTTPInFlightOp(op string) {
	r.httpInFlight.Add(-1)
	key := normalizeOp(op)
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.httpInFlightOp[key]--; r.httpInFlightOp[key] < 0 {
		r.httpInFlightOp[key] = 0
	}
}
// ObserveHTTPRequest records a request using the default method-to-operation
// mapping (plain POST treated as "put").
func (r *Registry) ObserveHTTPRequest(method, route string, status int, d time.Duration, responseBytes int) {
	r.ObserveHTTPRequestDetailed(method, route, NormalizeHTTPOperation(method, false), status, d, responseBytes)
}

// ObserveHTTPRequestDetailed records one request into both the per-route and
// per-operation counters and latency histograms.
func (r *Registry) ObserveHTTPRequestDetailed(method, route, op string, status int, d time.Duration, responseBytes int) {
	route = normalizeRoute(route)
	op = normalizeOp(op)
	routeKey := method + "|" + route + "|" + strconv.Itoa(status)
	routeDurKey := method + "|" + route
	opKey := op + "|" + statusResult(status)
	seconds := d.Seconds()
	r.mu.Lock()
	defer r.mu.Unlock()
	r.httpRequestsRoute[routeKey]++
	if responseBytes > 0 {
		r.httpResponseBytesRoute[routeKey] += uint64(responseBytes)
	}
	routeHist := r.httpDurationRoute[routeDurKey]
	if routeHist == nil {
		routeHist = newHistogram(defaultBuckets)
		r.httpDurationRoute[routeDurKey] = routeHist
	}
	routeHist.observe(seconds)
	r.httpRequestsOp[opKey]++
	opHist := r.httpDurationOp[opKey]
	if opHist == nil {
		opHist = newHistogram(defaultBuckets)
		r.httpDurationOp[opKey] = opHist
	}
	opHist.observe(seconds)
}
// ObserveAuth counts one authentication outcome keyed by result, auth type
// and failure reason; blank fields default to "unknown"/"none".
func (r *Registry) ObserveAuth(result, authType, reason string) {
	if authType = strings.TrimSpace(authType); authType == "" {
		authType = "unknown"
	}
	if reason = strings.TrimSpace(reason); reason == "" {
		reason = "none"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.authRequests[result+"|"+authType+"|"+reason]++
}
// observeOpResult is the shared implementation behind ObserveService and
// ObserveMetadataTx (their bodies were identical): totals is keyed
// "<name>|ok" / "<name>|error", while hists is keyed by name alone.
func (r *Registry) observeOpResult(totals map[string]uint64, hists map[string]*histogram, name string, d time.Duration, ok bool) {
	result := "error"
	if ok {
		result = "ok"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	totals[name+"|"+result]++
	h := hists[name]
	if h == nil {
		h = newHistogram(defaultBuckets)
		hists[name] = h
	}
	h.observe(d.Seconds())
}

// ObserveService records a service-layer operation outcome and latency.
func (r *Registry) ObserveService(operation string, d time.Duration, ok bool) {
	r.observeOpResult(r.serviceOps, r.serviceDuration, operation, d, ok)
}

// ObserveMetadataTx records a metadata DB transaction outcome and latency.
func (r *Registry) ObserveMetadataTx(txType string, d time.Duration, ok bool) {
	r.observeOpResult(r.dbTxTotal, r.dbTxDuration, txType, d, ok)
}
// ObserveBlob records a blob-store operation: a counter and a duration
// histogram keyed by "<op>|<backend>|<ok|error>", plus total bytes moved per
// operation. backend is an optional variadic argument defaulting to "disk".
// (The original duplicated the key into two identical variables; one suffices.)
func (r *Registry) ObserveBlob(operation string, d time.Duration, bytes int64, ok bool, backend ...string) {
	be := "disk"
	if len(backend) > 0 {
		if candidate := strings.TrimSpace(backend[0]); candidate != "" {
			be = strings.ToLower(candidate)
		}
	}
	result := "error"
	if ok {
		result = "ok"
	}
	op := strings.ToLower(strings.TrimSpace(operation))
	if op == "" {
		op = "unknown"
	}
	// A single composite key feeds both the ops counter and the histogram.
	key := op + "|" + be + "|" + result
	r.mu.Lock()
	defer r.mu.Unlock()
	r.blobOps[key]++
	h := r.blobDuration[key]
	if h == nil {
		h = newHistogram(defaultBuckets)
		r.blobDuration[key] = h
	}
	h.observe(d.Seconds())
	if bytes > 0 {
		// Byte totals are keyed by operation only, not backend or result.
		r.blobBytes[op] += uint64(bytes)
	}
}
// SetConnectionPoolMax records the configured pool capacity, clamped at zero.
func (r *Registry) SetConnectionPoolMax(max int) {
	if max < 0 {
		max = 0
	}
	r.connectionPoolMax.Store(int64(max))
}

// IncConnectionPoolActive notes one more connection checked out of the pool.
func (r *Registry) IncConnectionPoolActive() { r.connectionPoolActive.Add(1) }

// DecConnectionPoolActive notes one connection returned to the pool.
func (r *Registry) DecConnectionPoolActive() { r.connectionPoolActive.Add(-1) }

// IncConnectionPoolWait counts a caller that had to wait for a connection.
func (r *Registry) IncConnectionPoolWait() { r.connectionPoolWaits.Add(1) }

// IncRequestQueueLength notes one more queued request.
func (r *Registry) IncRequestQueueLength() { r.requestQueueLength.Add(1) }

// DecRequestQueueLength notes one request leaving the queue.
func (r *Registry) DecRequestQueueLength() { r.requestQueueLength.Add(-1) }
func (r *Registry) ObserveLockWait(lockName string, d time.Duration) {
lockName = strings.TrimSpace(lockName)
if lockName == "" {
lockName = "unknown"
}
r.mu.Lock()
h := r.lockWait[lockName]
if h == nil {
h = newHistogram(lockBuckets)
r.lockWait[lockName] = h
}
h.observe(d.Seconds())
r.mu.Unlock()
}
// ObserveLockHold feeds one lock-hold duration into the per-lock hold
// histogram; a blank lock name is recorded as "unknown".
func (r *Registry) ObserveLockHold(lockName string, d time.Duration) {
	name := strings.TrimSpace(lockName)
	if name == "" {
		name = "unknown"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	hist := r.lockHold[name]
	if hist == nil {
		hist = newHistogram(lockBuckets)
		r.lockHold[name] = hist
	}
	hist.observe(d.Seconds())
}
// ObserveCacheHit counts one hit for the named cache; a blank name is
// recorded as "unknown".
func (r *Registry) ObserveCacheHit(cache string) {
	cache = strings.TrimSpace(cache)
	if cache == "" {
		cache = "unknown"
	}
	r.mu.Lock()
	r.cacheHits[cache]++
	r.mu.Unlock()
}

// ObserveCacheMiss counts one miss for the named cache; a blank name
// is recorded as "unknown".
func (r *Registry) ObserveCacheMiss(cache string) {
	cache = strings.TrimSpace(cache)
	if cache == "" {
		cache = "unknown"
	}
	r.mu.Lock()
	r.cacheMisses[cache]++
	r.mu.Unlock()
}
// ObserveBatchSize records one batch-size sample in the shared batch
// histogram; negative sizes are clamped to zero.
func (r *Registry) ObserveBatchSize(size int) {
	sample := size
	if sample < 0 {
		sample = 0
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.batchSize.observe(float64(sample))
}
// ObserveRetry counts one retry, keyed by normalized operation name
// and reason; a blank reason is recorded as "unknown".
func (r *Registry) ObserveRetry(op, reason string) {
	op = normalizeOp(op)
	reason = strings.TrimSpace(reason)
	if reason == "" {
		reason = "unknown"
	}
	key := op + "|" + reason
	r.mu.Lock()
	r.retries[key]++
	r.mu.Unlock()
}

// ObserveError counts one error, keyed by normalized operation name
// and reason; a blank reason is recorded as "unknown".
func (r *Registry) ObserveError(op, reason string) {
	op = normalizeOp(op)
	reason = strings.TrimSpace(reason)
	if reason == "" {
		reason = "unknown"
	}
	key := op + "|" + reason
	r.mu.Lock()
	r.errors[key]++
	r.mu.Unlock()
}
// ObserveGC records the outcome of one garbage-collection run: a
// run counter keyed by result, the run duration, and running totals
// of deleted chunks, delete errors, and cleaned multipart uploads.
// Negative deltas are ignored rather than subtracted.
func (r *Registry) ObserveGC(d time.Duration, deletedChunks, deleteErrors, cleanedUploads int, ok bool) {
	result := "error"
	if ok {
		result = "ok"
	}
	r.mu.Lock()
	r.gcRuns[result]++
	r.gcDuration.observe(d.Seconds())
	if deletedChunks > 0 {
		r.gcDeletedChunks += uint64(deletedChunks)
	}
	if deleteErrors > 0 {
		r.gcDeleteErrors += uint64(deleteErrors)
	}
	if cleanedUploads > 0 {
		r.gcCleanedUpload += uint64(cleanedUploads)
	}
	r.mu.Unlock()
}
// RenderPrometheus serializes every registered metric into the
// Prometheus text exposition format and returns it as one string.
// All mutex-guarded maps are snapshotted under r.mu first so that the
// comparatively slow formatting below runs without holding the lock.
func (r *Registry) RenderPrometheus() string {
	now := time.Now()
	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	r.mu.Lock()
	// Snapshot phase: copy every guarded map/histogram; rendering
	// operates on these copies only.
	httpReqRoute := copyCounterMap(r.httpRequestsRoute)
	httpRespRoute := copyCounterMap(r.httpResponseBytesRoute)
	httpDurRoute := copyHistogramMap(r.httpDurationRoute)
	httpReqOp := copyCounterMap(r.httpRequestsOp)
	httpDurOp := copyHistogramMap(r.httpDurationOp)
	httpInFlightOp := copyIntGaugeMap(r.httpInFlightOp)
	authReq := copyCounterMap(r.authRequests)
	serviceOps := copyCounterMap(r.serviceOps)
	serviceDur := copyHistogramMap(r.serviceDuration)
	dbTx := copyCounterMap(r.dbTxTotal)
	dbTxDur := copyHistogramMap(r.dbTxDuration)
	blobOps := copyCounterMap(r.blobOps)
	blobBytes := copyCounterMap(r.blobBytes)
	blobDur := copyHistogramMap(r.blobDuration)
	lockWait := copyHistogramMap(r.lockWait)
	lockHold := copyHistogramMap(r.lockHold)
	cacheHits := copyCounterMap(r.cacheHits)
	cacheMisses := copyCounterMap(r.cacheMisses)
	batchBounds, batchCounts, batchSum, batchCount := r.batchSize.snapshot()
	retries := copyCounterMap(r.retries)
	errorsTotal := copyCounterMap(r.errors)
	gcRuns := copyCounterMap(r.gcRuns)
	gcDurBounds, gcDurCounts, gcDurSum, gcDurCount := r.gcDuration.snapshot()
	gcDeletedChunks := r.gcDeletedChunks
	gcDeleteErrors := r.gcDeleteErrors
	gcCleanedUploads := r.gcCleanedUpload
	r.mu.Unlock()
	// Atomic gauges/counters are read outside the mutex.
	connectionActive := float64(r.connectionPoolActive.Load())
	connectionMax := float64(r.connectionPoolMax.Load())
	connectionWaits := r.connectionPoolWaits.Load()
	queueLength := float64(r.requestQueueLength.Load())
	// OS-level process stats are best-effort (see the readers below).
	resident, hasResident := readResidentMemoryBytes()
	cpuSeconds, hasCPU := readProcessCPUSeconds()
	var b strings.Builder
	// Safe to mutate: httpInFlightOp is a copy. "all" carries the
	// aggregate in-flight count across every operation.
	httpInFlightOp["all"] = r.httpInFlight.Load()
	writeGaugeVecFromInt64(&b, "fs_http_inflight_requests", "Current in-flight HTTP requests by operation.", httpInFlightOp, "op")
	writeCounterVecKV(&b, "fs_http_requests_total", "Total HTTP requests by operation and result.", httpReqOp, []string{"op", "result"})
	writeHistogramVecKV(&b, "fs_http_request_duration_seconds", "HTTP request latency by operation and result.", httpDurOp, []string{"op", "result"})
	writeCounterVecKV(&b, "fs_http_requests_by_route_total", "Total HTTP requests by method/route/status.", httpReqRoute, []string{"method", "route", "status"})
	writeCounterVecKV(&b, "fs_http_response_bytes_total", "Total HTTP response bytes written.", httpRespRoute, []string{"method", "route", "status"})
	writeHistogramVecKV(&b, "fs_http_request_duration_by_route_seconds", "HTTP request latency by method/route.", httpDurRoute, []string{"method", "route"})
	writeCounterVecKV(&b, "fs_auth_requests_total", "Authentication attempts by result.", authReq, []string{"result", "auth_type", "reason"})
	writeCounterVecKV(&b, "fs_service_operations_total", "Service-level operation calls.", serviceOps, []string{"operation", "result"})
	writeHistogramVecKV(&b, "fs_service_operation_duration_seconds", "Service-level operation latency.", serviceDur, []string{"operation"})
	writeCounterVecKV(&b, "fs_metadata_tx_total", "Metadata transaction calls.", dbTx, []string{"type", "result"})
	writeHistogramVecKV(&b, "fs_metadata_tx_duration_seconds", "Metadata transaction latency.", dbTxDur, []string{"type"})
	writeHistogramVecKV(&b, "fs_blob_operation_duration_seconds", "Blob backend operation latency.", blobDur, []string{"op", "backend", "result"})
	writeCounterVecKV(&b, "fs_blob_operations_total", "Blob store operations.", blobOps, []string{"op", "backend", "result"})
	writeCounterVecKV(&b, "fs_blob_bytes_total", "Blob bytes processed by operation.", blobBytes, []string{"op"})
	writeGauge(&b, "fs_connection_pool_active", "Active pooled connections.", connectionActive)
	writeGauge(&b, "fs_connection_pool_max", "Maximum pooled connections.", connectionMax)
	writeCounter(&b, "fs_connection_pool_waits_total", "Number of waits due to pool saturation.", connectionWaits)
	writeGauge(&b, "fs_request_queue_length", "Requests waiting for an execution slot.", queueLength)
	writeHistogramVecKV(&b, "fs_lock_wait_seconds", "Time spent waiting for locks.", lockWait, []string{"lock_name"})
	writeHistogramVecKV(&b, "fs_lock_hold_seconds", "Time locks were held.", lockHold, []string{"lock_name"})
	writeCounterVecKV(&b, "fs_cache_hits_total", "Cache hits by cache name.", cacheHits, []string{"cache"})
	writeCounterVecKV(&b, "fs_cache_misses_total", "Cache misses by cache name.", cacheMisses, []string{"cache"})
	writeHistogram(&b, "fs_batch_size_histogram", "Observed batch sizes.", nil, batchBounds, batchCounts, batchSum, batchCount)
	writeCounterVecKV(&b, "fs_retries_total", "Retries by operation and reason.", retries, []string{"op", "reason"})
	writeCounterVecKV(&b, "fs_errors_total", "Errors by operation and reason.", errorsTotal, []string{"op", "reason"})
	writeCounterVecKV(&b, "fs_gc_runs_total", "Garbage collection runs.", gcRuns, []string{"result"})
	writeHistogram(&b, "fs_gc_duration_seconds", "Garbage collection runtime.", nil, gcDurBounds, gcDurCounts, gcDurSum, gcDurCount)
	writeCounter(&b, "fs_gc_deleted_chunks_total", "Deleted chunks during GC.", gcDeletedChunks)
	writeCounter(&b, "fs_gc_delete_errors_total", "Chunk delete errors during GC.", gcDeleteErrors)
	writeCounter(&b, "fs_gc_cleaned_uploads_total", "Cleaned multipart uploads during GC.", gcCleanedUploads)
	// Process / runtime self-metrics.
	writeGauge(&b, "fs_uptime_seconds", "Process uptime in seconds.", now.Sub(r.startedAt).Seconds())
	writeGauge(&b, "fs_runtime_goroutines", "Number of goroutines.", float64(runtime.NumGoroutine()))
	writeGaugeVec(&b, "fs_runtime_memory_bytes", "Runtime memory in bytes.", map[string]float64{
		"alloc":      float64(mem.Alloc),
		"total":      float64(mem.TotalAlloc),
		"sys":        float64(mem.Sys),
		"heap_alloc": float64(mem.HeapAlloc),
		"heap_sys":   float64(mem.HeapSys),
		"stack_sys":  float64(mem.StackSys),
	}, "type")
	writeCounter(&b, "fs_runtime_gc_cycles_total", "Completed GC cycles.", uint64(mem.NumGC))
	writeCounterFloat(&b, "fs_runtime_gc_pause_seconds_total", "Total GC pause time in seconds.", float64(mem.PauseTotalNs)/1e9)
	// Standard process_* metrics are emitted only when the platform
	// readers succeeded.
	if hasCPU {
		writeCounterFloat(&b, "process_cpu_seconds_total", "Total user and system CPU time spent in seconds.", cpuSeconds)
	}
	if hasResident {
		writeGauge(&b, "process_resident_memory_bytes", "Resident memory size in bytes.", resident)
	}
	return b.String()
}
// histogramSnapshot is an immutable copy of a histogram's state taken
// under the registry lock, rendered later without locking.
type histogramSnapshot struct {
	bounds []float64 // upper bucket bounds; one fewer than counts (last bucket is +Inf)
	counts []uint64  // per-bucket (non-cumulative) observation counts
	sum    float64   // sum of all observed values
	count  uint64    // total number of observations
}
// copyCounterMap returns an independent shallow copy of src so the
// caller can release the registry lock before reading the values.
func copyCounterMap(src map[string]uint64) map[string]uint64 {
	dst := make(map[string]uint64, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}
// copyIntGaugeMap returns an independent shallow copy of src so the
// caller can release the registry lock before reading the gauges.
func copyIntGaugeMap(src map[string]int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}
// copyHistogramMap snapshots every live histogram in src into an
// immutable histogramSnapshot map, so rendering can proceed after the
// registry lock is released.
func copyHistogramMap(src map[string]*histogram) map[string]histogramSnapshot {
	out := make(map[string]histogramSnapshot, len(src))
	for k, h := range src {
		bounds, counts, sum, count := h.snapshot()
		out[k] = histogramSnapshot{bounds: bounds, counts: counts, sum: sum, count: count}
	}
	return out
}
func writeCounter(b *strings.Builder, name, help string, value uint64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s counter\n", name)
fmt.Fprintf(b, "%s %d\n", name, value)
}
func writeCounterFloat(b *strings.Builder, name, help string, value float64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s counter\n", name)
fmt.Fprintf(b, "%s %.9f\n", name, value)
}
func writeGauge(b *strings.Builder, name, help string, value float64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s gauge\n", name)
fmt.Fprintf(b, "%s %.9f\n", name, value)
}
// writeGaugeVec emits one gauge sample per map entry, labeled with
// labelName, in deterministic sorted-key order.
func writeGaugeVec(b *strings.Builder, name, help string, values map[string]float64, labelName string) {
	fmt.Fprintf(b, "# HELP %s %s\n# TYPE %s gauge\n", name, help, name)
	keys := make([]string, 0, len(values))
	for key := range values {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		fmt.Fprintf(b, "%s{%s=\"%s\"} %.9f\n", name, labelName, escapeLabelValue(key), values[key])
	}
}
// writeGaugeVecFromInt64 is writeGaugeVec for int64 values, widened
// to float64 before rendering, in deterministic sorted-key order.
func writeGaugeVecFromInt64(b *strings.Builder, name, help string, values map[string]int64, labelName string) {
	fmt.Fprintf(b, "# HELP %s %s\n# TYPE %s gauge\n", name, help, name)
	keys := make([]string, 0, len(values))
	for key := range values {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		fmt.Fprintf(b, "%s{%s=\"%s\"} %.9f\n", name, labelName, escapeLabelValue(key), float64(values[key]))
	}
}
// writeCounterVecKV emits a labeled counter family. Each map key is a
// "|"-separated list of label values matched positionally against
// labels; rows are emitted in sorted-key order for determinism.
func writeCounterVecKV(b *strings.Builder, name, help string, values map[string]uint64, labels []string) {
	fmt.Fprintf(b, "# HELP %s %s\n# TYPE %s counter\n", name, help, name)
	keys := make([]string, 0, len(values))
	for key := range values {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		labelValues := strings.Split(key, "|")
		fmt.Fprintf(b, "%s{%s} %d\n", name, formatLabels(labels, labelValues), values[key])
	}
}
// writeHistogramVecKV emits a labeled histogram family. Each map key
// is a "|"-separated list of label values matched positionally
// against labels; missing positions become empty label values.
// Series are emitted in sorted-key order for deterministic output.
func writeHistogramVecKV(b *strings.Builder, name, help string, values map[string]histogramSnapshot, labels []string) {
	fmt.Fprintf(b, "# HELP %s %s\n", name, help)
	fmt.Fprintf(b, "# TYPE %s histogram\n", name)
	keys := make([]string, 0, len(values))
	for k := range values {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		parts := strings.Split(key, "|")
		labelsMap := make(map[string]string, len(labels))
		for i, label := range labels {
			if i < len(parts) {
				labelsMap[label] = parts[i]
			} else {
				// Fewer values than label names: pad with "".
				labelsMap[label] = ""
			}
		}
		writeHistogramWithLabelsMap(b, name, labelsMap, values[key])
	}
}
func writeHistogram(b *strings.Builder, name, help string, labels map[string]string, bounds []float64, counts []uint64, sum float64, count uint64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s histogram\n", name)
writeHistogramWithLabelsMap(b, name, labels, histogramSnapshot{bounds: bounds, counts: counts, sum: sum, count: count})
}
// writeHistogramWithLabelsMap emits one histogram series: cumulative
// _bucket lines (each carrying an "le" label), then _sum and _count.
// Bucket counts in the snapshot are per-bucket, so they are summed
// into a running cumulative total as required by the Prometheus
// exposition format; the bucket beyond the last bound is "+Inf".
func writeHistogramWithLabelsMap(b *strings.Builder, name string, labels map[string]string, s histogramSnapshot) {
	var cumulative uint64
	for i, bucketCount := range s.counts {
		cumulative += bucketCount
		// Clone so adding "le" does not mutate the caller's labels.
		bucketLabels := cloneLabels(labels)
		if i < len(s.bounds) {
			bucketLabels["le"] = trimFloat(s.bounds[i])
		} else {
			bucketLabels["le"] = "+Inf"
		}
		fmt.Fprintf(b, "%s_bucket{%s} %d\n", name, labelsToString(bucketLabels), cumulative)
	}
	// _sum/_count omit the braces entirely when there are no labels.
	labelsSuffix := formatLabelsSuffix(labels)
	fmt.Fprintf(b, "%s_sum%s %.9f\n", name, labelsSuffix, s.sum)
	fmt.Fprintf(b, "%s_count%s %d\n", name, labelsSuffix, s.count)
}
// formatLabelsSuffix renders labels as a "{...}" suffix, or the empty
// string when there are no labels (so bare series names stay bare).
func formatLabelsSuffix(labels map[string]string) string {
	if len(labels) > 0 {
		return "{" + labelsToString(labels) + "}"
	}
	return ""
}
func formatLabels(keys, values []string) string {
parts := make([]string, 0, len(keys))
for i, key := range keys {
value := ""
if i < len(values) {
value = values[i]
}
parts = append(parts, fmt.Sprintf("%s=\"%s\"", key, escapeLabelValue(value)))
}
return strings.Join(parts, ",")
}
func labelsToString(labels map[string]string) string {
if len(labels) == 0 {
return ""
}
keys := make([]string, 0, len(labels))
for k := range labels {
keys = append(keys, k)
}
sort.Strings(keys)
parts := make([]string, 0, len(keys))
for _, key := range keys {
parts = append(parts, fmt.Sprintf("%s=\"%s\"", key, escapeLabelValue(labels[key])))
}
return strings.Join(parts, ",")
}
// cloneLabels copies a label map, always returning a non-nil map with
// one spare slot so callers can add an extra label (e.g. "le").
func cloneLabels(in map[string]string) map[string]string {
	out := make(map[string]string, len(in)+1)
	for key, value := range in {
		out[key] = value
	}
	return out
}
// trimFloat formats v in non-exponent notation with the fewest digits
// that round-trip exactly (used for histogram "le" bound labels).
func trimFloat(v float64) string {
	const format, minDigits, bitSize = 'f', -1, 64
	return strconv.FormatFloat(v, format, minDigits, bitSize)
}
// escapeLabelValue escapes backslashes, newlines, and double quotes
// as required for label values in the Prometheus text exposition
// format. Backslashes are escaped first so later replacements do not
// get re-escaped.
func escapeLabelValue(value string) string {
	replacements := [][2]string{
		{`\`, `\\`},
		{"\n", `\n`},
		{`"`, `\"`},
	}
	for _, repl := range replacements {
		value = strings.ReplaceAll(value, repl[0], repl[1])
	}
	return value
}
// readResidentMemoryBytes reports the process RSS in bytes by parsing
// /proc/self/statm (Linux-specific). The second field of statm is the
// resident set size in pages, converted here via the system page
// size. Returns (0, false) when the file is unavailable or malformed,
// e.g. on non-Linux platforms.
func readResidentMemoryBytes() (float64, bool) {
	data, err := os.ReadFile("/proc/self/statm")
	if err != nil {
		return 0, false
	}
	fields := strings.Fields(string(data))
	if len(fields) < 2 {
		return 0, false
	}
	rssPages, err := strconv.ParseInt(fields[1], 10, 64)
	if err != nil || rssPages < 0 {
		return 0, false
	}
	return float64(rssPages * int64(os.Getpagesize())), true
}
// readProcessCPUSeconds reports total user+system CPU time consumed
// by this process, in seconds, via getrusage(RUSAGE_SELF). Returns
// (0, false) if the syscall fails.
func readProcessCPUSeconds() (float64, bool) {
	var usage syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil {
		return 0, false
	}
	// Utime/Stime are (seconds, microseconds) pairs; fold into float seconds.
	user := float64(usage.Utime.Sec) + float64(usage.Utime.Usec)/1e6
	sys := float64(usage.Stime.Sec) + float64(usage.Stime.Usec)/1e6
	return user + sys, true
}

34
metrics/metrics_test.go Normal file
View File

@@ -0,0 +1,34 @@
package metrics
import (
"strings"
"testing"
)
// TestRenderPrometheusHistogramNoEmptyLabelSet verifies that
// unlabeled histograms render their _sum/_count lines without an
// empty "{}" label set, which the Prometheus text format forbids.
func TestRenderPrometheusHistogramNoEmptyLabelSet(t *testing.T) {
	reg := NewRegistry()
	reg.ObserveBatchSize(3)
	reg.ObserveGC(0, 0, 0, 0, true)
	out := reg.RenderPrometheus()
	if strings.Contains(out, "fs_batch_size_histogram_sum{}") {
		t.Fatalf("unexpected empty label set for batch sum metric")
	}
	if strings.Contains(out, "fs_batch_size_histogram_count{}") {
		t.Fatalf("unexpected empty label set for batch count metric")
	}
	if strings.Contains(out, "fs_gc_duration_seconds_sum{}") {
		t.Fatalf("unexpected empty label set for gc sum metric")
	}
	if strings.Contains(out, "fs_gc_duration_seconds_count{}") {
		t.Fatalf("unexpected empty label set for gc count metric")
	}
}
// TestEscapeLabelValueEscapesSingleBackslash pins the backslash
// doubling behavior of escapeLabelValue.
func TestEscapeLabelValueEscapesSingleBackslash(t *testing.T) {
	got := escapeLabelValue(`a\b`)
	want := `a\\b`
	if got != want {
		t.Fatalf("escapeLabelValue returned %q, want %q", got, want)
	}
}

View File

@@ -1,5 +1,10 @@
package models
import (
"encoding/xml"
"time"
)
type ObjectManifest struct {
Bucket string `json:"bucket"`
Key string `json:"key"`
@@ -9,3 +14,212 @@ type ObjectManifest struct {
Chunks []string `json:"chunks"`
CreatedAt int64 `json:"created_at"`
}
// BucketManifest carries bucket-level metadata: name, owner, region,
// versioning status, and the public-access-block flag. JSON-tagged;
// presumably the persisted bucket record — confirm against the
// metadata layer.
type BucketManifest struct {
	Name              string    `json:"name"`
	CreatedAt         time.Time `json:"created_at"`
	OwnerID           string    `json:"owner_id"`
	OwnerDisplayName  string    `json:"owner_display_name"`
	Region            string    `json:"region"`
	VersioningStatus  string    `json:"versioning_status"`
	PublicAccessBlock bool      `json:"public_access_block"`
}

// ListAllMyBucketsResult is the XML body of the S3 ListBuckets
// response.
type ListAllMyBucketsResult struct {
	XMLName xml.Name       `xml:"ListAllMyBucketsResult"`
	Xmlns   string         `xml:"xmlns,attr"`
	Owner   BucketsOwner   `xml:"Owner"`
	Buckets BucketsElement `xml:"Buckets"`
}

// BucketsOwner identifies the bucket owner in ListBuckets responses.
type BucketsOwner struct {
	ID          string `xml:"ID"`
	DisplayName string `xml:"DisplayName,omitempty"`
}

// BucketsElement wraps the list of buckets in ListBuckets responses.
type BucketsElement struct {
	Items []BucketItem `xml:"Bucket"`
}

// BucketItem is one bucket entry in a ListBuckets response.
type BucketItem struct {
	Name         string `xml:"Name"`
	CreationDate string `xml:"CreationDate"`
}

// S3ErrorResponse is the standard S3 XML error envelope.
type S3ErrorResponse struct {
	XMLName   xml.Name `xml:"Error"`
	Code      string   `xml:"Code"`
	Message   string   `xml:"Message"`
	Resource  string   `xml:"Resource,omitempty"`
	RequestID string   `xml:"RequestId,omitempty"`
	HostID    string   `xml:"HostId,omitempty"`
}
// ListBucketResult is an object-listing XML response body.
type ListBucketResult struct {
	XMLName        xml.Name         `xml:"ListBucketResult"`
	Xmlns          string           `xml:"xmlns,attr"`
	Name           string           `xml:"Name"`
	Prefix         string           `xml:"Prefix"`
	KeyCount       int              `xml:"KeyCount"`
	MaxKeys        int              `xml:"MaxKeys"`
	IsTruncated    bool             `xml:"IsTruncated"`
	Contents       []Contents       `xml:"Contents"`
	CommonPrefixes []CommonPrefixes `xml:"CommonPrefixes,omitempty"`
}

// ListBucketResultV1 is the S3 ListObjects (v1) response body, paged
// with Marker/NextMarker.
type ListBucketResultV1 struct {
	XMLName        xml.Name         `xml:"ListBucketResult"`
	Xmlns          string           `xml:"xmlns,attr"`
	Name           string           `xml:"Name"`
	Prefix         string           `xml:"Prefix"`
	Marker         string           `xml:"Marker,omitempty"`
	NextMarker     string           `xml:"NextMarker,omitempty"`
	Delimiter      string           `xml:"Delimiter,omitempty"`
	MaxKeys        int              `xml:"MaxKeys"`
	IsTruncated    bool             `xml:"IsTruncated"`
	EncodingType   string           `xml:"EncodingType,omitempty"`
	Contents       []Contents       `xml:"Contents,omitempty"`
	CommonPrefixes []CommonPrefixes `xml:"CommonPrefixes,omitempty"`
}

// ListBucketResultV2 is the S3 ListObjectsV2 response body, paged
// with continuation tokens.
type ListBucketResultV2 struct {
	XMLName               xml.Name         `xml:"ListBucketResult"`
	Xmlns                 string           `xml:"xmlns,attr"`
	Name                  string           `xml:"Name"`
	Prefix                string           `xml:"Prefix"`
	Delimiter             string           `xml:"Delimiter,omitempty"`
	MaxKeys               int              `xml:"MaxKeys"`
	KeyCount              int              `xml:"KeyCount"`
	IsTruncated           bool             `xml:"IsTruncated"`
	ContinuationToken     string           `xml:"ContinuationToken,omitempty"`
	NextContinuationToken string           `xml:"NextContinuationToken,omitempty"`
	StartAfter            string           `xml:"StartAfter,omitempty"`
	EncodingType          string           `xml:"EncodingType,omitempty"`
	Contents              []Contents       `xml:"Contents,omitempty"`
	CommonPrefixes        []CommonPrefixes `xml:"CommonPrefixes,omitempty"`
}

// Contents is one object entry in a listing response.
type Contents struct {
	Key          string `xml:"Key"`
	LastModified string `xml:"LastModified"`
	ETag         string `xml:"ETag"`
	Size         int64  `xml:"Size"`
	StorageClass string `xml:"StorageClass"`
}

// CommonPrefixes is one rolled-up prefix in a delimited listing.
type CommonPrefixes struct {
	Prefix string `xml:"Prefix"`
}
// MultipartUpload tracks one in-progress multipart upload session.
type MultipartUpload struct {
	UploadID  string `json:"upload_id" xml:"UploadId"`
	Bucket    string `json:"bucket" xml:"Bucket"`
	Key       string `json:"key" xml:"Key"`
	CreatedAt string `json:"created_at" xml:"CreatedAt"`
	State     string `json:"state" xml:"State"`
}

// InitiateMultipartUploadResult is the XML response to
// CreateMultipartUpload.
type InitiateMultipartUploadResult struct {
	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
	Xmlns    string   `xml:"xmlns,attr"`
	Bucket   string   `xml:"Bucket"`
	Key      string   `xml:"Key"`
	UploadID string   `xml:"UploadId"`
}

// UploadedPart is a stored part of a multipart upload, including the
// chunk IDs that hold its data (JSON-only fields).
type UploadedPart struct {
	PartNumber int      `json:"part_number" xml:"PartNumber"`
	ETag       string   `json:"etag" xml:"ETag"`
	Size       int64    `json:"size" xml:"Size"`
	Chunks     []string `json:"chunks"`
	CreatedAt  int64    `json:"created_at"`
}

// CompletedPart is one part reference in a CompleteMultipartUpload
// request.
type CompletedPart struct {
	PartNumber int    `xml:"PartNumber"`
	ETag       string `xml:"ETag"`
}

// CompleteMultipartUploadRequest is the XML body of a
// CompleteMultipartUpload request.
type CompleteMultipartUploadRequest struct {
	XMLName xml.Name        `xml:"CompleteMultipartUpload"`
	Parts   []CompletedPart `xml:"Part"`
}

// CompleteMultipartUploadResult is the XML response to
// CompleteMultipartUpload.
type CompleteMultipartUploadResult struct {
	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
	Xmlns    string   `xml:"xmlns,attr"`
	Bucket   string   `xml:"Bucket"`
	Key      string   `xml:"Key"`
	ETag     string   `xml:"ETag"`
	Location string   `xml:"Location,omitempty"`
}

// ListPartsResult is the XML response to ListParts.
type ListPartsResult struct {
	XMLName  xml.Name   `xml:"ListPartsResult"`
	Xmlns    string     `xml:"xmlns,attr"`
	Bucket   string     `xml:"Bucket"`
	Key      string     `xml:"Key"`
	UploadID string     `xml:"UploadId"`
	Parts    []PartItem `xml:"Part"`
}

// PartItem is one part entry in a ListParts response.
type PartItem struct {
	PartNumber   int    `xml:"PartNumber"`
	LastModified string `xml:"LastModified"`
	ETag         string `xml:"ETag"`
	Size         int64  `xml:"Size"`
}
// DeleteObjectsRequest is the XML body of an S3 DeleteObjects (batch
// delete) request.
type DeleteObjectsRequest struct {
	XMLName xml.Name               `xml:"Delete"`
	Objects []DeleteObjectIdentity `xml:"Object"`
	Quiet   bool                   `xml:"Quiet"`
}

// DeleteObjectIdentity names one object to delete.
type DeleteObjectIdentity struct {
	Key string `xml:"Key"`
}

// DeleteObjectsResult is the XML response to DeleteObjects, carrying
// per-key successes and failures.
type DeleteObjectsResult struct {
	XMLName xml.Name       `xml:"DeleteResult"`
	Xmlns   string         `xml:"xmlns,attr"`
	Deleted []DeletedEntry `xml:"Deleted,omitempty"`
	Errors  []DeleteError  `xml:"Error,omitempty"`
}

// DeletedEntry reports one successfully deleted key.
type DeletedEntry struct {
	Key string `xml:"Key"`
}

// DeleteError reports one key that failed to delete, with an S3-style
// error code and message.
type DeleteError struct {
	Key     string `xml:"Key"`
	Code    string `xml:"Code"`
	Message string `xml:"Message"`
}
// AuthIdentity is a stored access-key identity. The secret is held
// encrypted (SecretEnc/SecretNonce) together with the algorithm and
// key version used to encrypt it — never in plaintext.
type AuthIdentity struct {
	AccessKeyID string `json:"access_key_id"`
	SecretEnc   string `json:"secret_enc"`
	SecretNonce string `json:"secret_nonce"`
	EncAlg      string `json:"enc_alg"`
	KeyVersion  string `json:"key_version"`
	Status      string `json:"status"`
	CreatedAt   int64  `json:"created_at"`
	UpdatedAt   int64  `json:"updated_at"`
}

// AuthPolicy is the set of policy statements attached to a principal.
type AuthPolicy struct {
	Principal  string                `json:"principal"`
	Statements []AuthPolicyStatement `json:"statements"`
}

// AuthPolicyStatement grants or denies a set of actions on a bucket,
// optionally restricted to a key prefix.
type AuthPolicyStatement struct {
	Effect  string   `json:"effect"`
	Actions []string `json:"actions"`
	Bucket  string   `json:"bucket"`
	Prefix  string   `json:"prefix"`
}

View File

@@ -1,29 +1,80 @@
package service
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"fs/metadata"
"fs/metrics"
"fs/models"
"fs/storage"
"io"
"log/slog"
"strings"
"sync"
"time"
)
type ObjectService struct {
metadataHandler *metadata.MetadataHandler
metadata *metadata.MetadataHandler
blob *storage.BlobStore
multipartRetention time.Duration
gcMu sync.RWMutex
}
func NewObjectService(metadataHandler *metadata.MetadataHandler) *ObjectService {
return &ObjectService{metadataHandler: metadataHandler}
var (
ErrInvalidPart = errors.New("invalid multipart part")
ErrInvalidPartOrder = errors.New("invalid multipart part order")
ErrInvalidCompleteRequest = errors.New("invalid complete multipart request")
ErrEntityTooSmall = errors.New("multipart entity too small")
)
func NewObjectService(metadataHandler *metadata.MetadataHandler, blobHandler *storage.BlobStore, multipartRetention time.Duration) *ObjectService {
if multipartRetention <= 0 {
multipartRetention = 24 * time.Hour
}
return &ObjectService{
metadata: metadataHandler,
blob: blobHandler,
multipartRetention: multipartRetention,
}
}
func (s *ObjectService) PutObject(uri string, contentType string, input io.Reader) (*models.ObjectManifest, error) {
// acquireGCRLock takes the GC mutex in shared (read) mode, recording
// the wait time, and returns an unlock func that also records how
// long the lock was held. Object operations use the read side so they
// can run concurrently with each other but never with GarbageCollect,
// which takes the write side.
func (s *ObjectService) acquireGCRLock() func() {
	waitStart := time.Now()
	s.gcMu.RLock()
	metrics.Default.ObserveLockWait("gc_mu_read", time.Since(waitStart))
	holdStart := time.Now()
	return func() {
		metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
		s.gcMu.RUnlock()
	}
}
bucket := strings.Split(uri, "/")[0]
key := strings.Join(strings.Split(uri, "/")[1:], "/")
// acquireGCLock takes the GC mutex exclusively (write mode), recording
// wait and hold times, and returns the matching unlock func. Only the
// garbage collector uses this; it blocks until all in-flight object
// operations holding the read lock have finished.
func (s *ObjectService) acquireGCLock() func() {
	waitStart := time.Now()
	s.gcMu.Lock()
	metrics.Default.ObserveLockWait("gc_mu_write", time.Since(waitStart))
	holdStart := time.Now()
	return func() {
		metrics.Default.ObserveLockHold("gc_mu_write", time.Since(holdStart))
		s.gcMu.Unlock()
	}
}
chunks, size, etag, err := storage.IngestStream(input)
func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Reader) (*models.ObjectManifest, error) {
start := time.Now()
success := false
defer func() {
metrics.Default.ObserveService("put_object", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
chunks, size, etag, err := s.blob.IngestStream(input)
if err != nil {
return nil, err
}
@@ -38,28 +89,408 @@ func (s *ObjectService) PutObject(uri string, contentType string, input io.Reade
Chunks: chunks,
CreatedAt: timestamp,
}
fmt.Println(manifest)
if err = s.metadataHandler.PutManifest(manifest); err != nil {
slog.Debug("object_written_manifest",
"bucket", manifest.Bucket,
"key", manifest.Key,
"size", manifest.Size,
"chunk_count", len(manifest.Chunks),
"etag", manifest.ETag,
)
if err = s.metadata.PutManifest(manifest); err != nil {
return nil, err
}
success = true
return manifest, nil
}
func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) {
manifest, err := s.metadataHandler.GetManifest(bucket, key)
start := time.Now()
waitStart := time.Now()
s.gcMu.RLock()
metrics.Default.ObserveLockWait("gc_mu_read", time.Since(waitStart))
holdStart := time.Now()
manifest, err := s.metadata.GetManifest(bucket, key)
if err != nil {
metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
s.gcMu.RUnlock()
metrics.Default.ObserveService("get_object", time.Since(start), false)
return nil, nil, err
}
pr, pw := io.Pipe()
go func() {
defer pw.Close()
err := storage.AssembleStream(manifest.Chunks, pw)
if err != nil {
streamOK := false
defer func() {
metrics.Default.ObserveService("get_object", time.Since(start), streamOK)
}()
defer metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
defer s.gcMu.RUnlock()
if err := s.blob.AssembleStream(manifest.Chunks, pw); err != nil {
_ = pw.CloseWithError(err)
return
}
if err := pw.Close(); err != nil {
return
}
streamOK = true
}()
return pr, manifest, nil
}
// HeadObject returns the object's manifest (metadata only, no data
// stream) under the GC read lock, recording service metrics.
func (s *ObjectService) HeadObject(bucket, key string) (models.ObjectManifest, error) {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("head_object", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	manifest, err := s.metadata.GetManifest(bucket, key)
	if err != nil {
		return models.ObjectManifest{}, err
	}
	success = true
	return *manifest, nil
}

// DeleteObject removes the object's manifest under the GC read lock.
// Chunk data is not reclaimed here; unreferenced chunks are swept by
// GarbageCollect.
func (s *ObjectService) DeleteObject(bucket, key string) error {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("delete_object", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	err := s.metadata.DeleteManifest(bucket, key)
	success = err == nil
	return err
}

// ListObjects lists manifests in a bucket filtered by key prefix,
// under the GC read lock.
func (s *ObjectService) ListObjects(bucket, prefix string) ([]*models.ObjectManifest, error) {
	unlock := s.acquireGCRLock()
	defer unlock()
	return s.metadata.ListObjects(bucket, prefix)
}

// ForEachObjectFrom streams manifests starting at startKey through
// fn, under the GC read lock, recording service metrics.
func (s *ObjectService) ForEachObjectFrom(bucket, startKey string, fn func(*models.ObjectManifest) error) error {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("for_each_object_from", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	err := s.metadata.ForEachObjectFrom(bucket, startKey, fn)
	success = err == nil
	return err
}
// CreateBucket creates a bucket record under the GC read lock,
// recording service metrics.
func (s *ObjectService) CreateBucket(bucket string) error {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("create_bucket", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	err := s.metadata.CreateBucket(bucket)
	success = err == nil
	return err
}

// HeadBucket reports whether the bucket exists by fetching its
// manifest and discarding it.
func (s *ObjectService) HeadBucket(bucket string) error {
	unlock := s.acquireGCRLock()
	defer unlock()
	_, err := s.metadata.GetBucketManifest(bucket)
	return err
}

// GetBucketManifest returns the bucket's metadata record.
func (s *ObjectService) GetBucketManifest(bucket string) (*models.BucketManifest, error) {
	unlock := s.acquireGCRLock()
	defer unlock()
	return s.metadata.GetBucketManifest(bucket)
}

// DeleteBucket removes the bucket record.
func (s *ObjectService) DeleteBucket(bucket string) error {
	unlock := s.acquireGCRLock()
	defer unlock()
	return s.metadata.DeleteBucket(bucket)
}

// ListBuckets returns all bucket names, recording service metrics.
func (s *ObjectService) ListBuckets() ([]string, error) {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("list_buckets", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	buckets, err := s.metadata.ListBuckets()
	success = err == nil
	return buckets, err
}

// DeleteObjects batch-deletes manifests and returns the keys that
// were actually removed.
func (s *ObjectService) DeleteObjects(bucket string, keys []string) ([]string, error) {
	unlock := s.acquireGCRLock()
	defer unlock()
	return s.metadata.DeleteManifests(bucket, keys)
}

// CreateMultipartUpload starts a new multipart upload session for the
// given bucket/key.
func (s *ObjectService) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) {
	unlock := s.acquireGCRLock()
	defer unlock()
	return s.metadata.CreateMultipartUpload(bucket, key)
}
// UploadPart ingests the body of one multipart part into the blob
// store and records it against the upload session, returning the
// part's ETag. It validates the S3 part-number range (1..10000) and
// that the upload ID belongs to the addressed bucket/key. Runs under
// the GC read lock and records service metrics.
func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int, input io.Reader) (string, error) {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("upload_part", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	// S3 restricts part numbers to 1..10000.
	if partNumber < 1 || partNumber > 10000 {
		return "", ErrInvalidPart
	}
	upload, err := s.metadata.GetMultipartUpload(uploadId)
	if err != nil {
		return "", err
	}
	// The upload ID must match the addressed bucket/key pair.
	if upload.Bucket != bucket || upload.Key != key {
		return "", metadata.ErrMultipartNotFound
	}
	chunkIDs, totalSize, etag, err := s.blob.IngestStream(input)
	if err != nil {
		return "", err
	}
	part := models.UploadedPart{
		PartNumber: partNumber,
		ETag:       etag,
		Size:       totalSize,
		Chunks:     chunkIDs,
		CreatedAt:  time.Now().Unix(),
	}
	if err := s.metadata.PutMultipartPart(uploadId, part); err != nil {
		return "", err
	}
	success = true
	return etag, nil
}
// ListMultipartParts returns the parts recorded so far for an upload,
// after verifying the upload ID belongs to the given bucket/key.
func (s *ObjectService) ListMultipartParts(bucket, key, uploadID string) ([]models.UploadedPart, error) {
	unlock := s.acquireGCRLock()
	defer unlock()
	upload, err := s.metadata.GetMultipartUpload(uploadID)
	if err != nil {
		return nil, err
	}
	if upload.Bucket != bucket || upload.Key != key {
		return nil, metadata.ErrMultipartNotFound
	}
	return s.metadata.ListMultipartParts(uploadID)
}
// CompleteMultipartUpload validates the client's completed-parts list
// against the stored parts and assembles the final object manifest.
// Validation rules (mirroring S3 semantics):
//   - the request must list at least one part;
//   - part numbers must be strictly ascending;
//   - each listed part must exist with a matching ETag;
//   - every part except the last must be at least 5 MiB.
//
// The final ETag is the S3-style multipart ETag ("<md5>-<count>").
// Runs under the GC read lock and records service metrics.
func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, completed []models.CompletedPart) (*models.ObjectManifest, error) {
	start := time.Now()
	success := false
	defer func() {
		metrics.Default.ObserveService("complete_multipart_upload", time.Since(start), success)
	}()
	unlock := s.acquireGCRLock()
	defer unlock()
	if len(completed) == 0 {
		return nil, ErrInvalidCompleteRequest
	}
	upload, err := s.metadata.GetMultipartUpload(uploadID)
	if err != nil {
		return nil, err
	}
	if upload.Bucket != bucket || upload.Key != key {
		return nil, metadata.ErrMultipartNotFound
	}
	storedParts, err := s.metadata.ListMultipartParts(uploadID)
	if err != nil {
		return nil, err
	}
	// Index stored parts for O(1) lookup while walking the request.
	partsByNumber := make(map[int]models.UploadedPart, len(storedParts))
	for _, part := range storedParts {
		partsByNumber[part.PartNumber] = part
	}
	lastPartNumber := 0
	orderedParts := make([]models.UploadedPart, 0, len(completed))
	chunks := make([]string, 0)
	var totalSize int64
	for i, part := range completed {
		// Enforce strictly ascending part numbers.
		if part.PartNumber <= lastPartNumber {
			return nil, ErrInvalidPartOrder
		}
		lastPartNumber = part.PartNumber
		storedPart, ok := partsByNumber[part.PartNumber]
		if !ok {
			return nil, ErrInvalidPart
		}
		// Compare ETags ignoring surrounding quotes.
		if normalizeETag(part.ETag) != normalizeETag(storedPart.ETag) {
			return nil, ErrInvalidPart
		}
		// All parts except the last must be >= 5 MiB (S3 minimum).
		if i < len(completed)-1 && storedPart.Size < 5*1024*1024 {
			return nil, ErrEntityTooSmall
		}
		orderedParts = append(orderedParts, storedPart)
		chunks = append(chunks, storedPart.Chunks...)
		totalSize += storedPart.Size
	}
	finalETag := buildMultipartETag(orderedParts)
	manifest := &models.ObjectManifest{
		Bucket:      bucket,
		Key:         key,
		Size:        totalSize,
		ContentType: "application/octet-stream",
		ETag:        finalETag,
		Chunks:      chunks,
		CreatedAt:   time.Now().Unix(),
	}
	if err := s.metadata.CompleteMultipartUpload(uploadID, manifest); err != nil {
		return nil, err
	}
	success = true
	return manifest, nil
}
// AbortMultipartUpload cancels an upload session after verifying the
// upload ID belongs to the given bucket/key.
func (s *ObjectService) AbortMultipartUpload(bucket, key, uploadID string) error {
	unlock := s.acquireGCRLock()
	defer unlock()
	upload, err := s.metadata.GetMultipartUpload(uploadID)
	if err != nil {
		return err
	}
	if upload.Bucket != bucket || upload.Key != key {
		return metadata.ErrMultipartNotFound
	}
	return s.metadata.AbortMultipartUpload(uploadID)
}
// normalizeETag strips surrounding double quotes so ETags compare
// equal regardless of how the client quoted them.
func normalizeETag(etag string) string {
	return strings.Trim(etag, `"`)
}
// buildMultipartETag computes the S3-style multipart ETag: the MD5 of
// the concatenated binary part digests, suffixed with "-<part count>".
// A part ETag that is not valid hex is hashed as its raw bytes
// instead. MD5 here is for S3 compatibility, not security.
func buildMultipartETag(parts []models.UploadedPart) string {
	hasher := md5.New()
	for _, part := range parts {
		etagBytes, err := hex.DecodeString(normalizeETag(part.ETag))
		if err == nil {
			_, _ = hasher.Write(etagBytes)
			continue
		}
		_, _ = hasher.Write([]byte(normalizeETag(part.ETag)))
	}
	return fmt.Sprintf("%x-%d", hasher.Sum(nil), len(parts))
}

// Close releases the underlying metadata store.
func (s *ObjectService) Close() error {
	return s.metadata.Close()
}
// GarbageCollect deletes chunk files that are not referenced by any object
// manifest and expires stale multipart uploads, emitting one ObserveGC
// metric for the whole sweep (including failed ones).
func (s *ObjectService) GarbageCollect() error {
	start := time.Now()
	success := false
	deletedChunks := 0
	deleteErrors := 0
	cleanedUploads := 0
	// Deferred so the metric fires on every exit path; success stays false
	// unless the sweep runs to completion.
	defer func() {
		metrics.Default.ObserveGC(time.Since(start), deletedChunks, deleteErrors, cleanedUploads, success)
	}()
	// Exclusive GC lock — presumably the write side paired with
	// acquireGCRLock used by the multipart paths; confirm in ObjectService.
	unlock := s.acquireGCLock()
	defer unlock()
	// Snapshot of every chunk ID referenced by current metadata; anything
	// on disk outside this set is garbage.
	referencedChunkSet, err := s.metadata.GetReferencedChunkSet()
	if err != nil {
		return err
	}
	totalChunks := 0
	if err := s.blob.ForEachChunk(func(chunkID string) error {
		totalChunks++
		if _, found := referencedChunkSet[chunkID]; found {
			return nil
		}
		// Best-effort delete: log and keep sweeping rather than aborting
		// the entire GC run on one bad chunk.
		if err := s.blob.DeleteBlob(chunkID); err != nil {
			deleteErrors++
			slog.Warn("garbage_collect_delete_failed", "chunk_id", chunkID, "error", err)
			return nil
		}
		deletedChunks++
		return nil
	}); err != nil {
		return err
	}
	// Drop multipart uploads older than the configured retention window.
	cleanedUploads, err = s.metadata.CleanupMultipartUploads(s.multipartRetention)
	if err != nil {
		return err
	}
	slog.Info("garbage_collect_completed",
		"referenced_chunks", len(referencedChunkSet),
		"total_chunks", totalChunks,
		"deleted_chunks", deletedChunks,
		"delete_errors", deleteErrors,
		"cleaned_uploads", cleanedUploads,
	)
	success = true
	return nil
}
// RunGC runs GarbageCollect on a fixed interval until ctx is cancelled.
// A non-positive interval disables the loop entirely (with a warning).
func (s *ObjectService) RunGC(ctx context.Context, interval time.Duration) {
	if interval <= 0 {
		slog.Warn("garbage_collect_disabled_invalid_interval", "interval", interval.String())
		return
	}
	tick := time.NewTicker(interval)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			// Best-effort: a failed sweep does not stop the loop.
			_ = s.GarbageCollect()
		case <-ctx.Done():
			return
		}
	}
}

View File

@@ -4,24 +4,55 @@ import (
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"fs/metrics"
"io"
"os"
"path/filepath"
"strings"
"time"
)
const chunkSize = 64 * 1024
const blobRoot = "blobs/"
const blobRoot = "blobs"
const maxChunkSize = 64 * 1024 * 1024
func IngestStream(stream io.Reader) ([]string, int64, string, error) {
// BlobStore stores content-addressed chunks as files beneath dataRoot.
type BlobStore struct {
	dataRoot  string // cleaned filesystem root; chunks live under dataRoot/blobs
	chunkSize int    // per-chunk read size in bytes; validated by NewBlobStore
}
// NewBlobStore initializes a BlobStore rooted at root, creating the blob
// directory if necessary. chunkSize must be in (0, maxChunkSize].
func NewBlobStore(root string, chunkSize int) (*BlobStore, error) {
	trimmed := strings.TrimSpace(root)
	if trimmed == "" {
		return nil, errors.New("blob root is required")
	}
	if chunkSize <= 0 || chunkSize > maxChunkSize {
		return nil, fmt.Errorf("chunk size must be between 1 and %d bytes", maxChunkSize)
	}
	dataRoot := filepath.Clean(trimmed)
	// Ensure the chunk directory exists up front so later writes only need
	// to create the two-level fan-out subdirectories.
	if err := os.MkdirAll(filepath.Join(dataRoot, blobRoot), 0o755); err != nil {
		return nil, err
	}
	return &BlobStore{dataRoot: dataRoot, chunkSize: chunkSize}, nil
}
func (bs *BlobStore) IngestStream(stream io.Reader) ([]string, int64, string, error) {
start := time.Now()
fullFileHasher := md5.New()
buffer := make([]byte, chunkSize)
buffer := make([]byte, bs.chunkSize)
var totalSize int64
var chunkIDs []string
success := false
defer func() {
metrics.Default.ObserveBlob("ingest_stream", time.Since(start), 0, success)
}()
for {
bytesRead, err := io.ReadFull(stream, buffer)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
if err != nil && err != io.EOF && !errors.Is(err, io.ErrUnexpectedEOF) {
return nil, 0, "", err
}
@@ -34,13 +65,13 @@ func IngestStream(stream io.Reader) ([]string, int64, string, error) {
chunkHash := sha256.Sum256(chunkData)
chunkID := hex.EncodeToString(chunkHash[:])
err := saveBlob(chunkID, chunkData)
err := bs.saveBlob(chunkID, chunkData)
if err != nil {
return nil, 0, "", err
}
chunkIDs = append(chunkIDs, chunkID)
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
break
}
if err != nil {
@@ -50,27 +81,84 @@ func IngestStream(stream io.Reader) ([]string, int64, string, error) {
}
etag := hex.EncodeToString(fullFileHasher.Sum(nil))
success = true
return chunkIDs, totalSize, etag, nil
}
func saveBlob(chunkID string, data []byte) error {
dir := filepath.Join(blobRoot, chunkID[:2], chunkID[2:4])
func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
start := time.Now()
success := false
writtenBytes := int64(0)
defer func() {
metrics.Default.ObserveBlob("write_chunk", time.Since(start), writtenBytes, success)
}()
if !isValidChunkID(chunkID) {
return fmt.Errorf("invalid chunk id: %q", chunkID)
}
dir := filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4])
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
fullPath := filepath.Join(dir, chunkID)
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
if err := os.WriteFile(fullPath, data, 0644); err != nil {
return err
}
if _, err := os.Stat(fullPath); err == nil {
success = true
return nil
} else if !os.IsNotExist(err) {
return err
}
tmpFile, err := os.CreateTemp(dir, chunkID+".tmp-*")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
cleanup := true
defer func() {
if cleanup {
_ = os.Remove(tmpPath)
}
}()
if _, err := tmpFile.Write(data); err != nil {
_ = tmpFile.Close()
return err
}
if err := tmpFile.Sync(); err != nil {
_ = tmpFile.Close()
return err
}
if err := tmpFile.Close(); err != nil {
return err
}
if err := os.Rename(tmpPath, fullPath); err != nil {
if _, statErr := os.Stat(fullPath); statErr == nil {
success = true
return nil
}
return err
}
cleanup = false
if err := syncDir(dir); err != nil {
return err
}
writtenBytes = int64(len(data))
success = true
return nil
}
func AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
func (bs *BlobStore) AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
start := time.Now()
success := false
defer func() {
metrics.Default.ObserveBlob("assemble_stream", time.Since(start), 0, success)
}()
for _, chunkID := range chunkIDs {
chunkData, err := GetBlob(chunkID)
chunkData, err := bs.GetBlob(chunkID)
if err != nil {
return err
}
@@ -78,10 +166,86 @@ func AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
return err
}
}
success = true
return nil
}
func GetBlob(chunkID string) ([]byte, error) {
func (bs *BlobStore) GetBlob(chunkID string) ([]byte, error) {
start := time.Now()
success := false
var size int64
defer func() {
metrics.Default.ObserveBlob("read_chunk", time.Since(start), size, success)
}()
return os.ReadFile(filepath.Join(blobRoot, chunkID[:2], chunkID[2:4], chunkID))
if !isValidChunkID(chunkID) {
return nil, fmt.Errorf("invalid chunk id: %q", chunkID)
}
data, err := os.ReadFile(filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4], chunkID))
if err != nil {
return nil, err
}
size = int64(len(data))
success = true
return data, nil
}
// DeleteBlob removes the file backing chunkID. A chunk that is already
// absent is treated as success.
func (bs *BlobStore) DeleteBlob(chunkID string) error {
	if !isValidChunkID(chunkID) {
		return fmt.Errorf("invalid chunk id: %q", chunkID)
	}
	path := filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4], chunkID)
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// ListChunks returns the IDs of every chunk currently stored on disk.
func (bs *BlobStore) ListChunks() ([]string, error) {
	var ids []string
	walkErr := bs.ForEachChunk(func(id string) error {
		ids = append(ids, id)
		return nil
	})
	if walkErr != nil {
		return ids, walkErr
	}
	return ids, nil
}
// ForEachChunk invokes fn once per stored chunk ID, stopping at the first
// error returned either by fn or by the directory walk itself.
func (bs *BlobStore) ForEachChunk(fn func(chunkID string) error) error {
	if fn == nil {
		return errors.New("chunk callback is required")
	}
	root := filepath.Join(bs.dataRoot, blobRoot)
	return filepath.Walk(root, func(_ string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if info.IsDir() {
			return nil
		}
		// Skip stray files (e.g. leftover temp files) whose names are not
		// valid chunk IDs.
		name := info.Name()
		if !isValidChunkID(name) {
			return nil
		}
		return fn(name)
	})
}
func isValidChunkID(chunkID string) bool {
if len(chunkID) != sha256.Size*2 {
return false
}
for _, ch := range chunkID {
if (ch < '0' || ch > '9') && (ch < 'a' || ch > 'f') {
return false
}
}
return true
}
func syncDir(dirPath string) error {
dir, err := os.Open(dirPath)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}

116
utils/config.go Normal file
View File

@@ -0,0 +1,116 @@
package utils
import (
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/joho/godotenv"
)
// Config holds all runtime settings for the server. Fields are populated
// from environment variables by NewConfig, which applies the defaults and
// range clamping noted below.
type Config struct {
	DataPath                  string        // data root directory (DATA_PATH), made absolute when possible
	Address                   string        // listen address (ADDRESS, default "0.0.0.0")
	Port                      int           // listen port (PORT, default 2600, range 1-65535)
	ChunkSize                 int           // blob chunk size in bytes (CHUNK_SIZE, default 8192000)
	LogLevel                  string        // lowercase log level (LOG_LEVEL, default "info")
	LogFormat                 string        // "json" or "text" (LOG_FORMAT/LOG_TYPE, default "text")
	AuditLog                  bool          // enable audit logging (AUDIT_LOG, default true)
	GcInterval                time.Duration // GC period (GC_INTERVAL in minutes, default 10, range 1-60)
	GcEnabled                 bool          // enable background GC (GC_ENABLED, default true)
	MultipartCleanupRetention time.Duration // stale multipart retention (MULTIPART_RETENTION_HOURS, default 24h)
	AuthEnabled               bool          // enable authentication (FS_AUTH_ENABLED, default false)
	AuthRegion                string        // auth signing region (FS_AUTH_REGION, default "us-east-1")
	AuthSkew                  time.Duration // allowed clock skew (FS_AUTH_CLOCK_SKEW_SECONDS, default 300s)
	AuthMaxPresign            time.Duration // max presign lifetime (FS_AUTH_MAX_PRESIGN_SECONDS, default 86400s)
	AuthMasterKey             string        // master key material (FS_MASTER_KEY)
	AuthBootstrapAccessKey    string        // bootstrap root access key (FS_ROOT_USER)
	AuthBootstrapSecretKey    string        // bootstrap root secret key (FS_ROOT_PASSWORD)
	AuthBootstrapPolicy       string        // bootstrap root policy JSON (FS_ROOT_POLICY_JSON)
	AdminAPIEnabled           bool          // expose the admin API (ADMIN_API_ENABLED, default true)
}
// NewConfig builds a Config from environment variables, optionally loaded
// from a .env file via godotenv, applying defaults and range clamping.
func NewConfig() *Config {
	// Best-effort .env load; a missing file is not an error.
	_ = godotenv.Load()

	logFormat := strings.ToLower(firstNonEmpty(
		strings.TrimSpace(os.Getenv("LOG_FORMAT")),
		strings.TrimSpace(os.Getenv("LOG_TYPE")),
		"text",
	))
	// Anything other than the two supported formats falls back to text.
	if logFormat != "json" && logFormat != "text" {
		logFormat = "text"
	}

	cfg := &Config{
		DataPath:   sanitizeDataPath(os.Getenv("DATA_PATH")),
		Address:    firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"),
		Port:       envIntRange("PORT", 2600, 1, 65535),
		ChunkSize:  envIntRange("CHUNK_SIZE", 8192000, 1, 64*1024*1024),
		LogLevel:   strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")),
		LogFormat:  logFormat,
		AuditLog:   envBool("AUDIT_LOG", true),
		GcInterval: time.Duration(envIntRange("GC_INTERVAL", 10, 1, 60)) * time.Minute,
		GcEnabled:  envBool("GC_ENABLED", true),
		MultipartCleanupRetention: time.Duration(
			envIntRange("MULTIPART_RETENTION_HOURS", 24, 1, 24*30),
		) * time.Hour,
		AuthEnabled:            envBool("FS_AUTH_ENABLED", false),
		AuthRegion:             firstNonEmpty(strings.TrimSpace(os.Getenv("FS_AUTH_REGION")), "us-east-1"),
		AuthSkew:               time.Duration(envIntRange("FS_AUTH_CLOCK_SKEW_SECONDS", 300, 30, 3600)) * time.Second,
		AuthMaxPresign:         time.Duration(envIntRange("FS_AUTH_MAX_PRESIGN_SECONDS", 86400, 60, 86400)) * time.Second,
		AuthMasterKey:          strings.TrimSpace(os.Getenv("FS_MASTER_KEY")),
		AuthBootstrapAccessKey: strings.TrimSpace(os.Getenv("FS_ROOT_USER")),
		AuthBootstrapSecretKey: strings.TrimSpace(os.Getenv("FS_ROOT_PASSWORD")),
		AuthBootstrapPolicy:    strings.TrimSpace(os.Getenv("FS_ROOT_POLICY_JSON")),
		AdminAPIEnabled:        envBool("ADMIN_API_ENABLED", true),
	}
	return cfg
}
func envIntRange(key string, defaultValue, minValue, maxValue int) int {
raw := strings.TrimSpace(os.Getenv(key))
if raw == "" {
return defaultValue
}
value, err := strconv.Atoi(raw)
if err != nil {
return defaultValue
}
if value < minValue || value > maxValue {
return defaultValue
}
return value
}
func envBool(key string, defaultValue bool) bool {
raw := strings.TrimSpace(os.Getenv(key))
if raw == "" {
return defaultValue
}
value, err := strconv.ParseBool(raw)
if err != nil {
return defaultValue
}
return value
}
// firstNonEmpty returns the first value that is not the empty string, or
// "" when every value is empty (or none is given).
func firstNonEmpty(values ...string) string {
	for i := range values {
		if values[i] != "" {
			return values[i]
		}
	}
	return ""
}
func sanitizeDataPath(raw string) string {
cleaned := strings.TrimSpace(raw)
if cleaned == "" {
cleaned = "."
}
cleaned = filepath.Clean(cleaned)
if abs, err := filepath.Abs(cleaned); err == nil {
return abs
}
return cleaned
}

54
utils/utils.go Normal file
View File

@@ -0,0 +1,54 @@
package utils
import (
"encoding/xml"
"fs/models"
"sort"
"strings"
"time"
)
// ConstructXMLResponseForObjectList renders an S3-style ListBucketResult
// XML document for the given bucket and object manifests. Top-level key
// prefixes (everything before the first "/") are reported as sorted
// CommonPrefixes; the listing is never truncated.
func ConstructXMLResponseForObjectList(bucket string, objects []*models.ObjectManifest) (string, error) {
	listing := models.ListBucketResult{
		Xmlns:       "http://s3.amazonaws.com/doc/2006-03-01/",
		Name:        bucket,
		Prefix:      "",
		KeyCount:    len(objects),
		MaxKeys:     1000,
		IsTruncated: false,
	}
	seenPrefixes := make(map[string]struct{})
	for _, manifest := range objects {
		entry := models.Contents{
			Key:          manifest.Key,
			LastModified: time.Unix(manifest.CreatedAt, 0).UTC().Format("2006-01-02T15:04:05.000Z"),
			ETag:         "\"" + manifest.ETag + "\"",
			Size:         manifest.Size,
			StorageClass: "STANDARD",
		}
		listing.Contents = append(listing.Contents, entry)
		// Record the first path segment (with trailing slash) as a prefix.
		if idx := strings.Index(manifest.Key, "/"); idx >= 0 {
			seenPrefixes[manifest.Key[:idx+1]] = struct{}{}
		}
	}
	orderedPrefixes := make([]string, 0, len(seenPrefixes))
	for prefix := range seenPrefixes {
		orderedPrefixes = append(orderedPrefixes, prefix)
	}
	sort.Strings(orderedPrefixes)
	for _, prefix := range orderedPrefixes {
		listing.CommonPrefixes = append(listing.CommonPrefixes, models.CommonPrefixes{Prefix: prefix})
	}
	payload, err := xml.MarshalIndent(listing, "", " ")
	if err != nil {
		return "", err
	}
	return xml.Header + string(payload), nil
}