31 Commits

Author SHA1 Message Date
115d825234 HOTFIX: Fixed streaming problems with size and equal sign encoding 2026-03-13 00:25:48 +01:00
237063d9fc Merge pull request #8 from ferdzo/fix/bucket-creation-status201-to-200ok
Changed 201 Created to 200 OK when creating bucket
2026-03-12 00:29:05 +01:00
c2215d8589 Changed 201 Created to 200 OK when creating bucket 2026-03-12 00:28:01 +01:00
82cb58dff1 README.md edit 2026-03-11 23:39:25 +01:00
b592d6a2f0 Merge pull request #7 from ferdzo/feat/cli
Fs CLI
2026-03-11 20:27:22 +01:00
ef12326975 Copilot suggestions fixed 2026-03-11 20:26:17 +01:00
a23577d531 Update cmd/admin_snapshot.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-11 20:19:50 +01:00
e8256d66e3 Added Github Action for build and release. 2026-03-11 20:18:24 +01:00
ad53a6d8ac Backup/restore option for cli 2026-03-11 20:12:00 +01:00
cfb9b591ac Policy example documentation 2026-03-11 00:50:09 +01:00
b27f1186cf Remove Role command. 2026-03-11 00:47:24 +01:00
f57c7b8390 Initial FS CLI 2026-03-11 00:40:51 +01:00
181cd42bbf Changed environment variable names for auth settings. 2026-03-03 23:27:16 +01:00
1a0f15a313 Merge pull request #5 from ferdzo/feature/auth
Authentication
2026-03-03 16:12:54 +01:00
46eb093d83 Fixed copilot suggestions. 2026-03-03 00:38:49 +01:00
9abffd056e Update metrics/metrics.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-03 00:35:16 +01:00
cd7a1b4956 Update storage/blob.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-03 00:34:53 +01:00
cfec3afb49 S3 compatibility and openapi spec. 2026-03-03 00:24:45 +01:00
66e3db44dc add admin endpoints for user policy and status updates 2026-03-03 00:11:39 +01:00
57951fda38 add admin user delete endpoint and service support 2026-03-02 23:58:12 +01:00
22cfb820f9 skip S3 policy check for admin routes after signature verify 2026-03-02 23:54:05 +01:00
93a3aabf7d allow signed admin routes before S3 policy resolution 2026-03-02 23:51:46 +01:00
9b8d0b2b3e add auth service tests for user admin primitives 2026-03-02 23:39:39 +01:00
7a7f570882 add admin v1 create/list/get HTTP endpoints 2026-03-02 23:38:58 +01:00
828b7c7c34 add auth service user create/list/get primitives 2026-03-02 23:37:33 +01:00
96e3b0e042 wire admin API feature flag 2026-03-02 23:36:09 +01:00
651413d494 Merge pull request #6 from ferdzo/feature/metrics
Metrics endpoint
2026-03-02 23:28:05 +01:00
c03bd3e3a2 Minimal fixes for metrics 2026-03-02 23:26:57 +01:00
8c9cd96213 Auth for metrics, removed unwanted metrics and fixed tests. 2026-03-02 22:30:15 +01:00
Andrej Mickov
6ca3fb8701 Updated metrics 2026-02-27 16:38:51 +01:00
Andrej Mickov
f04f7601c0 Initial metrics endpoint added in Prometheus style 2026-02-27 14:59:23 +01:00
41 changed files with 5262 additions and 213 deletions

View File

@@ -7,13 +7,14 @@ ADDRESS=0.0.0.0
GC_INTERVAL=10 GC_INTERVAL=10
GC_ENABLED=true GC_ENABLED=true
MULTIPART_RETENTION_HOURS=24 MULTIPART_RETENTION_HOURS=24
AUTH_ENABLED=false FS_AUTH_ENABLED=false
AUTH_REGION=us-east-1 FS_AUTH_REGION=us-east-1
AUTH_SKEW_SECONDS=300 FS_AUTH_CLOCK_SKEW_SECONDS=300
AUTH_MAX_PRESIGN_SECONDS=86400 FS_AUTH_MAX_PRESIGN_SECONDS=86400
# When AUTH_ENABLED=true you MUST set AUTH_MASTER_KEY to a strong random value, e.g.: # When FS_AUTH_ENABLED=true you MUST set FS_MASTER_KEY to a strong random value, e.g.:
# openssl rand -base64 32 # openssl rand -base64 32
AUTH_MASTER_KEY=REPLACE_WITH_SECURE_RANDOM_KEY FS_MASTER_KEY=REPLACE_WITH_SECURE_RANDOM_KEY
AUTH_BOOTSTRAP_ACCESS_KEY= FS_ROOT_USER=
AUTH_BOOTSTRAP_SECRET_KEY= FS_ROOT_PASSWORD=
AUTH_BOOTSTRAP_POLICY= FS_ROOT_POLICY_JSON=
ADMIN_API_ENABLED=true

24
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
name: CI
on:
push:
branches: ["main"]
pull_request:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Run tests
run: |
export GOCACHE=/tmp/go-build-cache
go test ./...

66
.github/workflows/release-image.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
name: Release Image
on:
push:
tags:
- "v*.*.*"
workflow_dispatch:
permissions:
contents: read
packages: write
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set build date
id: date
run: echo "value=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$GITHUB_OUTPUT"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
labels: |
org.opencontainers.image.title=fs
org.opencontainers.image.description=Lightweight S3-compatible object storage
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.created=${{ steps.date.outputs.value }}
- name: Build and push image
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
VERSION=${{ github.ref_name }}
COMMIT=${{ github.sha }}
DATE=${{ steps.date.outputs.value }}

View File

@@ -6,7 +6,13 @@ COPY go.mod go.sum ./
RUN go mod download RUN go mod download
COPY . . COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/fs . ARG VERSION=dev
ARG COMMIT=none
ARG DATE=unknown
RUN CGO_ENABLED=0 GOOS=linux go build \
-trimpath \
-ldflags "-s -w -X main.version=${VERSION} -X main.commit=${COMMIT} -X main.date=${DATE}" \
-o /app/fs .
FROM alpine:3.23 AS runner FROM alpine:3.23 AS runner

111
README.md
View File

@@ -2,6 +2,13 @@
An experimental Object Storage written in Go that should be partially compatible with S3 An experimental Object Storage written in Go that should be partially compatible with S3
## Running
Single binary, two modes (server and admin CLI):
- `fs` (no subcommand) starts the server (backward compatible)
- `fs server` starts the server explicitly
- `fs admin ...` runs admin CLI commands
## Features ## Features
Bucket operations: Bucket operations:
@@ -34,17 +41,115 @@ Authentication:
- Local credential/policy store in bbolt - Local credential/policy store in bbolt
- Bootstrap access key/secret via environment variables - Bootstrap access key/secret via environment variables
Admin API (JSON):
- `POST /_admin/v1/users`
- `GET /_admin/v1/users`
- `GET /_admin/v1/users/{accessKeyId}`
- `PUT /_admin/v1/users/{accessKeyId}/policy`
- `PUT /_admin/v1/users/{accessKeyId}/status`
- `DELETE /_admin/v1/users/{accessKeyId}`
Admin API policy examples (SigV4):
```bash
ENDPOINT="http://localhost:2600"
REGION="us-east-1"
ADMIN_ACCESS_KEY="${FS_ROOT_USER}"
ADMIN_SECRET_KEY="${FS_ROOT_PASSWORD}"
SIGV4="aws:amz:${REGION}:s3"
```
Replace user policy with one scoped statement:
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "backup",
"prefix": "restic/*"
}
]
}
}'
```
Set multiple statements (for multiple buckets):
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject"],
"bucket": "test-bucket",
"prefix": "*"
},
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "test-bucket-2",
"prefix": "*"
}
]
}
}'
```
Admin CLI:
- `fs admin user create --access-key backup-user --role readwrite`
- `fs admin user list`
- `fs admin user get backup-user`
- `fs admin user set-status backup-user --status disabled`
- `fs admin user set-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role readwrite --bucket backups-2` (appends another statement)
- `fs admin user remove-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role admin --replace` (replaces all statements)
- `fs admin user delete backup-user`
- `fs admin snapshot create --data-path /var/lib/fs --out /backup/fs-20260311.tar.gz`
- `fs admin snapshot inspect --file /backup/fs-20260311.tar.gz`
- `fs admin snapshot restore --file /backup/fs-20260311.tar.gz --data-path /var/lib/fs --force`
- `fs admin diag health`
- `fs admin diag version`
## Auth Setup ## Auth Setup
Required when `AUTH_ENABLED=true`: Required when `FS_AUTH_ENABLED=true`:
- `AUTH_MASTER_KEY` must be base64 for 32 decoded bytes (AES-256 key), e.g. `openssl rand -base64 32` - `FS_MASTER_KEY` must be base64 for 32 decoded bytes (AES-256 key), e.g. `openssl rand -base64 32`
- `AUTH_BOOTSTRAP_ACCESS_KEY` and `AUTH_BOOTSTRAP_SECRET_KEY` define initial credentials - `FS_ROOT_USER` and `FS_ROOT_PASSWORD` define initial credentials
- `ADMIN_API_ENABLED=true` enables `/_admin/v1/*` routes (bootstrap key only)
Reference: `auth/README.md` Reference: `auth/README.md`
Additional docs:
- Admin OpenAPI spec: `docs/admin-api-openapi.yaml`
- S3 compatibility matrix: `docs/s3-compatibility.md`
CLI credential/env resolution for `fs admin`:
- Flags: `--access-key`, `--secret-key`, `--endpoint`, `--region`
- Env fallback:
- `FS_ROOT_USER` / `FS_ROOT_PASSWORD` (same defaults as server bootstrap)
- `FSCLI_ACCESS_KEY` / `FSCLI_SECRET_KEY`
- `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY`
- `FSCLI_ENDPOINT` (fallback to `ADDRESS` + `PORT`, then `http://localhost:2600`)
- `FSCLI_REGION` (fallback `FS_AUTH_REGION`, default `us-east-1`)
Note:
- `fs admin snapshot ...` commands operate locally on filesystem paths and do not require endpoint or auth credentials.
Health: Health:
- `GET /healthz` - `GET /healthz`
- `HEAD /healthz` - `HEAD /healthz`
- `GET /metrics` (Prometheus exposition format)
- `HEAD /metrics`
## Limitations ## Limitations

300
api/admin_api.go Normal file
View File

@@ -0,0 +1,300 @@
package api
import (
"encoding/json"
"errors"
"fs/auth"
"fs/models"
"io"
"net/http"
"strconv"
"strings"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
const (
	// maxAdminJSONBodyBytes caps admin request bodies at 1 MiB.
	maxAdminJSONBodyBytes = 1 << 20
	// defaultAdminPageSize is used when no "limit" query parameter is given.
	defaultAdminPageSize = 100
	// maxAdminPageSize is the largest accepted "limit" query parameter value.
	maxAdminPageSize = 1000
)

// adminErrorResponse is the JSON error envelope returned by all admin endpoints.
type adminErrorResponse struct {
	Code      string `json:"code"`
	Message   string `json:"message"`
	RequestID string `json:"requestId,omitempty"`
}

// adminCreateUserRequest is the body of POST /_admin/v1/users.
type adminCreateUserRequest struct {
	AccessKeyID string            `json:"accessKeyId"`
	SecretKey   string            `json:"secretKey,omitempty"`
	Status      string            `json:"status,omitempty"`
	Policy      models.AuthPolicy `json:"policy"`
}

// adminSetPolicyRequest is the body of PUT /_admin/v1/users/{accessKeyId}/policy.
type adminSetPolicyRequest struct {
	Policy models.AuthPolicy `json:"policy"`
}

// adminSetStatusRequest is the body of PUT /_admin/v1/users/{accessKeyId}/status.
type adminSetStatusRequest struct {
	Status string `json:"status"`
}

// adminUserListItem is one entry in the user listing (no policy or secret).
type adminUserListItem struct {
	AccessKeyID string `json:"accessKeyId"`
	Status      string `json:"status"`
	CreatedAt   int64  `json:"createdAt"`
	UpdatedAt   int64  `json:"updatedAt"`
}

// adminUserListResponse is the paginated response of GET /_admin/v1/users.
type adminUserListResponse struct {
	Items      []adminUserListItem `json:"items"`
	NextCursor string              `json:"nextCursor,omitempty"`
}

// adminUserResponse is the full user representation. SecretKey is only
// populated on user creation (see handleAdminCreateUser).
type adminUserResponse struct {
	AccessKeyID string             `json:"accessKeyId"`
	Status      string             `json:"status"`
	CreatedAt   int64              `json:"createdAt"`
	UpdatedAt   int64              `json:"updatedAt"`
	Policy      *models.AuthPolicy `json:"policy,omitempty"`
	SecretKey   string             `json:"secretKey,omitempty"`
}
// registerAdminRoutes mounts the /_admin/v1 user-management endpoints on the
// handler's router. Authorization is not enforced here; every handler calls
// requireBootstrapAdmin itself.
func (h *Handler) registerAdminRoutes() {
	h.router.Route("/_admin/v1", func(r chi.Router) {
		r.Post("/users", h.handleAdminCreateUser)
		r.Get("/users", h.handleAdminListUsers)
		r.Get("/users/{accessKeyId}", h.handleAdminGetUser)
		r.Put("/users/{accessKeyId}/policy", h.handleAdminSetUserPolicy)
		r.Put("/users/{accessKeyId}/status", h.handleAdminSetUserStatus)
		r.Delete("/users/{accessKeyId}", h.handleAdminDeleteUser)
	})
}
// handleAdminCreateUser handles POST /_admin/v1/users: it decodes the create
// request, delegates to the auth service, and responds 201 Created with the
// new user. This is the only endpoint that returns the secret key.
func (h *Handler) handleAdminCreateUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	var body adminCreateUserRequest
	if decodeErr := decodeJSONBody(w, r, &body); decodeErr != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", decodeErr.Error())
		return
	}

	input := auth.CreateUserInput{
		AccessKeyID: body.AccessKeyID,
		SecretKey:   body.SecretKey,
		Status:      body.Status,
		Policy:      body.Policy,
	}
	user, err := h.authSvc.CreateUser(input)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}

	writeJSON(w, http.StatusCreated, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
		SecretKey:   user.SecretKey,
	})
}
// handleAdminListUsers handles GET /_admin/v1/users with optional "limit"
// (1..maxAdminPageSize) and "cursor" query parameters. The response carries
// a nextCursor for fetching the following page.
func (h *Handler) handleAdminListUsers(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	query := r.URL.Query()
	pageSize := defaultAdminPageSize
	if raw := strings.TrimSpace(query.Get("limit")); raw != "" {
		n, convErr := strconv.Atoi(raw)
		if convErr != nil || n < 1 || n > maxAdminPageSize {
			writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", "limit must be between 1 and 1000")
			return
		}
		pageSize = n
	}

	users, nextCursor, err := h.authSvc.ListUsers(pageSize, strings.TrimSpace(query.Get("cursor")))
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}

	resp := adminUserListResponse{
		Items:      make([]adminUserListItem, 0, len(users)),
		NextCursor: nextCursor,
	}
	for _, u := range users {
		resp.Items = append(resp.Items, adminUserListItem{
			AccessKeyID: u.AccessKeyID,
			Status:      u.Status,
			CreatedAt:   u.CreatedAt,
			UpdatedAt:   u.UpdatedAt,
		})
	}
	writeJSON(w, http.StatusOK, resp)
}
// handleAdminGetUser handles GET /_admin/v1/users/{accessKeyId}. The secret
// key is never included in this response.
func (h *Handler) handleAdminGetUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	user, err := h.authSvc.GetUser(chi.URLParam(r, "accessKeyId"))
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}

	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// handleAdminDeleteUser handles DELETE /_admin/v1/users/{accessKeyId},
// responding 204 No Content on success.
func (h *Handler) handleAdminDeleteUser(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	err := h.authSvc.DeleteUser(chi.URLParam(r, "accessKeyId"))
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// handleAdminSetUserPolicy handles PUT /_admin/v1/users/{accessKeyId}/policy.
// The supplied policy replaces the user's current one; the updated user is
// echoed back (without the secret key).
func (h *Handler) handleAdminSetUserPolicy(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	var body adminSetPolicyRequest
	if decodeErr := decodeJSONBody(w, r, &body); decodeErr != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", decodeErr.Error())
		return
	}

	user, err := h.authSvc.SetUserPolicy(chi.URLParam(r, "accessKeyId"), body.Policy)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}

	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// handleAdminSetUserStatus handles PUT /_admin/v1/users/{accessKeyId}/status,
// e.g. to enable or disable a user, and echoes the updated user back.
func (h *Handler) handleAdminSetUserStatus(w http.ResponseWriter, r *http.Request) {
	if !h.requireBootstrapAdmin(w, r) {
		return
	}

	var body adminSetStatusRequest
	if decodeErr := decodeJSONBody(w, r, &body); decodeErr != nil {
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", decodeErr.Error())
		return
	}

	user, err := h.authSvc.SetUserStatus(chi.URLParam(r, "accessKeyId"), body.Status)
	if err != nil {
		writeMappedAdminError(w, r, err)
		return
	}

	writeJSON(w, http.StatusOK, adminUserResponse{
		AccessKeyID: user.AccessKeyID,
		Status:      user.Status,
		CreatedAt:   user.CreatedAt,
		UpdatedAt:   user.UpdatedAt,
		Policy:      &user.Policy,
	})
}
// requireBootstrapAdmin reports whether the request is authenticated as the
// configured bootstrap access key. On failure it writes a 403 JSON error and
// returns false; callers must return immediately in that case.
func (h *Handler) requireBootstrapAdmin(w http.ResponseWriter, r *http.Request) bool {
	rc, ok := auth.GetRequestContext(r.Context())
	switch {
	case !ok || !rc.Authenticated:
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin credentials are required")
		return false
	case h.authSvc == nil:
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin access is not configured")
		return false
	}

	// Only the bootstrap identity may use admin endpoints.
	root := strings.TrimSpace(h.authSvc.Config().BootstrapAccessKey)
	if root == "" || root != rc.AccessKeyID {
		writeAdminError(w, r, http.StatusForbidden, "Forbidden", "admin access denied")
		return false
	}
	return true
}
// decodeJSONBody decodes the request body (capped at maxAdminJSONBodyBytes)
// into dst. Unknown fields are rejected, and any trailing content after the
// first JSON value is treated as an error.
func decodeJSONBody(w http.ResponseWriter, r *http.Request, dst any) error {
	r.Body = http.MaxBytesReader(w, r.Body, maxAdminJSONBodyBytes)
	dec := json.NewDecoder(r.Body)
	dec.DisallowUnknownFields()
	if err := dec.Decode(dst); err != nil {
		return err
	}
	// A second decode must hit EOF; anything else means extra data followed
	// the first JSON value.
	var extra struct{}
	if err := dec.Decode(&extra); err != io.EOF {
		return errors.New("request body must contain a single JSON object")
	}
	return nil
}
// writeMappedAdminError translates auth-service sentinel errors into admin API
// HTTP responses. Unrecognized errors are masked as a generic 500 so internal
// details never leak to clients.
func writeMappedAdminError(w http.ResponseWriter, r *http.Request, err error) {
	switch {
	case errors.Is(err, auth.ErrInvalidUserInput):
		// Validation failures echo the underlying message to help the caller.
		writeAdminError(w, r, http.StatusBadRequest, "InvalidRequest", err.Error())
	case errors.Is(err, auth.ErrUserAlreadyExists):
		writeAdminError(w, r, http.StatusConflict, "UserAlreadyExists", "user already exists")
	case errors.Is(err, auth.ErrUserNotFound):
		writeAdminError(w, r, http.StatusNotFound, "UserNotFound", "user was not found")
	case errors.Is(err, auth.ErrAuthNotEnabled):
		writeAdminError(w, r, http.StatusServiceUnavailable, "AuthDisabled", "authentication subsystem is disabled")
	default:
		writeAdminError(w, r, http.StatusInternalServerError, "InternalError", "internal server error")
	}
}
// writeAdminError emits the admin JSON error envelope with the given status.
// The chi request ID, when present, is echoed both as the x-amz-request-id
// header and in the response body.
func writeAdminError(w http.ResponseWriter, r *http.Request, status int, code string, message string) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	reqID := middleware.GetReqID(r.Context())
	if reqID != "" {
		w.Header().Set("x-amz-request-id", reqID)
	}
	w.WriteHeader(status)
	body := adminErrorResponse{
		Code:      code,
		Message:   message,
		RequestID: reqID,
	}
	// Headers are already sent, so an encode failure cannot be reported.
	_ = json.NewEncoder(w).Encode(body)
}
// writeJSON serializes payload as JSON with the given HTTP status code.
func writeJSON(w http.ResponseWriter, status int, payload any) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(status)
	enc := json.NewEncoder(w)
	// Headers are already sent, so an encode failure cannot be reported.
	_ = enc.Encode(payload)
}

View File

@@ -2,6 +2,7 @@ package api
import ( import (
"bufio" "bufio"
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/xml" "encoding/xml"
@@ -10,6 +11,7 @@ import (
"fs/auth" "fs/auth"
"fs/logging" "fs/logging"
"fs/metadata" "fs/metadata"
"fs/metrics"
"fs/models" "fs/models"
"fs/service" "fs/service"
"io" "io"
@@ -32,6 +34,7 @@ type Handler struct {
logger *slog.Logger logger *slog.Logger
logConfig logging.Config logConfig logging.Config
authSvc *auth.Service authSvc *auth.Service
adminAPI bool
} }
const ( const (
@@ -46,7 +49,7 @@ const (
serverMaxConnections = 1024 serverMaxConnections = 1024
) )
func NewHandler(svc *service.ObjectService, logger *slog.Logger, logConfig logging.Config, authSvc *auth.Service) *Handler { func NewHandler(svc *service.ObjectService, logger *slog.Logger, logConfig logging.Config, authSvc *auth.Service, adminAPI bool) *Handler {
r := chi.NewRouter() r := chi.NewRouter()
r.Use(middleware.RequestID) r.Use(middleware.RequestID)
r.Use(middleware.Recoverer) r.Use(middleware.Recoverer)
@@ -60,6 +63,7 @@ func NewHandler(svc *service.ObjectService, logger *slog.Logger, logConfig loggi
logger: logger, logger: logger,
logConfig: logConfig, logConfig: logConfig,
authSvc: authSvc, authSvc: authSvc,
adminAPI: adminAPI,
} }
return h return h
} }
@@ -70,7 +74,12 @@ func (h *Handler) setupRoutes() {
h.router.Get("/healthz", h.handleHealth) h.router.Get("/healthz", h.handleHealth)
h.router.Head("/healthz", h.handleHealth) h.router.Head("/healthz", h.handleHealth)
h.router.Get("/metrics", h.handleMetrics)
h.router.Head("/metrics", h.handleMetrics)
h.router.Get("/", h.handleGetBuckets) h.router.Get("/", h.handleGetBuckets)
if h.adminAPI {
h.registerAdminRoutes()
}
h.router.Get("/{bucket}/", h.handleGetBucket) h.router.Get("/{bucket}/", h.handleGetBucket)
h.router.Get("/{bucket}", h.handleGetBucket) h.router.Get("/{bucket}", h.handleGetBucket)
@@ -106,6 +115,18 @@ func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) {
} }
} }
func (h *Handler) handleMetrics(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
if r.Method == http.MethodHead {
w.WriteHeader(http.StatusOK)
return
}
payload := metrics.Default.RenderPrometheus()
w.Header().Set("Content-Length", strconv.Itoa(len(payload)))
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(payload))
}
func validateObjectKey(key string) *s3APIError { func validateObjectKey(key string) *s3APIError {
if key == "" { if key == "" {
err := s3ErrInvalidObjectKey err := s3ErrInvalidObjectKey
@@ -222,6 +243,7 @@ func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) {
writeS3Error(w, r, s3ErrMalformedXML, r.URL.Path) writeS3Error(w, r, s3ErrMalformedXML, r.URL.Path)
return return
} }
metrics.Default.ObserveBatchSize(len(req.Parts))
manifest, err := h.svc.CompleteMultipartUpload(bucket, key, uploadID, req.Parts) manifest, err := h.svc.CompleteMultipartUpload(bucket, key, uploadID, req.Parts)
if err != nil { if err != nil {
@@ -297,6 +319,7 @@ func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
return return
} }
metrics.Default.ObserveBatchSize(1)
if ifNoneMatch := strings.TrimSpace(r.Header.Get("If-None-Match")); ifNoneMatch != "" { if ifNoneMatch := strings.TrimSpace(r.Header.Get("If-None-Match")); ifNoneMatch != "" {
manifest, err := h.svc.HeadObject(bucket, key) manifest, err := h.svc.HeadObject(bucket, key)
@@ -378,13 +401,21 @@ func shouldDecodeAWSChunkedPayload(r *http.Request) bool {
return true return true
} }
signingMode := strings.ToLower(r.Header.Get("x-amz-content-sha256")) signingMode := strings.ToLower(r.Header.Get("x-amz-content-sha256"))
return strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload") if strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload") {
return true
}
return strings.HasPrefix(signingMode, "streaming-unsigned-payload")
} }
func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser { func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
probedReader, isAWSChunked := probeAWSChunkedPayload(src)
if !isAWSChunked {
return io.NopCloser(probedReader)
}
pr, pw := io.Pipe() pr, pw := io.Pipe()
go func() { go func() {
if err := decodeAWSChunkedPayload(src, pw); err != nil { if err := decodeAWSChunkedPayload(probedReader, pw); err != nil {
_ = pw.CloseWithError(err) _ = pw.CloseWithError(err)
return return
} }
@@ -393,6 +424,30 @@ func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
return pr return pr
} }
// probeAWSChunkedPayload peeks at the first line of src to decide whether the
// stream looks like an aws-chunked payload: a hex chunk size, optionally
// followed by ";<chunk extensions>". It returns a reader that replays every
// byte consumed during probing, plus the detection verdict.
func probeAWSChunkedPayload(src io.Reader) (io.Reader, bool) {
	buffered := bufio.NewReaderSize(src, 512)
	firstLine, readErr := buffered.ReadSlice('\n')
	// Whatever we decide, the caller must see the original bytes again.
	rewound := io.MultiReader(bytes.NewReader(firstLine), buffered)
	if readErr != nil {
		// No newline within the buffer (or EOF/short read): not chunk framing.
		return rewound, false
	}

	token := strings.TrimRight(string(firstLine), "\r\n")
	if sep := strings.IndexByte(token, ';'); sep >= 0 {
		token = token[:sep]
	}
	token = strings.TrimSpace(token)
	if token == "" {
		return rewound, false
	}
	if n, parseErr := strconv.ParseInt(token, 16, 64); parseErr != nil || n < 0 {
		return rewound, false
	}
	return rewound, true
}
func decodeAWSChunkedPayload(src io.Reader, dst io.Writer) error { func decodeAWSChunkedPayload(src io.Reader, dst io.Writer) error {
reader := bufio.NewReader(src) reader := bufio.NewReader(src)
for { for {
@@ -466,7 +521,7 @@ func (h *Handler) handlePutBucket(w http.ResponseWriter, r *http.Request) {
writeMappedS3Error(w, r, err) writeMappedS3Error(w, r, err)
return return
} }
w.WriteHeader(http.StatusCreated) w.WriteHeader(http.StatusOK)
} }
func (h *Handler) handleDeleteBucket(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleDeleteBucket(w http.ResponseWriter, r *http.Request) {
@@ -509,6 +564,7 @@ func (h *Handler) handlePostBucket(w http.ResponseWriter, r *http.Request) {
writeS3Error(w, r, s3ErrTooManyDeleteObjects, r.URL.Path) writeS3Error(w, r, s3ErrTooManyDeleteObjects, r.URL.Path)
return return
} }
metrics.Default.ObserveBatchSize(len(req.Objects))
keys := make([]string, 0, len(req.Objects)) keys := make([]string, 0, len(req.Objects))
response := models.DeleteObjectsResult{ response := models.DeleteObjectsResult{
@@ -627,6 +683,7 @@ func newLimitedListener(inner net.Listener, maxConns int) net.Listener {
if maxConns <= 0 { if maxConns <= 0 {
return inner return inner
} }
metrics.Default.SetConnectionPoolMax(maxConns)
return &limitedListener{ return &limitedListener{
Listener: inner, Listener: inner,
slots: make(chan struct{}, maxConns), slots: make(chan struct{}, maxConns),
@@ -634,15 +691,26 @@ func newLimitedListener(inner net.Listener, maxConns int) net.Listener {
} }
func (l *limitedListener) Accept() (net.Conn, error) { func (l *limitedListener) Accept() (net.Conn, error) {
l.slots <- struct{}{} select {
case l.slots <- struct{}{}:
default:
metrics.Default.IncConnectionPoolWait()
metrics.Default.IncRequestQueueLength()
l.slots <- struct{}{}
metrics.Default.DecRequestQueueLength()
}
conn, err := l.Listener.Accept() conn, err := l.Listener.Accept()
if err != nil { if err != nil {
<-l.slots <-l.slots
return nil, err return nil, err
} }
metrics.Default.IncConnectionPoolActive()
return &limitedConn{ return &limitedConn{
Conn: conn, Conn: conn,
done: func() { <-l.slots }, done: func() {
<-l.slots
metrics.Default.DecConnectionPoolActive()
},
}, nil }, nil
} }
@@ -705,8 +773,15 @@ func (h *Handler) handleGetBucket(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query() query := r.URL.Query()
if query.Has("location") { if query.Has("location") {
xmlResponse := `<?xml version="1.0" encoding="UTF-8"?> region := "us-east-1"
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-east-1</LocationConstraint>` if h.authSvc != nil {
candidate := strings.TrimSpace(h.authSvc.Config().Region)
if candidate != "" {
region = candidate
}
}
xmlResponse := fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">%s</LocationConstraint>`, region)
w.Header().Set("Content-Type", "application/xml; charset=utf-8") w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.Header().Set("Content-Length", strconv.Itoa(len(xmlResponse))) w.Header().Set("Content-Length", strconv.Itoa(len(xmlResponse)))

View File

@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fs/auth" "fs/auth"
"fs/metadata" "fs/metadata"
"fs/metrics"
"fs/models" "fs/models"
"fs/service" "fs/service"
"net/http" "net/http"
@@ -200,12 +201,19 @@ func mapToS3Error(err error) s3APIError {
func writeS3Error(w http.ResponseWriter, r *http.Request, apiErr s3APIError, resource string) { func writeS3Error(w http.ResponseWriter, r *http.Request, apiErr s3APIError, resource string) {
requestID := "" requestID := ""
op := "other"
if r != nil { if r != nil {
requestID = middleware.GetReqID(r.Context()) requestID = middleware.GetReqID(r.Context())
isDeletePost := false
if r.Method == http.MethodPost {
_, isDeletePost = r.URL.Query()["delete"]
}
op = metrics.NormalizeHTTPOperation(r.Method, isDeletePost)
if requestID != "" { if requestID != "" {
w.Header().Set("x-amz-request-id", requestID) w.Header().Set("x-amz-request-id", requestID)
} }
} }
metrics.Default.ObserveError(op, apiErr.Code)
w.Header().Set("Content-Type", "application/xml; charset=utf-8") w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.WriteHeader(apiErr.Status) w.WriteHeader(apiErr.Status)

90
app/server.go Normal file
View File

@@ -0,0 +1,90 @@
package app
import (
"context"
"fs/api"
"fs/auth"
"fs/logging"
"fs/metadata"
"fs/service"
"fs/storage"
"fs/utils"
"os"
"path/filepath"
"strconv"
"time"
)
// RunServer wires together configuration, logging, metadata, blob storage,
// auth and the HTTP API, then serves requests until the handler stops or a
// fatal initialization error occurs. The first error encountered is returned.
func RunServer(ctx context.Context) error {
	// Tolerate a nil context from legacy callers.
	if ctx == nil {
		ctx = context.Background()
	}
	config := utils.NewConfig()
	logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog)
	authConfig := auth.ConfigFromValues(
		config.AuthEnabled,
		config.AuthRegion,
		config.AuthSkew,
		config.AuthMaxPresign,
		config.AuthMasterKey,
		config.AuthBootstrapAccessKey,
		config.AuthBootstrapSecretKey,
		config.AuthBootstrapPolicy,
	)
	logger := logging.NewLogger(logConfig)
	// One structured "boot" line summarizing the effective configuration.
	logger.Info("boot",
		"log_level", logConfig.LevelName,
		"log_format", logConfig.Format,
		"audit_log", logConfig.Audit,
		"data_path", config.DataPath,
		"multipart_retention_hours", int(config.MultipartCleanupRetention/time.Hour),
		"auth_enabled", authConfig.Enabled,
		"auth_region", authConfig.Region,
		"admin_api_enabled", config.AdminAPIEnabled,
	)
	if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
		logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
		return err
	}
	dbPath := filepath.Join(config.DataPath, "metadata.db")
	metadataHandler, err := metadata.NewMetadataHandler(dbPath)
	if err != nil {
		logger.Error("failed_to_initialize_metadata_handler", "error", err)
		return err
	}
	blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
	if err != nil {
		// Best-effort close of the already-opened metadata store.
		_ = metadataHandler.Close()
		logger.Error("failed_to_initialize_blob_store", "error", err)
		return err
	}
	objectService := service.NewObjectService(metadataHandler, blobHandler, config.MultipartCleanupRetention)
	authService, err := auth.NewService(authConfig, metadataHandler)
	if err != nil {
		_ = metadataHandler.Close()
		logger.Error("failed_to_initialize_auth_service", "error", err)
		return err
	}
	// Create the bootstrap identity (if configured) before serving traffic.
	if err := authService.EnsureBootstrap(); err != nil {
		_ = metadataHandler.Close()
		logger.Error("failed_to_ensure_bootstrap_auth_identity", "error", err)
		return err
	}
	handler := api.NewHandler(objectService, logger, logConfig, authService, config.AdminAPIEnabled)
	addr := config.Address + ":" + strconv.Itoa(config.Port)
	if config.GcEnabled {
		// Background garbage collector; presumably stops when ctx is
		// cancelled — confirm against ObjectService.RunGC.
		go objectService.RunGC(ctx, config.GcInterval)
	}
	if err := handler.Start(ctx, addr); err != nil {
		logger.Error("server_stopped_with_error", "error", err)
		return err
	}
	return nil
}

View File

@@ -40,18 +40,18 @@ This folder implements S3-compatible request authentication using AWS Signature
## Config Model ## Config Model
Auth is configured through env (read in `utils/config.go`, converted in `auth/config.go`): Auth is configured through env (read in `utils/config.go`, converted in `auth/config.go`):
- `AUTH_ENABLED` - `FS_AUTH_ENABLED`
- `AUTH_REGION` - `FS_AUTH_REGION`
- `AUTH_SKEW_SECONDS` - `FS_AUTH_CLOCK_SKEW_SECONDS`
- `AUTH_MAX_PRESIGN_SECONDS` - `FS_AUTH_MAX_PRESIGN_SECONDS`
- `AUTH_MASTER_KEY` - `FS_MASTER_KEY`
- `AUTH_BOOTSTRAP_ACCESS_KEY` - `FS_ROOT_USER`
- `AUTH_BOOTSTRAP_SECRET_KEY` - `FS_ROOT_PASSWORD`
- `AUTH_BOOTSTRAP_POLICY` (optional JSON) - `FS_ROOT_POLICY_JSON` (optional JSON)
Important: Important:
- If `AUTH_ENABLED=true`, `AUTH_MASTER_KEY` is required. - If `FS_AUTH_ENABLED=true`, `FS_MASTER_KEY` is required.
- `AUTH_MASTER_KEY` must be base64 that decodes to exactly 32 bytes (AES-256 key). - `FS_MASTER_KEY` must be base64 that decodes to exactly 32 bytes (AES-256 key).
## Persistence Model (bbolt) ## Persistence Model (bbolt)
Implemented in metadata layer: Implemented in metadata layer:
@@ -75,7 +75,7 @@ If bootstrap env key/secret are set:
- secret is encrypted with AES-GCM and stored - secret is encrypted with AES-GCM and stored
- policy is created: - policy is created:
- default: full access (`s3:*`, `bucket=*`, `prefix=*`) - default: full access (`s3:*`, `bucket=*`, `prefix=*`)
- or overridden by `AUTH_BOOTSTRAP_POLICY` - or overridden by `FS_ROOT_POLICY_JSON`
## Request Authentication Flow ## Request Authentication Flow
For each non-health request: For each non-health request:
@@ -87,8 +87,8 @@ For each non-health request:
- region must match config - region must match config
3. Validate time: 3. Validate time:
- `x-amz-date` format - `x-amz-date` format
- skew within `AUTH_SKEW_SECONDS` - skew within `FS_AUTH_CLOCK_SKEW_SECONDS`
- presigned expiry within `AUTH_MAX_PRESIGN_SECONDS` - presigned expiry within `FS_AUTH_MAX_PRESIGN_SECONDS`
4. Load identity by access key id. 4. Load identity by access key id.
5. Ensure identity status is active. 5. Ensure identity status is active.
6. Decrypt stored secret using master key. 6. Decrypt stored secret using master key.
@@ -133,18 +133,10 @@ Each audit entry includes method, path, remote IP, and request ID (if present).
- Secret keys are recoverable by server design (required for SigV4 verification). - Secret keys are recoverable by server design (required for SigV4 verification).
- They are encrypted at rest, not hashed. - They are encrypted at rest, not hashed.
- Master key rotation is not implemented yet. - Master key rotation is not implemented yet.
- Keep `AUTH_MASTER_KEY` protected (secret manager/systemd env file/etc.). - Keep `FS_MASTER_KEY` protected (secret manager/systemd env file/etc.).
## Current Scope / Limitations ## Current Scope / Limitations
- No STS/session-token auth yet. - No STS/session-token auth yet.
- No admin API for managing multiple users yet.
- Policy language is intentionally minimal, not full IAM. - Policy language is intentionally minimal, not full IAM.
- No automatic key rotation workflows. - No automatic key rotation workflows.
- No key rotation endpoint for existing users yet.
## Practical Next Step
To support multiple users cleanly, add admin operations in auth service + API:
- create user
- rotate secret
- set policy
- disable/enable
- delete user

View File

@@ -5,6 +5,9 @@ import "errors"
var ( var (
ErrAccessDenied = errors.New("access denied") ErrAccessDenied = errors.New("access denied")
ErrInvalidAccessKeyID = errors.New("invalid access key id") ErrInvalidAccessKeyID = errors.New("invalid access key id")
ErrUserAlreadyExists = errors.New("user already exists")
ErrUserNotFound = errors.New("user not found")
ErrInvalidUserInput = errors.New("invalid user input")
ErrSignatureDoesNotMatch = errors.New("signature does not match") ErrSignatureDoesNotMatch = errors.New("signature does not match")
ErrAuthorizationHeaderMalformed = errors.New("authorization header malformed") ErrAuthorizationHeaderMalformed = errors.New("authorization header malformed")
ErrRequestTimeTooSkewed = errors.New("request time too skewed") ErrRequestTimeTooSkewed = errors.New("request time too skewed")

View File

@@ -1,6 +1,8 @@
package auth package auth
import ( import (
"errors"
"fs/metrics"
"log/slog" "log/slog"
"net" "net"
"net/http" "net/http"
@@ -18,17 +20,20 @@ func Middleware(
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
authCtx := RequestContext{Authenticated: false, AuthType: "none"} authCtx := RequestContext{Authenticated: false, AuthType: "none"}
if svc == nil || !svc.Config().Enabled { if svc == nil || !svc.Config().Enabled {
metrics.Default.ObserveAuth("bypass", "disabled", "auth_disabled")
next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx))) next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx)))
return return
} }
if r.URL.Path == "/healthz" { if r.URL.Path == "/healthz" {
metrics.Default.ObserveAuth("bypass", "none", "public_endpoint")
next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx))) next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx)))
return return
} }
resolvedCtx, err := svc.AuthenticateRequest(r) resolvedCtx, err := svc.AuthenticateRequest(r)
if err != nil { if err != nil {
metrics.Default.ObserveAuth("error", "sigv4", authErrorClass(err))
if auditEnabled && logger != nil { if auditEnabled && logger != nil {
requestID := middleware.GetReqID(r.Context()) requestID := middleware.GetReqID(r.Context())
attrs := []any{ attrs := []any{
@@ -50,6 +55,7 @@ func Middleware(
return return
} }
metrics.Default.ObserveAuth("ok", resolvedCtx.AuthType, "none")
if auditEnabled && logger != nil { if auditEnabled && logger != nil {
requestID := middleware.GetReqID(r.Context()) requestID := middleware.GetReqID(r.Context())
attrs := []any{ attrs := []any{
@@ -69,6 +75,33 @@ func Middleware(
} }
} }
// authErrorClass maps an authentication failure to a stable, low-cardinality
// label for the auth metrics; anything unrecognized falls back to "other".
// errors.Is is used so wrapped sentinel errors still classify correctly.
// NOTE(review): case order matters if an error ever wraps two sentinels;
// more specific classes are listed before ErrAccessDenied.
func authErrorClass(err error) string {
	switch {
	case errors.Is(err, ErrInvalidAccessKeyID):
		return "invalid_access_key"
	case errors.Is(err, ErrSignatureDoesNotMatch):
		return "signature_mismatch"
	case errors.Is(err, ErrAuthorizationHeaderMalformed):
		return "auth_header_malformed"
	case errors.Is(err, ErrRequestTimeTooSkewed):
		return "time_skew"
	case errors.Is(err, ErrExpiredToken):
		return "expired_token"
	case errors.Is(err, ErrNoAuthCredentials):
		return "missing_credentials"
	case errors.Is(err, ErrUnsupportedAuthScheme):
		return "unsupported_auth_scheme"
	case errors.Is(err, ErrInvalidPresign):
		return "invalid_presign"
	case errors.Is(err, ErrCredentialDisabled):
		return "credential_disabled"
	case errors.Is(err, ErrAccessDenied):
		return "access_denied"
	default:
		return "other"
	}
}
func clientIP(remoteAddr string) string { func clientIP(remoteAddr string) string {
host, _, err := net.SplitHostPort(remoteAddr) host, _, err := net.SplitHostPort(remoteAddr)
if err == nil && host != "" { if err == nil && host != "" {

View File

@@ -1,11 +1,15 @@
package auth package auth
import ( import (
"crypto/rand"
"encoding/base64"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"fs/metadata"
"fs/models" "fs/models"
"net/http" "net/http"
"regexp"
"strings" "strings"
"time" "time"
) )
@@ -13,10 +17,42 @@ import (
type Store interface { type Store interface {
GetAuthIdentity(accessKeyID string) (*models.AuthIdentity, error) GetAuthIdentity(accessKeyID string) (*models.AuthIdentity, error)
PutAuthIdentity(identity *models.AuthIdentity) error PutAuthIdentity(identity *models.AuthIdentity) error
DeleteAuthIdentity(accessKeyID string) error
ListAuthIdentities(limit int, after string) ([]models.AuthIdentity, string, error)
GetAuthPolicy(accessKeyID string) (*models.AuthPolicy, error) GetAuthPolicy(accessKeyID string) (*models.AuthPolicy, error)
PutAuthPolicy(policy *models.AuthPolicy) error PutAuthPolicy(policy *models.AuthPolicy) error
DeleteAuthPolicy(accessKeyID string) error
} }
// CreateUserInput carries the parameters for Service.CreateUser.
// SecretKey may be empty, in which case the service generates one.
// Status defaults to "active" when blank (see normalizeUserStatus).
type CreateUserInput struct {
	AccessKeyID string
	SecretKey   string
	Status      string
	Policy      models.AuthPolicy
}

// UserSummary is the compact per-user view returned by ListUsers.
type UserSummary struct {
	AccessKeyID string
	Status      string
	CreatedAt   int64 // unix seconds
	UpdatedAt   int64 // unix seconds
}

// UserDetails is the full per-user view, including the resolved policy.
type UserDetails struct {
	AccessKeyID string
	Status      string
	CreatedAt   int64
	UpdatedAt   int64
	Policy      models.AuthPolicy
}

// CreateUserResult is UserDetails plus the plaintext secret key; the
// secret is only returned here, at creation time.
type CreateUserResult struct {
	UserDetails
	SecretKey string
}

// validAccessKeyID: 3-128 characters total, starting with an alphanumeric,
// followed by alphanumerics plus '.', '_' and '-'.
var validAccessKeyID = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._-]{2,127}$`)
type Service struct { type Service struct {
cfg Config cfg Config
store Store store Store
@@ -137,6 +173,19 @@ func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
return RequestContext{}, ErrSignatureDoesNotMatch return RequestContext{}, ErrSignatureDoesNotMatch
} }
authType := "sigv4-header"
if input.Presigned {
authType = "sigv4-presign"
}
if strings.HasPrefix(r.URL.Path, "/_admin/") {
return RequestContext{
Authenticated: true,
AccessKeyID: identity.AccessKeyID,
AuthType: authType,
}, nil
}
policy, err := s.store.GetAuthPolicy(identity.AccessKeyID) policy, err := s.store.GetAuthPolicy(identity.AccessKeyID)
if err != nil { if err != nil {
return RequestContext{}, ErrAccessDenied return RequestContext{}, ErrAccessDenied
@@ -149,10 +198,6 @@ func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
return RequestContext{}, ErrAccessDenied return RequestContext{}, ErrAccessDenied
} }
authType := "sigv4-header"
if input.Presigned {
authType = "sigv4-presign"
}
return RequestContext{ return RequestContext{
Authenticated: true, Authenticated: true,
AccessKeyID: identity.AccessKeyID, AccessKeyID: identity.AccessKeyID,
@@ -160,6 +205,261 @@ func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
}, nil }, nil
} }
// CreateUser provisions a new identity plus its policy. When
// input.SecretKey is blank a random secret is generated; the plaintext
// secret is returned exactly once in the result. Returns
// ErrUserAlreadyExists for duplicates and ErrInvalidUserInput for bad
// access key ids, short secrets, invalid statuses, or invalid policies.
func (s *Service) CreateUser(input CreateUserInput) (*CreateUserResult, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	accessKeyID := strings.TrimSpace(input.AccessKeyID)
	if !validAccessKeyID.MatchString(accessKeyID) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	secretKey := strings.TrimSpace(input.SecretKey)
	if secretKey == "" {
		generated, err := generateSecretKey(32)
		if err != nil {
			return nil, err
		}
		secretKey = generated
	}
	if len(secretKey) < 8 {
		return nil, fmt.Errorf("%w: secret key must be at least 8 characters", ErrInvalidUserInput)
	}
	status := normalizeUserStatus(input.Status)
	if status == "" {
		return nil, fmt.Errorf("%w: status must be active or disabled", ErrInvalidUserInput)
	}
	policy, err := normalizePolicy(input.Policy, accessKeyID)
	if err != nil {
		return nil, err
	}
	// Reject duplicates; only "not found" is an acceptable lookup outcome.
	existing, err := s.store.GetAuthIdentity(accessKeyID)
	if err == nil && existing != nil {
		return nil, ErrUserAlreadyExists
	}
	if err != nil && !errors.Is(err, metadata.ErrAuthIdentityNotFound) {
		return nil, err
	}
	now := s.now().Unix()
	// Secrets are stored encrypted (recoverable), not hashed, because
	// SigV4 verification needs the plaintext secret server-side.
	ciphertext, nonce, err := encryptSecret(s.masterKey, accessKeyID, secretKey)
	if err != nil {
		return nil, err
	}
	identity := &models.AuthIdentity{
		AccessKeyID: accessKeyID,
		SecretEnc:   ciphertext,
		SecretNonce: nonce,
		EncAlg:      "AES-256-GCM",
		KeyVersion:  "v1",
		Status:      status,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	if err := s.store.PutAuthPolicy(&policy); err != nil {
		// Best-effort rollback: without it, a failed policy write would
		// leave an identity that can authenticate but has no policy record.
		_ = s.store.DeleteAuthIdentity(accessKeyID)
		return nil, err
	}
	return &CreateUserResult{
		UserDetails: UserDetails{
			AccessKeyID: accessKeyID,
			Status:      status,
			CreatedAt:   now,
			UpdatedAt:   now,
			Policy:      policy,
		},
		SecretKey: secretKey,
	}, nil
}
// ListUsers returns up to limit user summaries starting after cursor,
// plus the cursor for the next page ("" once the listing is exhausted).
// The page size is clamped to [1, 1000], defaulting to 100.
func (s *Service) ListUsers(limit int, cursor string) ([]UserSummary, string, error) {
	if !s.cfg.Enabled {
		return nil, "", ErrAuthNotEnabled
	}
	switch {
	case limit <= 0:
		limit = 100
	case limit > 1000:
		limit = 1000
	}
	identities, next, err := s.store.ListAuthIdentities(limit, cursor)
	if err != nil {
		return nil, "", err
	}
	summaries := make([]UserSummary, len(identities))
	for i, id := range identities {
		summaries[i] = UserSummary{
			AccessKeyID: id.AccessKeyID,
			Status:      normalizeUserStatus(id.Status),
			CreatedAt:   id.CreatedAt,
			UpdatedAt:   id.UpdatedAt,
		}
	}
	return summaries, next, nil
}
// GetUser loads one user's identity and policy. It returns
// ErrUserNotFound when either record is missing and ErrInvalidUserInput
// for a blank access key id.
func (s *Service) GetUser(accessKeyID string) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	key := strings.TrimSpace(accessKeyID)
	if key == "" {
		return nil, fmt.Errorf("%w: access key id is required", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(key)
	switch {
	case errors.Is(err, metadata.ErrAuthIdentityNotFound):
		return nil, ErrUserNotFound
	case err != nil:
		return nil, err
	}
	policy, err := s.store.GetAuthPolicy(key)
	switch {
	case errors.Is(err, metadata.ErrAuthPolicyNotFound):
		return nil, ErrUserNotFound
	case err != nil:
		return nil, err
	}
	details := UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      *policy,
	}
	return &details, nil
}
// DeleteUser removes a user's identity and policy. The bootstrap user is
// protected from deletion, and a missing identity maps to ErrUserNotFound.
func (s *Service) DeleteUser(accessKeyID string) error {
	if !s.cfg.Enabled {
		return ErrAuthNotEnabled
	}
	accessKeyID = strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(accessKeyID) {
		return fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	if bootstrap := strings.TrimSpace(s.cfg.BootstrapAccessKey); bootstrap != "" && bootstrap == accessKeyID {
		return fmt.Errorf("%w: bootstrap user cannot be deleted", ErrInvalidUserInput)
	}
	// Existence check first so callers get a clean not-found error.
	if _, err := s.store.GetAuthIdentity(accessKeyID); err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return ErrUserNotFound
		}
		return err
	}
	if err := s.store.DeleteAuthIdentity(accessKeyID); err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return ErrUserNotFound
		}
		return err
	}
	// A missing policy is tolerated; any other failure is surfaced.
	if err := s.store.DeleteAuthPolicy(accessKeyID); err != nil && !errors.Is(err, metadata.ErrAuthPolicyNotFound) {
		return err
	}
	return nil
}
// SetUserPolicy replaces a user's policy with a normalized copy and bumps
// the identity's UpdatedAt timestamp. Missing identity -> ErrUserNotFound.
func (s *Service) SetUserPolicy(accessKeyID string, policy models.AuthPolicy) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	key := strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(key) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(key)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	normalized, err := normalizePolicy(policy, key)
	if err != nil {
		return nil, err
	}
	if err := s.store.PutAuthPolicy(&normalized); err != nil {
		return nil, err
	}
	identity.UpdatedAt = s.now().Unix()
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	details := UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      normalized,
	}
	return &details, nil
}
// SetUserStatus sets a user's status to "active" or "disabled" and
// returns the updated details. The policy is loaded BEFORE the identity
// is written: previously a user with a missing policy record was reported
// as not found only after the identity had already been mutated and
// persisted, leaving a half-updated record behind.
func (s *Service) SetUserStatus(accessKeyID, status string) (*UserDetails, error) {
	if !s.cfg.Enabled {
		return nil, ErrAuthNotEnabled
	}
	accessKeyID = strings.TrimSpace(accessKeyID)
	if !validAccessKeyID.MatchString(accessKeyID) {
		return nil, fmt.Errorf("%w: invalid access key id", ErrInvalidUserInput)
	}
	status = strings.TrimSpace(status)
	if status == "" {
		// Explicit check: normalizeUserStatus would map "" to "active",
		// which is not what a caller of SetUserStatus means.
		return nil, fmt.Errorf("%w: status is required", ErrInvalidUserInput)
	}
	normalizedStatus := normalizeUserStatus(status)
	if normalizedStatus == "" {
		return nil, fmt.Errorf("%w: status must be active or disabled", ErrInvalidUserInput)
	}
	identity, err := s.store.GetAuthIdentity(accessKeyID)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthIdentityNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	policy, err := s.store.GetAuthPolicy(accessKeyID)
	if err != nil {
		if errors.Is(err, metadata.ErrAuthPolicyNotFound) {
			return nil, ErrUserNotFound
		}
		return nil, err
	}
	identity.Status = normalizedStatus
	identity.UpdatedAt = s.now().Unix()
	if err := s.store.PutAuthIdentity(identity); err != nil {
		return nil, err
	}
	return &UserDetails{
		AccessKeyID: identity.AccessKeyID,
		Status:      normalizeUserStatus(identity.Status),
		CreatedAt:   identity.CreatedAt,
		UpdatedAt:   identity.UpdatedAt,
		Policy:      *policy,
	}, nil
}
func parsePolicyJSON(raw string) (*models.AuthPolicy, error) { func parsePolicyJSON(raw string) (*models.AuthPolicy, error) {
policy := models.AuthPolicy{} policy := models.AuthPolicy{}
if err := json.Unmarshal([]byte(raw), &policy); err != nil { if err := json.Unmarshal([]byte(raw), &policy); err != nil {
@@ -184,3 +484,71 @@ func defaultBootstrapPolicy(principal string) *models.AuthPolicy {
}, },
} }
} }
// generateSecretKey returns a URL-safe secret derived from length random
// bytes (crypto/rand); a non-positive length falls back to 32 bytes.
func generateSecretKey(length int) (string, error) {
	if length <= 0 {
		length = 32
	}
	raw := make([]byte, length)
	_, err := rand.Read(raw)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(raw), nil
}
// normalizeUserStatus trims and lower-cases raw, mapping "" to "active".
// Any value other than "active" or "disabled" yields "" (invalid).
func normalizeUserStatus(raw string) string {
	switch status := strings.ToLower(strings.TrimSpace(raw)); status {
	case "":
		return "active"
	case "active", "disabled":
		return status
	default:
		return ""
	}
}
// normalizePolicy validates and canonicalizes a policy for principal:
// effects are lower-cased and must be allow/deny, blank actions are
// dropped (at least one must remain), and empty bucket/prefix selectors
// widen to "*".
func normalizePolicy(policy models.AuthPolicy, principal string) (models.AuthPolicy, error) {
	if len(policy.Statements) == 0 {
		return models.AuthPolicy{}, fmt.Errorf("%w: at least one policy statement is required", ErrInvalidUserInput)
	}
	normalized := models.AuthPolicy{
		Principal:  principal,
		Statements: make([]models.AuthPolicyStatement, 0, len(policy.Statements)),
	}
	for _, stmt := range policy.Statements {
		effect := strings.ToLower(strings.TrimSpace(stmt.Effect))
		if effect != "allow" && effect != "deny" {
			return models.AuthPolicy{}, fmt.Errorf("%w: invalid policy effect %q", ErrInvalidUserInput, stmt.Effect)
		}
		var actions []string
		for _, action := range stmt.Actions {
			if trimmed := strings.TrimSpace(action); trimmed != "" {
				actions = append(actions, trimmed)
			}
		}
		if len(actions) == 0 {
			return models.AuthPolicy{}, fmt.Errorf("%w: policy statement must include at least one action", ErrInvalidUserInput)
		}
		bucket := strings.TrimSpace(stmt.Bucket)
		prefix := strings.TrimSpace(stmt.Prefix)
		if bucket == "" {
			bucket = "*"
		}
		if prefix == "" {
			prefix = "*"
		}
		normalized.Statements = append(normalized.Statements, models.AuthPolicyStatement{
			Effect:  effect,
			Actions: actions,
			Bucket:  bucket,
			Prefix:  prefix,
		})
	}
	return normalized, nil
}

223
auth/service_admin_test.go Normal file
View File

@@ -0,0 +1,223 @@
package auth
import (
"encoding/base64"
"errors"
"fs/metadata"
"fs/models"
"path/filepath"
"testing"
)
// TestAdminCreateListGetUser walks the happy path: create a user with a
// scoped policy (no secret, so one must be generated), list users, then
// fetch the user back and verify the policy round-trips with the
// principal rewritten to the access key id.
func TestAdminCreateListGetUser(t *testing.T) {
	meta, svc := newTestAuthService(t)
	created, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "backup-user",
		// SecretKey deliberately empty: service must generate one.
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{
					Effect:  "allow",
					Actions: []string{"s3:GetObject"},
					Bucket:  "backup-bucket",
					Prefix:  "restic/",
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	if created.SecretKey == "" {
		t.Fatalf("CreateUser should return generated secret")
	}
	if created.AccessKeyID != "backup-user" {
		t.Fatalf("CreateUser access key mismatch: got %q", created.AccessKeyID)
	}
	if created.Policy.Principal != "backup-user" {
		t.Fatalf("policy principal mismatch: got %q", created.Policy.Principal)
	}
	users, nextCursor, err := svc.ListUsers(100, "")
	if err != nil {
		t.Fatalf("ListUsers returned error: %v", err)
	}
	if nextCursor != "" {
		t.Fatalf("unexpected next cursor: %q", nextCursor)
	}
	// Expects exactly one listed user; the bootstrap root user is
	// apparently not included in the listing — confirm against the store.
	if len(users) != 1 {
		t.Fatalf("ListUsers returned %d users, want 1", len(users))
	}
	if users[0].AccessKeyID != "backup-user" {
		t.Fatalf("ListUsers returned wrong user: %q", users[0].AccessKeyID)
	}
	got, err := svc.GetUser("backup-user")
	if err != nil {
		t.Fatalf("GetUser returned error: %v", err)
	}
	if got.AccessKeyID != "backup-user" {
		t.Fatalf("GetUser access key mismatch: got %q", got.AccessKeyID)
	}
	if got.Policy.Principal != "backup-user" {
		t.Fatalf("GetUser policy principal mismatch: got %q", got.Policy.Principal)
	}
	if len(got.Policy.Statements) != 1 {
		t.Fatalf("GetUser policy statement count = %d, want 1", len(got.Policy.Statements))
	}
	_ = meta // metadata handle unused in this test; helper returns it for others
}
// TestCreateUserDuplicateFails verifies the second CreateUser with the
// same access key id is rejected with ErrUserAlreadyExists.
func TestCreateUserDuplicateFails(t *testing.T) {
	_, svc := newTestAuthService(t)
	req := CreateUserInput{
		AccessKeyID: "duplicate-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	}
	if _, err := svc.CreateUser(req); err != nil {
		t.Fatalf("first CreateUser returned error: %v", err)
	}
	_, err := svc.CreateUser(req)
	if !errors.Is(err, ErrUserAlreadyExists) {
		t.Fatalf("second CreateUser error = %v, want ErrUserAlreadyExists", err)
	}
}
// TestCreateUserRejectsInvalidAccessKey checks that an access key id that
// fails the validation pattern (here: too short) yields ErrInvalidUserInput.
func TestCreateUserRejectsInvalidAccessKey(t *testing.T) {
	_, svc := newTestAuthService(t)
	fullAccess := models.AuthPolicy{
		Statements: []models.AuthPolicyStatement{
			{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
		},
	}
	_, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "x",
		SecretKey:   "super-secret-1",
		Policy:      fullAccess,
	})
	if !errors.Is(err, ErrInvalidUserInput) {
		t.Fatalf("CreateUser error = %v, want ErrInvalidUserInput", err)
	}
}
// TestDeleteUser verifies a created user can be deleted and that a
// subsequent GetUser reports ErrUserNotFound.
func TestDeleteUser(t *testing.T) {
	_, svc := newTestAuthService(t)
	_, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "delete-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	})
	if err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	if err := svc.DeleteUser("delete-user"); err != nil {
		t.Fatalf("DeleteUser returned error: %v", err)
	}
	if _, err := svc.GetUser("delete-user"); !errors.Is(err, ErrUserNotFound) {
		t.Fatalf("GetUser after delete error = %v, want ErrUserNotFound", err)
	}
}
// TestDeleteBootstrapUserRejected ensures the bootstrap (root) user
// configured by newTestAuthService cannot be removed.
func TestDeleteBootstrapUserRejected(t *testing.T) {
	_, svc := newTestAuthService(t)
	err := svc.DeleteUser("root-user")
	if !errors.Is(err, ErrInvalidUserInput) {
		t.Fatalf("DeleteUser bootstrap error = %v, want ErrInvalidUserInput", err)
	}
}
// TestSetUserPolicy verifies that replacing a user's policy takes effect
// and the returned details reflect the new statements.
func TestSetUserPolicy(t *testing.T) {
	_, svc := newTestAuthService(t)
	_, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "policy-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:GetObject"}, Bucket: "b1", Prefix: "*"},
			},
		},
	})
	if err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	// Swap the read-only policy for a write policy on a different bucket.
	updated, err := svc.SetUserPolicy("policy-user", models.AuthPolicy{
		Statements: []models.AuthPolicyStatement{
			{Effect: "allow", Actions: []string{"s3:PutObject"}, Bucket: "b2", Prefix: "p/"},
		},
	})
	if err != nil {
		t.Fatalf("SetUserPolicy returned error: %v", err)
	}
	if len(updated.Policy.Statements) != 1 || updated.Policy.Statements[0].Actions[0] != "s3:PutObject" {
		t.Fatalf("SetUserPolicy did not apply new policy: %+v", updated.Policy)
	}
}
// TestSetUserStatus verifies that disabling a user is reflected in the
// returned details.
func TestSetUserStatus(t *testing.T) {
	_, svc := newTestAuthService(t)
	_, err := svc.CreateUser(CreateUserInput{
		AccessKeyID: "status-user",
		SecretKey:   "super-secret-1",
		Policy: models.AuthPolicy{
			Statements: []models.AuthPolicyStatement{
				{Effect: "allow", Actions: []string{"s3:*"}, Bucket: "*", Prefix: "*"},
			},
		},
	})
	if err != nil {
		t.Fatalf("CreateUser returned error: %v", err)
	}
	updated, err := svc.SetUserStatus("status-user", "disabled")
	if err != nil {
		t.Fatalf("SetUserStatus returned error: %v", err)
	}
	if updated.Status != "disabled" {
		t.Fatalf("SetUserStatus status = %q, want disabled", updated.Status)
	}
}
// newTestAuthService builds a Service backed by a temp bbolt metadata
// store, with auth enabled, an all-zero 32-byte AES-256 master key, and a
// bootstrap root user ("root-user"). The store is closed via t.Cleanup.
func newTestAuthService(t *testing.T) (*metadata.MetadataHandler, *Service) {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "metadata.db")
	meta, err := metadata.NewMetadataHandler(dbPath)
	if err != nil {
		t.Fatalf("NewMetadataHandler returned error: %v", err)
	}
	t.Cleanup(func() {
		_ = meta.Close()
	})
	// 32 zero bytes base64-encoded: a syntactically valid AES-256 key.
	masterKey := base64.StdEncoding.EncodeToString(make([]byte, 32))
	cfg := ConfigFromValues(
		true,        // auth enabled
		"us-east-1", // region
		0,           // skew seconds — presumably picks a default; confirm in ConfigFromValues
		0,           // max presign seconds — same assumption
		masterKey,
		"root-user",       // bootstrap access key
		"root-secret-123", // bootstrap secret key
		"",                // bootstrap policy JSON: empty -> default policy
	)
	svc, err := NewService(cfg, meta)
	if err != nil {
		t.Fatalf("NewService returned error: %v", err)
	}
	return meta, svc
}

View File

@@ -259,7 +259,57 @@ func canonicalPath(u *url.URL) string {
if path == "" { if path == "" {
return "/" return "/"
} }
return path return awsEncodePath(path)
}
// awsEncodePath applies AWS SigV4 canonical URI encoding to path:
// '/' and RFC 3986 unreserved characters pass through, well-formed %XX
// escapes are preserved with their hex digits uppercased, and every
// other byte is percent-encoded with uppercase hex.
func awsEncodePath(path string) string {
	var out strings.Builder
	out.Grow(len(path))
	for i := 0; i < len(path); i++ {
		c := path[i]
		switch {
		case c == '/' || isUnreserved(c):
			out.WriteByte(c)
		case c == '%' && i+2 < len(path) && isHex(path[i+1]) && isHex(path[i+2]):
			// Keep an existing escape sequence, normalizing to uppercase.
			out.WriteByte('%')
			out.WriteByte(toUpperHex(path[i+1]))
			out.WriteByte(toUpperHex(path[i+2]))
			i += 2
		default:
			out.WriteByte('%')
			out.WriteByte(hexUpper(c >> 4))
			out.WriteByte(hexUpper(c & 0x0F))
		}
	}
	return out.String()
}

// isUnreserved reports whether ch is an RFC 3986 unreserved character.
func isUnreserved(ch byte) bool {
	switch {
	case ch >= 'A' && ch <= 'Z', ch >= 'a' && ch <= 'z', ch >= '0' && ch <= '9':
		return true
	case ch == '-', ch == '_', ch == '.', ch == '~':
		return true
	}
	return false
}

// isHex reports whether ch is an ASCII hexadecimal digit.
func isHex(ch byte) bool {
	switch {
	case ch >= '0' && ch <= '9', ch >= 'a' && ch <= 'f', ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}

// toUpperHex uppercases a lowercase hex digit; other bytes pass through.
func toUpperHex(ch byte) byte {
	if ch >= 'a' && ch <= 'f' {
		return ch - ('a' - 'A')
	}
	return ch
}

// hexUpper converts a nibble (0-15) to its uppercase hex character.
func hexUpper(nibble byte) byte {
	if nibble < 10 {
		return '0' + nibble
	}
	return 'A' + (nibble - 10)
}
type queryPair struct { type queryPair struct {

123
cmd/admin.go Normal file
View File

@@ -0,0 +1,123 @@
package cmd
import (
"errors"
"fmt"
"fs/utils"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
)
// Fallbacks used when neither flags, env vars, nor server config provide
// an endpoint or region.
const (
	defaultAdminEndpoint = "http://localhost:2600"
	defaultAdminRegion   = "us-east-1"
)

// adminOptions carries the connection settings shared by all admin
// subcommands; fields are resolved from flags/env/server config by resolve.
type adminOptions struct {
	Endpoint  string        // base URL of the admin API
	Region    string        // SigV4 signing region
	AccessKey string        // admin access key id
	SecretKey string        // admin secret key
	JSON      bool          // emit JSON instead of human-readable output
	Timeout   time.Duration // per-request HTTP timeout
}
// newAdminCommand builds the `admin` command tree. The shared adminOptions
// are bound to persistent flags so every subcommand inherits them.
func newAdminCommand(build BuildInfo) *cobra.Command {
	opts := &adminOptions{}
	cmd := &cobra.Command{
		Use:   "admin",
		Short: "Admin operations over the fs admin API",
		// Bare `admin` with no subcommand just prints usage.
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	cmd.PersistentFlags().StringVar(&opts.Endpoint, "endpoint", "", "Admin API endpoint (env: FSCLI_ENDPOINT, fallback ADDRESS+PORT)")
	cmd.PersistentFlags().StringVar(&opts.Region, "region", "", "SigV4 region (env: FSCLI_REGION or FS_AUTH_REGION)")
	cmd.PersistentFlags().StringVar(&opts.AccessKey, "access-key", "", "Admin access key (env: FS_ROOT_USER, FSCLI_ACCESS_KEY)")
	cmd.PersistentFlags().StringVar(&opts.SecretKey, "secret-key", "", "Admin secret key (env: FS_ROOT_PASSWORD, FSCLI_SECRET_KEY)")
	cmd.PersistentFlags().BoolVar(&opts.JSON, "json", false, "Emit JSON output")
	cmd.PersistentFlags().DurationVar(&opts.Timeout, "timeout", 15*time.Second, "HTTP timeout")
	cmd.AddCommand(newAdminUserCommand(opts))
	cmd.AddCommand(newAdminDiagCommand(opts, build))
	cmd.AddCommand(newAdminSnapshotCommand(opts))
	return cmd
}
// resolve fills in endpoint, region, and credentials from — in priority
// order — explicit flag values, CLI env vars, server config env, and
// built-in defaults, then validates the result. requireCredentials gates
// the access/secret key check (health checks don't need keys).
func (o *adminOptions) resolve(requireCredentials bool) error {
	serverCfg := utils.NewConfig()

	o.Endpoint = strings.TrimSpace(firstNonEmpty(
		o.Endpoint,
		os.Getenv("FSCLI_ENDPOINT"),
		endpointFromServerConfig(serverCfg.Address, serverCfg.Port),
		defaultAdminEndpoint,
	))
	o.Region = strings.TrimSpace(firstNonEmpty(
		o.Region,
		os.Getenv("FSCLI_REGION"),
		os.Getenv("FS_AUTH_REGION"),
		serverCfg.AuthRegion,
		defaultAdminRegion,
	))
	o.AccessKey = strings.TrimSpace(firstNonEmpty(
		o.AccessKey,
		os.Getenv("FS_ROOT_USER"),
		os.Getenv("FSCLI_ACCESS_KEY"),
		os.Getenv("AWS_ACCESS_KEY_ID"),
		serverCfg.AuthBootstrapAccessKey,
	))
	o.SecretKey = strings.TrimSpace(firstNonEmpty(
		o.SecretKey,
		os.Getenv("FS_ROOT_PASSWORD"),
		os.Getenv("FSCLI_SECRET_KEY"),
		os.Getenv("AWS_SECRET_ACCESS_KEY"),
		serverCfg.AuthBootstrapSecretKey,
	))
	if o.Timeout <= 0 {
		o.Timeout = 15 * time.Second
	}

	if o.Endpoint == "" {
		return errors.New("admin endpoint is required")
	}
	if parsed, err := url.Parse(o.Endpoint); err != nil || parsed.Scheme == "" || parsed.Host == "" {
		return fmt.Errorf("invalid endpoint %q", o.Endpoint)
	}
	if o.Region == "" {
		return errors.New("region is required")
	}
	if requireCredentials && (o.AccessKey == "" || o.SecretKey == "") {
		return errors.New("credentials required: set --access-key/--secret-key or FSCLI_ACCESS_KEY/FSCLI_SECRET_KEY")
	}
	return nil
}
// endpointFromServerConfig derives an http:// endpoint from the server's
// bind address and port, mapping wildcard binds to localhost and
// out-of-range ports to the default 2600.
func endpointFromServerConfig(address string, port int) string {
	host := strings.TrimSpace(address)
	switch host {
	case "", "0.0.0.0", "::", "[::]":
		host = "localhost"
	}
	if port <= 0 || port > 65535 {
		port = 2600
	}
	return "http://" + net.JoinHostPort(host, strconv.Itoa(port))
}
// firstNonEmpty returns the first value that is non-blank after trimming
// whitespace, already trimmed; it returns "" when every value is blank.
// (Trims once per value instead of twice, as the original did.)
func firstNonEmpty(values ...string) string {
	for _, v := range values {
		if trimmed := strings.TrimSpace(v); trimmed != "" {
			return trimmed
		}
	}
	return ""
}

281
cmd/admin_client.go Normal file
View File

@@ -0,0 +1,281 @@
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
)
// adminUserListItem is one row of a user listing response.
type adminUserListItem struct {
	AccessKeyID string `json:"accessKeyId"`
	Status      string `json:"status"`
	CreatedAt   int64  `json:"createdAt"`
	UpdatedAt   int64  `json:"updatedAt"`
}

// adminUserListResponse is the paginated payload of GET /_admin/v1/users.
type adminUserListResponse struct {
	Items      []adminUserListItem `json:"items"`
	NextCursor string              `json:"nextCursor,omitempty"`
}

// adminPolicyStatement mirrors one policy statement on the wire.
type adminPolicyStatement struct {
	Effect  string   `json:"effect"`
	Actions []string `json:"actions"`
	Bucket  string   `json:"bucket"`
	Prefix  string   `json:"prefix"`
}

// adminPolicy mirrors a user's policy document on the wire.
type adminPolicy struct {
	Principal  string                 `json:"principal,omitempty"`
	Statements []adminPolicyStatement `json:"statements"`
}

// adminUserResponse is the single-user payload; SecretKey is only set in
// create responses.
type adminUserResponse struct {
	AccessKeyID string       `json:"accessKeyId"`
	Status      string       `json:"status"`
	CreatedAt   int64        `json:"createdAt"`
	UpdatedAt   int64        `json:"updatedAt"`
	Policy      *adminPolicy `json:"policy,omitempty"`
	SecretKey   string       `json:"secretKey,omitempty"`
}

// createUserRequest is the body of POST /_admin/v1/users.
type createUserRequest struct {
	AccessKeyID string      `json:"accessKeyId"`
	SecretKey   string      `json:"secretKey,omitempty"`
	Status      string      `json:"status,omitempty"`
	Policy      adminPolicy `json:"policy"`
}

// setStatusRequest is the body of PUT /_admin/v1/users/{id}/status.
type setStatusRequest struct {
	Status string `json:"status"`
}

// setPolicyRequest is the body of PUT /_admin/v1/users/{id}/policy.
type setPolicyRequest struct {
	Policy adminPolicy `json:"policy"`
}

// adminErrorResponse is the error envelope returned by the admin API.
type adminErrorResponse struct {
	Code      string `json:"code"`
	Message   string `json:"message"`
	RequestID string `json:"requestId,omitempty"`
}
// adminAPIError is a decoded admin API failure; it implements error and
// renders the richest message the response allowed.
type adminAPIError struct {
	StatusCode int
	Code       string
	Message    string
	RequestID  string
}

// Error renders "code: message (requestId=...)", degrading gracefully when
// the code or request id is absent; a nil receiver renders as "".
func (e *adminAPIError) Error() string {
	switch {
	case e == nil:
		return ""
	case e.Code == "":
		return fmt.Sprintf("admin API request failed: status=%d", e.StatusCode)
	case e.RequestID == "":
		return fmt.Sprintf("%s: %s", e.Code, e.Message)
	default:
		return fmt.Sprintf("%s: %s (requestId=%s)", e.Code, e.Message, e.RequestID)
	}
}
// adminAPIClient issues SigV4-signed JSON requests against the admin API.
type adminAPIClient struct {
	baseURL   *url.URL     // parsed endpoint; its path is used as a prefix
	region    string       // SigV4 signing region
	accessKey string       // signing access key id
	secretKey string       // signing secret key
	client    *http.Client // shared client carrying the configured timeout
}
// newAdminAPIClient resolves opts (flags/env/defaults) and builds a client
// for the admin API; requireCredentials is forwarded to opts.resolve.
func newAdminAPIClient(opts *adminOptions, requireCredentials bool) (*adminAPIClient, error) {
	if opts == nil {
		return nil, errors.New("admin options are required")
	}
	if err := opts.resolve(requireCredentials); err != nil {
		return nil, err
	}
	parsed, err := url.Parse(opts.Endpoint)
	if err != nil {
		return nil, err
	}
	httpClient := &http.Client{Timeout: opts.Timeout}
	client := &adminAPIClient{
		baseURL:   parsed,
		region:    opts.Region,
		accessKey: opts.AccessKey,
		secretKey: opts.SecretKey,
		client:    httpClient,
	}
	return client, nil
}
// CreateUser provisions a new user via POST /_admin/v1/users and returns
// the server's response (including the generated secret, if any).
// Expects 201 Created.
func (c *adminAPIClient) CreateUser(ctx context.Context, request createUserRequest) (*adminUserResponse, error) {
	var out adminUserResponse
	if err := c.doJSON(ctx, http.MethodPost, "/_admin/v1/users", nil, request, &out, http.StatusCreated); err != nil {
		return nil, err
	}
	return &out, nil
}
// ListUsers fetches one page of users via GET /_admin/v1/users; limit and
// cursor are passed as query parameters only when meaningful.
func (c *adminAPIClient) ListUsers(ctx context.Context, limit int, cursor string) (*adminUserListResponse, error) {
	params := url.Values{}
	if limit > 0 {
		params.Set("limit", strconv.Itoa(limit))
	}
	if trimmed := strings.TrimSpace(cursor); trimmed != "" {
		params.Set("cursor", trimmed)
	}
	var out adminUserListResponse
	if err := c.doJSON(ctx, http.MethodGet, "/_admin/v1/users", params, nil, &out, http.StatusOK); err != nil {
		return nil, err
	}
	return &out, nil
}
// GetUser fetches one user via GET /_admin/v1/users/{id}; the id is
// path-escaped so unusual characters survive the URL.
func (c *adminAPIClient) GetUser(ctx context.Context, accessKeyID string) (*adminUserResponse, error) {
	var out adminUserResponse
	path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
	if err := c.doJSON(ctx, http.MethodGet, path, nil, nil, &out, http.StatusOK); err != nil {
		return nil, err
	}
	return &out, nil
}
// DeleteUser removes a user via DELETE /_admin/v1/users/{id}.
// Success is a 204 No Content with an empty body.
func (c *adminAPIClient) DeleteUser(ctx context.Context, accessKeyID string) error {
	path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
	return c.doJSON(ctx, http.MethodDelete, path, nil, nil, nil, http.StatusNoContent)
}
// SetUserStatus updates a user's status via PUT /_admin/v1/users/{id}/status
// and returns the updated user.
func (c *adminAPIClient) SetUserStatus(ctx context.Context, accessKeyID, status string) (*adminUserResponse, error) {
	var out adminUserResponse
	path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/status"
	if err := c.doJSON(ctx, http.MethodPut, path, nil, setStatusRequest{Status: status}, &out, http.StatusOK); err != nil {
		return nil, err
	}
	return &out, nil
}
// SetUserPolicy replaces a user's policy via PUT /_admin/v1/users/{id}/policy
// and returns the updated user.
func (c *adminAPIClient) SetUserPolicy(ctx context.Context, accessKeyID string, policy adminPolicy) (*adminUserResponse, error) {
	var out adminUserResponse
	path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/policy"
	if err := c.doJSON(ctx, http.MethodPut, path, nil, setPolicyRequest{Policy: policy}, &out, http.StatusOK); err != nil {
		return nil, err
	}
	return &out, nil
}
// Health calls GET /healthz (unsigned) and returns the trimmed body text.
// On non-200 it still returns whatever text was available (falling back
// to the status text) together with a status error.
func (c *adminAPIClient) Health(ctx context.Context) (string, error) {
	req, err := c.newRequest(ctx, http.MethodGet, "/healthz", nil, nil)
	if err != nil {
		return "", err
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<10)) // best-effort; body is informational
	msg := strings.TrimSpace(string(raw))
	if resp.StatusCode == http.StatusOK {
		if msg == "" {
			msg = "ok"
		}
		return msg, nil
	}
	if msg == "" {
		msg = http.StatusText(resp.StatusCode)
	}
	return msg, fmt.Errorf("health check failed: status=%d", resp.StatusCode)
}
// doJSON performs one SigV4-signed JSON request against the admin API.
// body (when non-nil) is marshaled and sent; on the expected status the
// response is unmarshaled into out (when non-nil and non-empty); any
// other status is returned as an *adminAPIError, decoded from the error
// envelope when possible.
func (c *adminAPIClient) doJSON(
	ctx context.Context,
	method string,
	path string,
	query url.Values,
	body any,
	out any,
	expectedStatus int,
) error {
	var payload []byte
	var err error
	if body != nil {
		payload, err = json.Marshal(body)
		if err != nil {
			return err
		}
	}
	req, err := c.newRequest(ctx, method, path, query, payload)
	if err != nil {
		return err
	}
	if len(payload) > 0 {
		req.Header.Set("Content-Type", "application/json")
	}
	req.Header.Set("Accept", "application/json")
	// Sign after all headers are final so the signature covers them.
	if err := signSigV4Request(req, payload, c.accessKey, c.secretKey, c.region, "s3"); err != nil {
		return err
	}
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Cap the read at 2 MiB; admin responses are small.
	raw, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
	if readErr != nil {
		return readErr
	}
	if resp.StatusCode != expectedStatus {
		apiErr := &adminAPIError{StatusCode: resp.StatusCode}
		parsed := adminErrorResponse{}
		if len(raw) > 0 && json.Unmarshal(raw, &parsed) == nil {
			apiErr.Code = parsed.Code
			apiErr.Message = parsed.Message
			apiErr.RequestID = parsed.RequestID
		}
		if apiErr.Message == "" {
			// Fall back to the raw body for non-JSON error responses.
			apiErr.Message = strings.TrimSpace(string(raw))
		}
		return apiErr
	}
	if out == nil || len(raw) == 0 {
		return nil
	}
	// The body is already fully buffered; Unmarshal directly instead of
	// wrapping it back into a reader for a Decoder.
	return json.Unmarshal(raw, out)
}
// newRequest builds an HTTP request for the admin API by joining path onto
// the client's base URL.
//
// path arrives already percent-encoded (callers build it with
// url.PathEscape), so it is stored in RawPath with the decoded form in
// Path. The previous version assigned the encoded string to Path alone,
// which made URL.String()/EscapedPath() re-escape every '%' (e.g. an access
// key containing "/" became "%252F" on the wire instead of "%2F").
func (c *adminAPIClient) newRequest(
	ctx context.Context,
	method string,
	path string,
	query url.Values,
	payload []byte,
) (*http.Request, error) {
	u := *c.baseURL
	escaped := strings.TrimRight(c.baseURL.EscapedPath(), "/") + path
	decoded, err := url.PathUnescape(escaped)
	if err != nil {
		// Not valid percent-encoding; fall back to treating it literally,
		// which matches the previous behavior for such inputs.
		decoded = escaped
	}
	u.Path = decoded
	u.RawPath = escaped
	u.RawQuery = ""
	if len(query) > 0 {
		u.RawQuery = query.Encode()
	}
	req, err := http.NewRequestWithContext(ctx, method, u.String(), bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	return req, nil
}

68
cmd/admin_diag.go Normal file
View File

@@ -0,0 +1,68 @@
package cmd
import (
"context"
"fmt"
"runtime"
"github.com/spf13/cobra"
)
// newAdminDiagCommand groups the diagnostic subcommands (health, version).
func newAdminDiagCommand(opts *adminOptions, build BuildInfo) *cobra.Command {
	root := &cobra.Command{
		Use:   "diag",
		Short: "Diagnostics and connectivity checks",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bare "diag" just prints usage.
			return cmd.Help()
		},
	}
	root.AddCommand(
		newAdminDiagHealthCommand(opts),
		newAdminDiagVersionCommand(build, opts),
	)
	return root
}
// newAdminDiagHealthCommand wires "diag health": ping the server's health
// endpoint and report the result as text or JSON.
func newAdminDiagHealthCommand(opts *adminOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "health",
		Short: "Check server health endpoint",
		RunE: func(cmd *cobra.Command, args []string) error {
			client, err := newAdminAPIClient(opts, false)
			if err != nil {
				return err
			}
			status, err := client.Health(context.Background())
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), map[string]string{"status": status})
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "health: %s\n", status)
			return err
		},
	}
}
// newAdminDiagVersionCommand wires "diag version": print build metadata and
// the Go runtime version, as text or JSON.
func newAdminDiagVersionCommand(build BuildInfo, opts *adminOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Print CLI version metadata",
		RunE: func(cmd *cobra.Command, args []string) error {
			info := map[string]string{
				"version": build.Version,
				"commit":  build.Commit,
				"date":    build.Date,
				"go":      runtime.Version(),
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), info)
			}
			_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n", info["version"], info["commit"], info["date"], info["go"])
			return err
		},
	}
}

80
cmd/admin_output.go Normal file
View File

@@ -0,0 +1,80 @@
package cmd
import (
"encoding/json"
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
)
func writeJSON(out io.Writer, value any) error {
encoder := json.NewEncoder(out)
encoder.SetIndent("", " ")
return encoder.Encode(value)
}
// formatUnix renders a Unix-seconds timestamp as RFC 3339 in UTC; zero or
// negative values are treated as "unset" and rendered as "-".
func formatUnix(ts int64) string {
	if ts > 0 {
		return time.Unix(ts, 0).UTC().Format(time.RFC3339)
	}
	return "-"
}
// writeUserListTable renders the user listing as an aligned text table and
// appends a NEXT_CURSOR row when the server returned a pagination cursor.
func writeUserListTable(out io.Writer, value *adminUserListResponse) error {
	table := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
	if _, err := fmt.Fprintln(table, "ACCESS_KEY_ID\tSTATUS\tCREATED_AT\tUPDATED_AT"); err != nil {
		return err
	}
	for _, user := range value.Items {
		_, err := fmt.Fprintf(table, "%s\t%s\t%s\t%s\n", user.AccessKeyID, user.Status, formatUnix(user.CreatedAt), formatUnix(user.UpdatedAt))
		if err != nil {
			return err
		}
	}
	if strings.TrimSpace(value.NextCursor) != "" {
		if _, err := fmt.Fprintf(table, "\nNEXT_CURSOR\t%s\t\t\n", value.NextCursor); err != nil {
			return err
		}
	}
	return table.Flush()
}
// writeUserTable renders one user record as aligned key/value rows. The
// secret key row appears only when includeSecret is set and the value is
// non-empty; policy statements are expanded one field per row.
func writeUserTable(out io.Writer, value *adminUserResponse, includeSecret bool) error {
	table := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
	var err error
	// row emits one "key<TAB>value" line, capturing the first write error.
	row := func(key, val string) {
		if err == nil {
			_, err = fmt.Fprintf(table, "%s\t%s\n", key, val)
		}
	}
	row("accessKeyId", value.AccessKeyID)
	row("status", value.Status)
	row("createdAt", formatUnix(value.CreatedAt))
	row("updatedAt", formatUnix(value.UpdatedAt))
	if includeSecret && strings.TrimSpace(value.SecretKey) != "" {
		row("secretKey", value.SecretKey)
	}
	if value.Policy != nil {
		for i, stmt := range value.Policy.Statements {
			n := i + 1
			row(fmt.Sprintf("policy[%d].effect", n), stmt.Effect)
			row(fmt.Sprintf("policy[%d].actions", n), strings.Join(stmt.Actions, ","))
			row(fmt.Sprintf("policy[%d].bucket", n), stmt.Bucket)
			row(fmt.Sprintf("policy[%d].prefix", n), stmt.Prefix)
		}
	}
	if err != nil {
		return err
	}
	return table.Flush()
}

47
cmd/admin_policy.go Normal file
View File

@@ -0,0 +1,47 @@
package cmd
import (
"fmt"
"strings"
)
// rolePolicyOptions carries the inputs used to expand a named role into a
// concrete admin policy statement.
type rolePolicyOptions struct {
	Role   string // role name: admin, readwrite, or readonly
	Bucket string // bucket the statement applies to; treated as "*" when blank
	Prefix string // key prefix the statement applies to; treated as "*" when blank
}
// buildPolicyFromRole expands a symbolic role into a single allow statement
// scoped to the given bucket/prefix (both default to "*" when blank). An
// unknown role yields an error listing the accepted names.
func buildPolicyFromRole(opts rolePolicyOptions) (adminPolicy, error) {
	bucket := strings.TrimSpace(opts.Bucket)
	if bucket == "" {
		bucket = "*"
	}
	prefix := strings.TrimSpace(opts.Prefix)
	if prefix == "" {
		prefix = "*"
	}
	roleActions := map[string][]string{
		"admin":     {"s3:*"},
		"readwrite": {"s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"},
		"readonly":  {"s3:ListBucket", "s3:GetObject"},
	}
	actions, ok := roleActions[strings.ToLower(strings.TrimSpace(opts.Role))]
	if !ok {
		return adminPolicy{}, fmt.Errorf("invalid role %q (allowed: admin, readwrite, readonly)", opts.Role)
	}
	statement := adminPolicyStatement{
		Effect:  "allow",
		Actions: actions,
		Bucket:  bucket,
		Prefix:  prefix,
	}
	return adminPolicy{Statements: []adminPolicyStatement{statement}}, nil
}

166
cmd/admin_sigv4.go Normal file
View File

@@ -0,0 +1,166 @@
package cmd
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
// sigV4Algorithm is the algorithm identifier used in SigV4 Authorization
// headers and in the string-to-sign.
const sigV4Algorithm = "AWS4-HMAC-SHA256"
// signSigV4Request signs req in place using AWS Signature Version 4.
//
// The SHA-256 of body is stamped into x-amz-content-sha256; the signed
// headers are host, x-amz-content-sha256 and x-amz-date, and the credential
// scope is date/region/service/aws4_request. Returns an error for a nil
// request or blank credentials/scope components.
func signSigV4Request(req *http.Request, body []byte, accessKey, secretKey, region, service string) error {
	if req == nil {
		return fmt.Errorf("nil request")
	}
	if strings.TrimSpace(accessKey) == "" || strings.TrimSpace(secretKey) == "" {
		return fmt.Errorf("missing signing credentials")
	}
	if strings.TrimSpace(region) == "" || strings.TrimSpace(service) == "" {
		return fmt.Errorf("missing signing scope")
	}
	now := time.Now().UTC()
	amzDate := now.Format("20060102T150405Z")
	shortDate := now.Format("20060102")
	scope := shortDate + "/" + region + "/" + service + "/aws4_request"
	payloadHash := sha256Hex(body)
	// These headers must be set before building the canonical request so the
	// signature covers them.
	req.Header.Set("x-amz-date", amzDate)
	req.Header.Set("x-amz-content-sha256", payloadHash)
	host := req.URL.Host
	signedHeaders := []string{"host", "x-amz-content-sha256", "x-amz-date"}
	canonicalRequest, signedHeadersRaw := buildCanonicalRequest(req, signedHeaders, payloadHash)
	stringToSign := buildStringToSign(amzDate, scope, canonicalRequest)
	signature := hex.EncodeToString(hmacSHA256(deriveSigningKey(secretKey, shortDate, region, service), stringToSign))
	authHeader := fmt.Sprintf(
		"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		sigV4Algorithm,
		accessKey,
		scope,
		signedHeadersRaw,
		signature,
	)
	req.Header.Set("Authorization", authHeader)
	// Keep req.Host aligned with the URL host that was signed into the
	// canonical "host" header.
	req.Host = host
	return nil
}
// buildCanonicalRequest assembles the newline-joined SigV4 canonical request
// and returns it along with the semicolon-joined signed-header list.
func buildCanonicalRequest(req *http.Request, signedHeaders []string, payloadHash string) (string, string) {
	headerBlock, headerList := canonicalHeaders(req, signedHeaders)
	parts := []string{
		req.Method,
		canonicalPath(req.URL),
		canonicalQuery(req.URL),
		headerBlock,
		headerList,
		payloadHash,
	}
	return strings.Join(parts, "\n"), headerList
}
func canonicalPath(u *url.URL) string {
if u == nil {
return "/"
}
path := u.EscapedPath()
if path == "" {
return "/"
}
return path
}
// canonicalQuery renders the URL query in SigV4 canonical form: every
// key/value AWS-encoded and the pairs sorted by key, then by value.
func canonicalQuery(u *url.URL) string {
	if u == nil {
		return ""
	}
	type kv struct{ k, v string }
	var pairs []kv
	for key, vals := range u.Query() {
		if len(vals) == 0 {
			// A bare key (no "=") still contributes "key=".
			pairs = append(pairs, kv{k: key})
			continue
		}
		for _, val := range vals {
			pairs = append(pairs, kv{k: key, v: val})
		}
	}
	sort.Slice(pairs, func(i, j int) bool {
		if pairs[i].k != pairs[j].k {
			return pairs[i].k < pairs[j].k
		}
		return pairs[i].v < pairs[j].v
	})
	encoded := make([]string, 0, len(pairs))
	for _, p := range pairs {
		encoded = append(encoded, awsEncodeQuery(p.k)+"="+awsEncodeQuery(p.v))
	}
	return strings.Join(encoded, "&")
}
// awsEncodeQuery percent-encodes a query component per SigV4 rules: spaces
// become %20 (never '+'), '*' becomes %2A, and '~' stays literal.
func awsEncodeQuery(value string) string {
	fixups := strings.NewReplacer(
		"+", "%20",
		"*", "%2A",
		"%7E", "~",
	)
	return fixups.Replace(url.QueryEscape(value))
}
func canonicalHeaders(req *http.Request, headers []string) (string, string) {
names := make([]string, 0, len(headers))
lines := make([]string, 0, len(headers))
for _, h := range headers {
name := strings.ToLower(strings.TrimSpace(h))
if name == "" {
continue
}
var value string
if name == "host" {
value = req.URL.Host
} else {
value = strings.Join(req.Header.Values(http.CanonicalHeaderKey(name)), ",")
}
value = strings.Join(strings.Fields(strings.TrimSpace(value)), " ")
names = append(names, name)
lines = append(lines, name+":"+value)
}
return strings.Join(lines, "\n") + "\n", strings.Join(names, ";")
}
func buildStringToSign(amzDate, scope, canonicalRequest string) string {
hash := sha256.Sum256([]byte(canonicalRequest))
return strings.Join([]string{
sigV4Algorithm,
amzDate,
scope,
hex.EncodeToString(hash[:]),
}, "\n")
}
// deriveSigningKey performs the SigV4 key-derivation chain:
// HMAC("AWS4"+secret, date) -> region -> service -> "aws4_request".
func deriveSigningKey(secret, date, region, service string) []byte {
	key := []byte("AWS4" + secret)
	for _, step := range []string{date, region, service, "aws4_request"} {
		key = hmacSHA256(key, step)
	}
	return key
}
func hmacSHA256(key []byte, message string) []byte {
mac := hmac.New(sha256.New, key)
_, _ = mac.Write([]byte(message))
return mac.Sum(nil)
}
// sha256Hex returns the lowercase hex SHA-256 digest of payload.
func sha256Hex(payload []byte) string {
	h := sha256.New()
	h.Write(payload)
	return hex.EncodeToString(h.Sum(nil))
}

728
cmd/admin_snapshot.go Normal file
View File

@@ -0,0 +1,728 @@
package cmd
import (
	"archive/tar"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"
	bolt "go.etcd.io/bbolt"
)
const (
	// snapshotManifestPath is the reserved in-archive location of the
	// JSON manifest; it is never extracted as a data file.
	snapshotManifestPath = ".fs-snapshot/manifest.json"
	// snapshotFormat is the manifest format version this build reads/writes.
	snapshotFormat = 1
)
// snapshotFileEntry records one archived file's identity and integrity data.
type snapshotFileEntry struct {
	Path   string `json:"path"`   // slash-separated path relative to the data root
	Size   int64  `json:"size"`   // size in bytes
	SHA256 string `json:"sha256"` // lowercase hex SHA-256 of the contents
}
// snapshotManifest is the JSON document embedded in every snapshot archive,
// describing its origin and the expected contents for verification.
type snapshotManifest struct {
	FormatVersion int                 `json:"formatVersion"` // must equal snapshotFormat to be readable
	CreatedAt     string              `json:"createdAt"`     // RFC 3339 creation time
	SourcePath    string              `json:"sourcePath"`    // absolute data path the snapshot was taken from
	Files         []snapshotFileEntry `json:"files"`         // every archived file with size and checksum
}
// snapshotSummary is the user-facing result of a create/inspect/restore
// operation (also serialized for --json output).
type snapshotSummary struct {
	SnapshotFile string `json:"snapshotFile"` // absolute path of the archive
	CreatedAt    string `json:"createdAt"`    // manifest creation time
	SourcePath   string `json:"sourcePath"`   // data path (source, or destination after restore)
	FileCount    int    `json:"fileCount"`    // number of files in the manifest
	TotalBytes   int64  `json:"totalBytes"`   // sum of manifest file sizes
}
// newAdminSnapshotCommand groups the offline snapshot subcommands
// (create, inspect, restore).
func newAdminSnapshotCommand(opts *adminOptions) *cobra.Command {
	root := &cobra.Command{
		Use:   "snapshot",
		Short: "Offline snapshot and restore utilities",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bare "snapshot" just prints usage.
			return cmd.Help()
		},
	}
	root.AddCommand(
		newAdminSnapshotCreateCommand(opts),
		newAdminSnapshotInspectCommand(opts),
		newAdminSnapshotRestoreCommand(opts),
	)
	return root
}
// newAdminSnapshotCreateCommand wires "snapshot create": archive an offline
// data path into a .tar.gz snapshot.
func newAdminSnapshotCreateCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>"
	var dataPath, outFile string
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create offline snapshot tarball (.tar.gz)",
		RunE: func(cmd *cobra.Command, args []string) error {
			dataPath = strings.TrimSpace(dataPath)
			outFile = strings.TrimSpace(outFile)
			if dataPath == "" {
				return usageError(usage, "--data-path is required")
			}
			if outFile == "" {
				return usageError(usage, "--out is required")
			}
			result, err := createSnapshotArchive(context.Background(), dataPath, outFile)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "snapshot created: %s (files=%d bytes=%d)\n", result.SnapshotFile, result.FileCount, result.TotalBytes)
			return err
		},
	}
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Source data path (must contain metadata.db)")
	cmd.Flags().StringVar(&outFile, "out", "", "Output snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotInspectCommand wires "snapshot inspect": verify an archive
// and print (or JSON-encode) its manifest and summary.
func newAdminSnapshotInspectCommand(opts *adminOptions) *cobra.Command {
	var filePath string
	cmd := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect and verify snapshot archive integrity",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			if filePath == "" {
				return usageError("fs admin snapshot inspect --file <snapshot.tar.gz>", "--file is required")
			}
			manifest, summary, err := inspectSnapshotArchive(filePath)
			if err != nil {
				return err
			}
			if opts.JSON {
				payload := map[string]any{
					"summary":  summary,
					"manifest": manifest,
				}
				return writeJSON(cmd.OutOrStdout(), payload)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot ok: %s\ncreated_at=%s source=%s files=%d bytes=%d\n",
				summary.SnapshotFile,
				summary.CreatedAt,
				summary.SourcePath,
				summary.FileCount,
				summary.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotRestoreCommand wires "snapshot restore": extract a
// verified archive into an offline data path, optionally overwriting.
func newAdminSnapshotRestoreCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]"
	var filePath, dataPath string
	var force bool
	cmd := &cobra.Command{
		Use:   "restore",
		Short: "Restore snapshot into a data path (offline only)",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			dataPath = strings.TrimSpace(dataPath)
			if filePath == "" {
				return usageError(usage, "--file is required")
			}
			if dataPath == "" {
				return usageError(usage, "--data-path is required")
			}
			result, err := restoreSnapshotArchive(context.Background(), filePath, dataPath, force)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot restored to %s (files=%d bytes=%d)\n",
				result.SourcePath,
				result.FileCount,
				result.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Destination data path")
	cmd.Flags().BoolVar(&force, "force", false, "Overwrite destination data path if it exists")
	return cmd
}
// createSnapshotArchive builds a verified .tar.gz snapshot of dataPath.
//
// Preconditions: dataPath is a directory containing metadata.db, the output
// file is not inside dataPath, and no running server holds metadata.db. The
// archive (manifest first, then every regular file) is written to a temp
// file, fsynced, and atomically renamed to outFile so a crash never leaves
// a partial snapshot at the final path.
func createSnapshotArchive(ctx context.Context, dataPath, outFile string) (*snapshotSummary, error) {
	_ = ctx // reserved; archiving is currently not cancelable
	sourceAbs, err := filepath.Abs(filepath.Clean(dataPath))
	if err != nil {
		return nil, err
	}
	outAbs, err := filepath.Abs(filepath.Clean(outFile))
	if err != nil {
		return nil, err
	}
	if isPathWithin(sourceAbs, outAbs) {
		return nil, errors.New("output file cannot be inside --data-path")
	}
	info, err := os.Stat(sourceAbs)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("data path %q is not a directory", sourceAbs)
	}
	if err := ensureMetadataExists(sourceAbs); err != nil {
		return nil, err
	}
	if err := ensureDataPathOffline(sourceAbs); err != nil {
		return nil, err
	}
	manifest, totalBytes, err := buildSnapshotManifest(sourceAbs)
	if err != nil {
		return nil, err
	}
	if err := os.MkdirAll(filepath.Dir(outAbs), 0o755); err != nil {
		return nil, err
	}
	tmpPath := outAbs + ".tmp-" + strconvNowNano()
	if err := writeSnapshotTarball(tmpPath, sourceAbs, manifest); err != nil {
		return nil, err
	}
	if err := os.Rename(tmpPath, outAbs); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	// Make the rename itself durable.
	if err := syncDir(filepath.Dir(outAbs)); err != nil {
		return nil, err
	}
	return &snapshotSummary{
		SnapshotFile: outAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   sourceAbs,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}, nil
}

// writeSnapshotTarball writes the manifest plus every manifest file into a
// freshly created gzip'd tar at tmpPath, fsyncing before close. On any
// error the temp file is removed. (Previously the close/remove cleanup was
// repeated at every exit point and the success path closed the file twice —
// once explicitly and once via defer, with the second error ignored.)
func writeSnapshotTarball(tmpPath, sourceAbs string, manifest *snapshotManifest) (err error) {
	file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}
	defer func() {
		// Single cleanup point: on any failure, close and discard the temp.
		if err != nil {
			_ = file.Close()
			_ = os.Remove(tmpPath)
		}
	}()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	// closeWriters flushes tar then gzip; both must succeed for a valid archive.
	closeWriters := func() error {
		if terr := tw.Close(); terr != nil {
			_ = gzw.Close()
			return terr
		}
		return gzw.Close()
	}
	if err = writeManifestToTar(tw, manifest); err != nil {
		_ = closeWriters()
		return err
	}
	for _, entry := range manifest.Files {
		absPath := filepath.Join(sourceAbs, filepath.FromSlash(entry.Path))
		if err = writeFileToTar(tw, absPath, entry.Path); err != nil {
			_ = closeWriters()
			return err
		}
	}
	if err = closeWriters(); err != nil {
		return err
	}
	if err = file.Sync(); err != nil {
		return err
	}
	return file.Close()
}
// inspectSnapshotArchive reads the archive at filePath and verifies every
// contained file's size and SHA-256 against the embedded manifest. Missing,
// extra, or mismatching files fail verification. Returns the manifest and a
// summary of the (verified) contents.
func inspectSnapshotArchive(filePath string) (*snapshotManifest, *snapshotSummary, error) {
	fileAbs, err := filepath.Abs(filepath.Clean(filePath))
	if err != nil {
		return nil, nil, err
	}
	manifest, actual, err := readSnapshotArchive(fileAbs)
	if err != nil {
		return nil, nil, err
	}
	var totalBytes int64
	expected := make(map[string]snapshotFileEntry, len(manifest.Files))
	for _, want := range manifest.Files {
		expected[want.Path] = want
		totalBytes += want.Size
	}
	if len(actual) != len(expected) {
		return nil, nil, fmt.Errorf("snapshot validation failed: expected %d files, got %d", len(expected), len(actual))
	}
	for name, want := range expected {
		got, present := actual[name]
		if !present {
			return nil, nil, fmt.Errorf("snapshot validation failed: missing file %s", name)
		}
		if got.Size != want.Size || got.SHA256 != want.SHA256 {
			return nil, nil, fmt.Errorf("snapshot validation failed: checksum mismatch for %s", name)
		}
	}
	summary := &snapshotSummary{
		SnapshotFile: fileAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   manifest.SourcePath,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}
	return manifest, summary, nil
}
// restoreSnapshotArchive verifies the archive, extracts it into a hidden
// staging directory next to the destination, and atomically renames the
// stage into place. The destination must be offline (metadata.db not
// locked); an existing destination is only replaced when force is set.
func restoreSnapshotArchive(ctx context.Context, filePath, destinationPath string, force bool) (*snapshotSummary, error) {
	_ = ctx // reserved; extraction is currently not cancelable
	manifest, summary, err := inspectSnapshotArchive(filePath)
	if err != nil {
		return nil, err
	}
	destAbs, err := filepath.Abs(filepath.Clean(destinationPath))
	if err != nil {
		return nil, err
	}
	if fi, statErr := os.Stat(destAbs); statErr == nil && fi.IsDir() {
		if err := ensureDataPathOffline(destAbs); err != nil {
			return nil, err
		}
		// Refuse to clobber a non-empty destination unless forced.
		entries, err := os.ReadDir(destAbs)
		if err == nil && len(entries) > 0 && !force {
			return nil, errors.New("destination data path is not empty; use --force to overwrite")
		}
	}
	parent := filepath.Dir(destAbs)
	if err := os.MkdirAll(parent, 0o755); err != nil {
		return nil, err
	}
	// Extract into a sibling staging dir so a failed restore never leaves a
	// half-written destination.
	stage := filepath.Join(parent, "."+filepath.Base(destAbs)+".restore-"+strconvNowNano())
	if err := os.MkdirAll(stage, 0o755); err != nil {
		return nil, err
	}
	cleanupStage := true
	defer func() {
		if cleanupStage {
			_ = os.RemoveAll(stage)
		}
	}()
	if err := extractSnapshotArchive(filePath, stage, manifest); err != nil {
		return nil, err
	}
	if err := syncDir(stage); err != nil {
		return nil, err
	}
	// Re-check existence right before the swap: any existing destination
	// (even an empty directory) requires force and is removed so the rename
	// below can succeed.
	if _, err := os.Stat(destAbs); err == nil {
		if !force {
			return nil, errors.New("destination data path exists; use --force to overwrite")
		}
		if err := os.RemoveAll(destAbs); err != nil {
			return nil, err
		}
	}
	if err := os.Rename(stage, destAbs); err != nil {
		return nil, err
	}
	if err := syncDir(parent); err != nil {
		return nil, err
	}
	cleanupStage = false
	summary.SourcePath = destAbs
	return summary, nil
}
func ensureMetadataExists(dataPath string) error {
dbPath := filepath.Join(dataPath, "metadata.db")
info, err := os.Stat(dbPath)
if err != nil {
return fmt.Errorf("metadata.db not found in %s", dataPath)
}
if !info.Mode().IsRegular() {
return fmt.Errorf("metadata.db in %s is not a regular file", dataPath)
}
return nil
}
// ensureDataPathOffline confirms no running process holds metadata.db by
// attempting a short, read-only bolt open. A missing database is fine (a
// fresh data path); failing to open within the timeout means the path is
// in use.
func ensureDataPathOffline(dataPath string) error {
	dbPath := filepath.Join(dataPath, "metadata.db")
	_, statErr := os.Stat(dbPath)
	if errors.Is(statErr, os.ErrNotExist) {
		return nil
	}
	if statErr != nil {
		return statErr
	}
	openOpts := &bolt.Options{
		Timeout:  100 * time.Millisecond,
		ReadOnly: true,
	}
	db, err := bolt.Open(dbPath, 0o600, openOpts)
	if err != nil {
		return fmt.Errorf("data path appears in use (metadata.db locked): %w", err)
	}
	return db.Close()
}
// buildSnapshotManifest walks dataPath and records every regular file's
// relative (slash-separated) path, size, and SHA-256. Directories and
// non-regular entries are skipped; a data path with no regular files is an
// error. Also returns the total byte count of the recorded files.
func buildSnapshotManifest(dataPath string) (*snapshotManifest, int64, error) {
	var files []snapshotFileEntry
	var totalBytes int64
	visit := func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			return nil
		}
		rel, err := filepath.Rel(dataPath, path)
		if err != nil {
			return err
		}
		rel = filepath.ToSlash(filepath.Clean(rel))
		if rel == "" || rel == "." {
			return nil
		}
		sum, err := sha256File(path)
		if err != nil {
			return err
		}
		totalBytes += info.Size()
		files = append(files, snapshotFileEntry{Path: rel, Size: info.Size(), SHA256: sum})
		return nil
	}
	if err := filepath.WalkDir(dataPath, visit); err != nil {
		return nil, 0, err
	}
	if len(files) == 0 {
		return nil, 0, errors.New("data path contains no regular files to snapshot")
	}
	manifest := &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     time.Now().UTC().Format(time.RFC3339Nano),
		SourcePath:    dataPath,
		Files:         files,
	}
	return manifest, totalBytes, nil
}
// writeManifestToTar serializes the manifest as JSON and stores it at the
// reserved snapshotManifestPath inside the archive.
func writeManifestToTar(tw *tar.Writer, manifest *snapshotManifest) error {
	if tw == nil || manifest == nil {
		return errors.New("invalid manifest writer input")
	}
	raw, err := json.Marshal(manifest)
	if err != nil {
		return err
	}
	hdr := tar.Header{
		Name:    snapshotManifestPath,
		Mode:    0o600,
		Size:    int64(len(raw)),
		ModTime: time.Now(),
	}
	if err := tw.WriteHeader(&hdr); err != nil {
		return err
	}
	_, err = tw.Write(raw)
	return err
}
func writeFileToTar(tw *tar.Writer, absPath, relPath string) error {
file, err := os.Open(absPath)
if err != nil {
return err
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: relPath,
Mode: int64(info.Mode().Perm()),
Size: info.Size(),
ModTime: info.ModTime(),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err = io.Copy(tw, file)
return err
}
// readSnapshotArchive streams the gzip'd tar at filePath, returning the
// decoded manifest plus a map of every other entry's observed size and
// SHA-256, keyed by cleaned in-archive path. It fails on unsafe entry
// names, unsupported entry types, a missing manifest, or a manifest format
// version other than snapshotFormat.
func readSnapshotArchive(filePath string) (*snapshotManifest, map[string]snapshotFileEntry, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return nil, nil, err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	actual := make(map[string]snapshotFileEntry)
	var manifest *snapshotManifest
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		// Reject traversal/absolute names before doing anything with them.
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return nil, nil, err
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		// TypeRegA is the legacy (pre-POSIX) regular-file flag.
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return nil, nil, fmt.Errorf("unsupported tar entry type for %s", name)
		}
		if name == snapshotManifestPath {
			raw, err := io.ReadAll(tr)
			if err != nil {
				return nil, nil, err
			}
			current := &snapshotManifest{}
			if err := json.Unmarshal(raw, current); err != nil {
				return nil, nil, err
			}
			manifest = current
			continue
		}
		// Hash each data entry while draining it; the caller compares the
		// result against the manifest.
		size, hashHex, err := digestReader(tr)
		if err != nil {
			return nil, nil, err
		}
		actual[name] = snapshotFileEntry{
			Path:   name,
			Size:   size,
			SHA256: hashHex,
		}
	}
	if manifest == nil {
		return nil, nil, errors.New("snapshot manifest.json not found")
	}
	if manifest.FormatVersion != snapshotFormat {
		return nil, nil, fmt.Errorf("unsupported snapshot format version %d", manifest.FormatVersion)
	}
	return manifest, actual, nil
}
// extractSnapshotArchive unpacks the archive at filePath into destination,
// admitting only files listed in the manifest. Every extracted file is
// re-hashed during the copy and verified against the manifest; each file is
// fsynced (and its directory synced) before moving on. After extraction it
// confirms that every manifest entry was seen exactly once.
func extractSnapshotArchive(filePath, destination string, manifest *snapshotManifest) error {
	expected := make(map[string]snapshotFileEntry, len(manifest.Files))
	for _, entry := range manifest.Files {
		expected[entry.Path] = entry
	}
	seen := make(map[string]struct{}, len(expected))
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return err
		}
		// Reject traversal/absolute names before building any paths.
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return err
		}
		if name == snapshotManifestPath {
			// The manifest is metadata, not data: drain and skip it.
			if _, err := io.Copy(io.Discard, tr); err != nil {
				return err
			}
			continue
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		// TypeRegA is the legacy (pre-POSIX) regular-file flag.
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return fmt.Errorf("unsupported tar entry type for %s", name)
		}
		exp, ok := expected[name]
		if !ok {
			return fmt.Errorf("snapshot contains unexpected file %s", name)
		}
		targetPath := filepath.Join(destination, filepath.FromSlash(name))
		// Defense in depth: cleanArchivePath should already prevent escapes.
		if !isPathWithin(destination, targetPath) {
			return fmt.Errorf("invalid archive path %s", name)
		}
		if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
			return err
		}
		out, err := os.OpenFile(targetPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(header.Mode)&0o777)
		if err != nil {
			return err
		}
		// Hash while writing so the copy is verified without a second read.
		hasher := sha256.New()
		written, copyErr := io.Copy(io.MultiWriter(out, hasher), tr)
		// Always sync and close, then report the first failure in order.
		syncErr := out.Sync()
		closeErr := out.Close()
		if copyErr != nil {
			return copyErr
		}
		if syncErr != nil {
			return syncErr
		}
		if closeErr != nil {
			return closeErr
		}
		if err := syncDir(filepath.Dir(targetPath)); err != nil {
			return err
		}
		sum := hex.EncodeToString(hasher.Sum(nil))
		if written != exp.Size || sum != exp.SHA256 {
			return fmt.Errorf("checksum mismatch while extracting %s", name)
		}
		seen[name] = struct{}{}
	}
	if len(seen) != len(expected) {
		return fmt.Errorf("restore validation failed: extracted %d files, expected %d", len(seen), len(expected))
	}
	for path := range expected {
		if _, ok := seen[path]; !ok {
			return fmt.Errorf("restore validation failed: missing file %s", path)
		}
	}
	return nil
}
// cleanArchivePath normalizes a tar entry name into a clean, slash-separated
// relative path and rejects anything that could escape the extraction root:
// empty names, absolute paths, and ".." traversal segments.
func cleanArchivePath(name string) (string, error) {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" {
		return "", errors.New("empty archive path")
	}
	cleaned := filepath.ToSlash(filepath.Clean(trimmed))
	switch {
	case strings.HasPrefix(cleaned, "/"),
		strings.HasPrefix(cleaned, "../"),
		strings.Contains(cleaned, "/../"),
		cleaned == "..":
		return "", fmt.Errorf("unsafe archive path %q", cleaned)
	}
	return cleaned, nil
}
func digestReader(r io.Reader) (int64, string, error) {
hasher := sha256.New()
n, err := io.Copy(hasher, r)
if err != nil {
return 0, "", err
}
return n, hex.EncodeToString(hasher.Sum(nil)), nil
}
func sha256File(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
return sha256FromReader(file)
}
func sha256FromReader(reader io.Reader) (string, error) {
hasher := sha256.New()
if _, err := io.Copy(hasher, reader); err != nil {
return "", err
}
return hex.EncodeToString(hasher.Sum(nil)), nil
}
// isPathWithin reports whether candidate is base itself or lies beneath it.
// The check is lexical (paths are cleaned; symlinks are not resolved).
func isPathWithin(base, candidate string) bool {
	rel, err := filepath.Rel(filepath.Clean(base), filepath.Clean(candidate))
	if err != nil {
		return false
	}
	if rel == "." {
		return true
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}
func syncDir(path string) error {
dir, err := os.Open(path)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}
func strconvNowNano() string {
return fmt.Sprintf("%d", time.Now().UnixNano())
}

240
cmd/admin_snapshot_test.go Normal file
View File

@@ -0,0 +1,240 @@
package cmd
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
bolt "go.etcd.io/bbolt"
)
// snapshotArchiveEntry is a minimal (path, contents) pair the tests use to
// compose synthetic snapshot archives.
type snapshotArchiveEntry struct {
	Path string // slash-separated in-archive path
	Data []byte // file contents
}
// TestInspectSnapshotArchiveRejectsUnsafePath verifies that an archive entry
// whose name escapes the extraction root is rejected during inspection.
func TestInspectSnapshotArchiveRejectsUnsafePath(t *testing.T) {
	t.Parallel()
	archive := filepath.Join(t.TempDir(), "bad.tar.gz")
	// The manifest describes a harmless file; the archive itself smuggles a
	// traversal entry.
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db")},
	})
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "../escape", Data: []byte("oops")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "unsafe archive path") {
		t.Fatalf("expected unsafe archive path error, got %v", err)
	}
}
// TestInspectSnapshotArchiveChecksumMismatch verifies that inspection fails
// when an archived file's contents differ from what the manifest records.
func TestInspectSnapshotArchiveChecksumMismatch(t *testing.T) {
	t.Parallel()
	archive := filepath.Join(t.TempDir(), "mismatch.tar.gz")
	// Manifest hashes "good" but the archive actually carries "bad".
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("good")},
	})
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("bad")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "checksum mismatch") {
		t.Fatalf("expected checksum mismatch error, got %v", err)
	}
}
// TestInspectSnapshotArchiveMissingManifest verifies that an archive without
// the embedded manifest entry is rejected.
func TestInspectSnapshotArchiveMissingManifest(t *testing.T) {
	t.Parallel()
	archive := filepath.Join(t.TempDir(), "no-manifest.tar.gz")
	// includeManifest=false omits the manifest entry entirely.
	err := writeSnapshotArchiveForTest(archive, nil, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	}, false)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "manifest.json not found") {
		t.Fatalf("expected missing manifest error, got %v", err)
	}
}
// TestInspectSnapshotArchiveUnsupportedFormat verifies that a manifest with
// an unknown format version is rejected even when contents verify.
func TestInspectSnapshotArchiveUnsupportedFormat(t *testing.T) {
	t.Parallel()
	archive := filepath.Join(t.TempDir(), "unsupported-format.tar.gz")
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	})
	// Bump the version past anything this build understands.
	manifest.FormatVersion = 99
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "unsupported snapshot format version") {
		t.Fatalf("expected unsupported format error, got %v", err)
	}
}
// TestRestoreSnapshotArchiveDestinationBehavior verifies restore's handling
// of an existing, non-empty destination: it refuses without --force, and
// with --force it replaces the destination (old files removed, archive
// contents present).
func TestRestoreSnapshotArchiveDestinationBehavior(t *testing.T) {
	t.Parallel()
	root := t.TempDir()
	archive := filepath.Join(root, "ok.tar.gz")
	destination := filepath.Join(root, "dst")
	entries := []snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db-bytes")},
		{Path: "chunks/c1", Data: []byte("chunk-1")},
	}
	manifest := manifestForEntries(entries)
	if err := writeSnapshotArchiveForTest(archive, manifest, entries, true); err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	// Seed the destination so it is non-empty before the restore attempts.
	if err := os.MkdirAll(destination, 0o755); err != nil {
		t.Fatalf("mkdir destination: %v", err)
	}
	if err := os.WriteFile(filepath.Join(destination, "old.txt"), []byte("old"), 0o600); err != nil {
		t.Fatalf("seed destination: %v", err)
	}
	if _, err := restoreSnapshotArchive(context.Background(), archive, destination, false); err == nil || !strings.Contains(err.Error(), "not empty") {
		t.Fatalf("expected non-empty destination error, got %v", err)
	}
	if _, err := restoreSnapshotArchive(context.Background(), archive, destination, true); err != nil {
		t.Fatalf("restore with force: %v", err)
	}
	// The forced restore replaces the directory wholesale.
	if _, err := os.Stat(filepath.Join(destination, "old.txt")); !os.IsNotExist(err) {
		t.Fatalf("expected old file to be removed, stat err=%v", err)
	}
	got, err := os.ReadFile(filepath.Join(destination, "chunks/c1"))
	if err != nil {
		t.Fatalf("read restored chunk: %v", err)
	}
	if string(got) != "chunk-1" {
		t.Fatalf("restored chunk mismatch: got %q", string(got))
	}
}
// TestCreateSnapshotArchiveRejectsOutputInsideDataPath verifies that the
// snapshot output file may not live inside the data path being archived
// (which would make the archive try to contain itself).
func TestCreateSnapshotArchiveRejectsOutputInsideDataPath(t *testing.T) {
	t.Parallel()
	root := t.TempDir()
	if err := os.MkdirAll(filepath.Join(root, "chunks"), 0o755); err != nil {
		t.Fatalf("mkdir chunks: %v", err)
	}
	// A real bolt file satisfies the metadata.db precondition checks.
	if err := createBoltDBForTest(filepath.Join(root, "metadata.db")); err != nil {
		t.Fatalf("create metadata db: %v", err)
	}
	if err := os.WriteFile(filepath.Join(root, "chunks/c1"), []byte("x"), 0o600); err != nil {
		t.Fatalf("write chunk: %v", err)
	}
	out := filepath.Join(root, "inside.tar.gz")
	if _, err := createSnapshotArchive(context.Background(), root, out); err == nil || !strings.Contains(err.Error(), "cannot be inside") {
		t.Fatalf("expected output-inside-data-path error, got %v", err)
	}
}
// writeSnapshotArchiveForTest writes a gzip-compressed tar archive at path
// containing the given entries, optionally preceded by the JSON-encoded
// manifest stored at snapshotManifestPath.
//
// The tar and gzip layers are closed explicitly and their errors returned:
// with deferred closes those errors were silently dropped, so a short write
// could produce a truncated archive that the test still treated as valid.
func writeSnapshotArchiveForTest(path string, manifest *snapshotManifest, entries []snapshotArchiveEntry, includeManifest bool) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	// Deferred close covers early-error returns; the success path closes
	// explicitly below (the redundant second Close on *os.File is harmless).
	defer file.Close()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	if includeManifest {
		raw, err := json.Marshal(manifest)
		if err != nil {
			return err
		}
		if err := writeTarEntry(tw, snapshotManifestPath, raw); err != nil {
			return err
		}
	}
	for _, entry := range entries {
		if err := writeTarEntry(tw, entry.Path, entry.Data); err != nil {
			return err
		}
	}
	// Finalize each layer in order; an error here means an incomplete archive.
	if err := tw.Close(); err != nil {
		return err
	}
	if err := gzw.Close(); err != nil {
		return err
	}
	return file.Close()
}
func writeTarEntry(tw *tar.Writer, name string, data []byte) error {
header := &tar.Header{
Name: name,
Mode: 0o600,
Size: int64(len(data)),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err := ioCopyBytes(tw, data)
return err
}
// manifestForEntries builds a snapshot manifest whose file list mirrors the
// given archive entries, with sizes and SHA-256 digests derived from each
// entry's data. CreatedAt and SourcePath are fixed test placeholders.
func manifestForEntries(entries []snapshotArchiveEntry) *snapshotManifest {
	manifest := &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     "2026-03-11T00:00:00Z",
		SourcePath:    "/tmp/source",
		Files:         make([]snapshotFileEntry, 0, len(entries)),
	}
	for _, entry := range entries {
		digest := sha256.Sum256(entry.Data)
		manifest.Files = append(manifest.Files, snapshotFileEntry{
			Path:   filepath.ToSlash(filepath.Clean(entry.Path)),
			Size:   int64(len(entry.Data)),
			SHA256: hex.EncodeToString(digest[:]),
		})
	}
	return manifest
}
// createBoltDBForTest creates a well-formed bbolt database file at path
// containing a single bucket, so tests have a realistic metadata.db fixture.
func createBoltDBForTest(path string) error {
	db, err := bolt.Open(path, 0o600, nil)
	if err != nil {
		return err
	}
	defer db.Close()
	return db.Update(func(tx *bolt.Tx) error {
		// One bucket is enough to make the file non-empty and valid.
		_, err := tx.CreateBucketIfNotExists([]byte("x"))
		return err
	})
}
func ioCopyBytes(w *tar.Writer, data []byte) (int64, error) {
n, err := bytes.NewReader(data).WriteTo(w)
return n, err
}

388
cmd/admin_user.go Normal file
View File

@@ -0,0 +1,388 @@
package cmd
import (
"context"
"fmt"
"strings"
"github.com/spf13/cobra"
)
// newAdminUserCommand returns the "user" command group with every user
// management subcommand attached.
func newAdminUserCommand(opts *adminOptions) *cobra.Command {
	group := &cobra.Command{
		Use:   "user",
		Short: "Manage auth users",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Bare "user" is not an action; print help instead.
			return cmd.Help()
		},
	}
	for _, sub := range []*cobra.Command{
		newAdminUserCreateCommand(opts),
		newAdminUserListCommand(opts),
		newAdminUserGetCommand(opts),
		newAdminUserDeleteCommand(opts),
		newAdminUserSetStatusCommand(opts),
		newAdminUserSetRoleCommand(opts),
		newAdminUserRemoveRoleCommand(opts),
	} {
		group.AddCommand(sub)
	}
	return group
}
// newAdminUserCreateCommand returns the "create" subcommand. It derives a
// policy from the role/bucket/prefix flags, calls the admin API to create the
// user, and prints the result as a table or JSON. The secret key is only
// returned by the server on create, so a reminder is printed when present.
func newAdminUserCreateCommand(opts *adminOptions) *cobra.Command {
	var (
		accessKey string // --access-key: required user identifier
		secretKey string // --secret-key: optional; server generates when empty
		status    string // --status: active|disabled
		role      string // --role: admin|readwrite|readonly
		bucket    string // --bucket: bucket scope for the policy statement
		prefix    string // --prefix: key-prefix scope for the policy statement
	)
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a user",
		RunE: func(cmd *cobra.Command, args []string) error {
			accessKey = strings.TrimSpace(accessKey)
			if accessKey == "" {
				return usageError("fs admin user create --access-key <id> --role admin|readwrite|readonly [--status active|disabled] [--bucket <name>] [--prefix <path>]", "--access-key is required")
			}
			// Translate the role shorthand into a concrete policy document.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user create --access-key <id> --role admin|readwrite|readonly [--status active|disabled] [--bucket <name>] [--prefix <path>]", err.Error())
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			out, err := client.CreateUser(context.Background(), createUserRequest{
				AccessKeyID: accessKey,
				SecretKey:   strings.TrimSpace(secretKey),
				Status:      strings.TrimSpace(status),
				Policy:      policy,
			})
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			if err := writeUserTable(cmd.OutOrStdout(), out, true); err != nil {
				return err
			}
			// The secret is only shown once; warn the operator to save it.
			if strings.TrimSpace(out.SecretKey) != "" {
				_, _ = fmt.Fprintln(cmd.OutOrStdout(), "\nsecretKey is only returned once during create; store it securely.")
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&accessKey, "access-key", "", "User access key ID")
	cmd.Flags().StringVar(&secretKey, "secret-key", "", "User secret key (optional; auto-generated when omitted)")
	cmd.Flags().StringVar(&status, "status", "active", "User status: active|disabled")
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
// newAdminUserListCommand returns the "list" subcommand, which pages through
// users using limit/cursor pagination and prints them as a table or JSON.
func newAdminUserListCommand(opts *adminOptions) *cobra.Command {
	var (
		limit  int    // --limit: page size, 1-1000
		cursor string // --cursor: opaque token from a previous page
	)
	cmd := &cobra.Command{
		Use:   "list",
		Short: "List users",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate client-side against the API's documented 1-1000 range.
			if limit < 1 || limit > 1000 {
				return usageError("fs admin user list [--limit 1-1000] [--cursor <token>]", "--limit must be between 1 and 1000")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			out, err := client.ListUsers(context.Background(), limit, cursor)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserListTable(cmd.OutOrStdout(), out)
		},
	}
	cmd.Flags().IntVar(&limit, "limit", 100, "List page size (1-1000)")
	cmd.Flags().StringVar(&cursor, "cursor", "", "Pagination cursor from previous list call")
	return cmd
}
// newAdminUserGetCommand returns the "get" subcommand, which fetches one user
// by access key ID and prints it as a table or JSON.
func newAdminUserGetCommand(opts *adminOptions) *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		client, err := newAdminAPIClient(opts, true)
		if err != nil {
			return err
		}
		user, err := client.GetUser(context.Background(), args[0])
		if err != nil {
			return err
		}
		if opts.JSON {
			return writeJSON(cmd.OutOrStdout(), user)
		}
		return writeUserTable(cmd.OutOrStdout(), user, false)
	}
	return &cobra.Command{
		Use:   "get <access-key-id>",
		Short: "Get one user",
		Args:  requireAccessKeyArg("fs admin user get <access-key-id>"),
		RunE:  run,
	}
}
// newAdminUserDeleteCommand returns the "delete" subcommand, which removes
// one user and confirms the deletion in plain text or JSON.
func newAdminUserDeleteCommand(opts *adminOptions) *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		accessKeyID := args[0]
		client, err := newAdminAPIClient(opts, true)
		if err != nil {
			return err
		}
		if err := client.DeleteUser(context.Background(), accessKeyID); err != nil {
			return err
		}
		if opts.JSON {
			return writeJSON(cmd.OutOrStdout(), map[string]string{
				"status":      "deleted",
				"accessKeyId": accessKeyID,
			})
		}
		_, err = fmt.Fprintf(cmd.OutOrStdout(), "deleted user %s\n", accessKeyID)
		return err
	}
	return &cobra.Command{
		Use:   "delete <access-key-id>",
		Short: "Delete one user",
		Args:  requireAccessKeyArg("fs admin user delete <access-key-id>"),
		RunE:  run,
	}
}
// newAdminUserSetStatusCommand returns the "set-status" subcommand, which
// toggles a user between active and disabled via the admin API.
func newAdminUserSetStatusCommand(opts *adminOptions) *cobra.Command {
	var status string // --status: required, active|disabled
	cmd := &cobra.Command{
		Use:   "set-status <access-key-id>",
		Short: "Set user status",
		Args:  requireAccessKeyArg("fs admin user set-status <access-key-id> --status active|disabled"),
		RunE: func(cmd *cobra.Command, args []string) error {
			status = strings.TrimSpace(status)
			if status == "" {
				return usageError("fs admin user set-status <access-key-id> --status active|disabled", "--status is required")
			}
			// Lowercase and validate locally so bad input fails before any
			// network round trip.
			normalized := strings.ToLower(status)
			if normalized != "active" && normalized != "disabled" {
				return usageError("fs admin user set-status <access-key-id> --status active|disabled", "--status must be active or disabled")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			out, err := client.SetUserStatus(context.Background(), args[0], normalized)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&status, "status", "", "User status: active|disabled")
	return cmd
}
// newAdminUserSetRoleCommand returns the "set-role" subcommand. By default it
// appends the role-derived statement to the user's current policy (fetching
// it first); with --replace the new statement becomes the entire policy.
func newAdminUserSetRoleCommand(opts *adminOptions) *cobra.Command {
	var (
		role    string // --role: admin|readwrite|readonly
		bucket  string // --bucket: bucket scope for the statement
		prefix  string // --prefix: key-prefix scope for the statement
		replace bool   // --replace: overwrite existing statements instead of appending
	)
	cmd := &cobra.Command{
		Use:   "set-role <access-key-id>",
		Short: "Add or replace user role policy statement",
		Args:  requireAccessKeyArg("fs admin user set-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>] [--replace]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Translate the role shorthand into a concrete policy document.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user set-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>] [--replace]", err.Error())
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			finalPolicy := policy
			if !replace {
				// Append mode: read the user's current policy and merge the
				// new statement into it.
				existing, err := client.GetUser(context.Background(), args[0])
				if err != nil {
					return err
				}
				finalPolicy = mergePolicyStatements(existing.Policy, policy)
			}
			out, err := client.SetUserPolicy(context.Background(), args[0], finalPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	cmd.Flags().BoolVar(&replace, "replace", false, "Replace all existing policy statements instead of appending")
	return cmd
}
// newAdminUserRemoveRoleCommand returns the "remove-role" subcommand. It
// rebuilds the statement described by the flags, removes any exact match from
// the user's policy, and refuses to leave the user with zero statements.
func newAdminUserRemoveRoleCommand(opts *adminOptions) *cobra.Command {
	var (
		role   string // --role: admin|readwrite|readonly
		bucket string // --bucket: bucket scope of the statement to remove
		prefix string // --prefix: key-prefix scope of the statement to remove
	)
	cmd := &cobra.Command{
		Use:   "remove-role <access-key-id>",
		Short: "Remove one role policy statement from user",
		Args:  requireAccessKeyArg("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Rebuild the statement the flags describe; it must deep-equal an
			// existing statement to be removed.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]", err.Error())
			}
			if len(policy.Statements) == 0 {
				return usageError("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]", "no statement to remove")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			existing, err := client.GetUser(context.Background(), args[0])
			if err != nil {
				return err
			}
			if existing.Policy == nil || len(existing.Policy.Statements) == 0 {
				return fmt.Errorf("user %q has no policy statements", args[0])
			}
			target := policy.Statements[0]
			nextPolicy, removed := removePolicyStatements(existing.Policy, target)
			if removed == 0 {
				return fmt.Errorf("no matching statement found for role=%s bucket=%s prefix=%s", role, bucket, prefix)
			}
			// A user must always retain at least one policy statement.
			if len(nextPolicy.Statements) == 0 {
				return fmt.Errorf("cannot remove the last policy statement; add another role first or use set-role --replace")
			}
			out, err := client.SetUserPolicy(context.Background(), args[0], nextPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
// mergePolicyStatements returns a copy of existing with the statements from
// addition appended. Statements already present (deep-equal per
// policyStatementsEqual) are skipped, keeping repeated set-role calls
// idempotent.
//
// The previous implementation only considered addition's first statement and
// silently dropped the rest; merging all statements is identical for the
// single-statement policies buildPolicyFromRole produces here, but correct
// for multi-statement additions too.
func mergePolicyStatements(existing *adminPolicy, addition adminPolicy) adminPolicy {
	merged := adminPolicy{}
	if existing != nil {
		merged.Principal = existing.Principal
		merged.Statements = append(merged.Statements, existing.Statements...)
	}
	for _, stmt := range addition.Statements {
		duplicate := false
		for _, current := range merged.Statements {
			if policyStatementsEqual(current, stmt) {
				duplicate = true
				break
			}
		}
		if !duplicate {
			merged.Statements = append(merged.Statements, stmt)
		}
	}
	return merged
}
// policyStatementsEqual reports whether two policy statements are deep-equal:
// same effect, bucket, prefix, and an identical ordered action list.
func policyStatementsEqual(a, b adminPolicyStatement) bool {
	switch {
	case a.Effect != b.Effect:
		return false
	case a.Bucket != b.Bucket:
		return false
	case a.Prefix != b.Prefix:
		return false
	case len(a.Actions) != len(b.Actions):
		return false
	}
	for i, action := range a.Actions {
		if b.Actions[i] != action {
			return false
		}
	}
	return true
}
// removePolicyStatements returns a copy of existing without any statement
// deep-equal to target, plus the number of statements removed. A nil existing
// policy yields an empty policy and zero removals.
func removePolicyStatements(existing *adminPolicy, target adminPolicyStatement) (adminPolicy, int) {
	result := adminPolicy{}
	if existing == nil {
		return result, 0
	}
	result.Principal = existing.Principal
	result.Statements = make([]adminPolicyStatement, 0, len(existing.Statements))
	removed := 0
	for _, candidate := range existing.Statements {
		if policyStatementsEqual(candidate, target) {
			removed++
		} else {
			result.Statements = append(result.Statements, candidate)
		}
	}
	return result, removed
}
// requireAccessKeyArg returns a cobra positional-args validator that demands
// exactly one non-blank <access-key-id>; usage is echoed in the error.
func requireAccessKeyArg(usage string) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		switch {
		case len(args) != 1:
			return usageError(usage, "missing or invalid <access-key-id> argument")
		case strings.TrimSpace(args[0]) == "":
			return usageError(usage, "access key id cannot be empty")
		default:
			return nil
		}
	}
}
// usageError builds a flag/argument validation error whose text is the
// message followed by the full usage line on the next line.
func usageError(usage, message string) error {
	return fmt.Errorf("%s\nusage: %s", message, usage)
}

20
cmd/buildinfo.go Normal file
View File

@@ -0,0 +1,20 @@
package cmd
// BuildInfo describes the binary's build-time version metadata.
type BuildInfo struct {
	Version string
	Commit  string
	Date    string
}

// normalized returns a copy of b in which every empty field is replaced by
// its conventional placeholder ("dev", "none", "unknown").
func (b BuildInfo) normalized() BuildInfo {
	b.Version = defaultIfEmpty(b.Version, "dev")
	b.Commit = defaultIfEmpty(b.Commit, "none")
	b.Date = defaultIfEmpty(b.Date, "unknown")
	return b
}

// defaultIfEmpty returns value, or fallback when value is the empty string.
func defaultIfEmpty(value, fallback string) string {
	if value == "" {
		return fallback
	}
	return value
}

26
cmd/execute.go Normal file
View File

@@ -0,0 +1,26 @@
package cmd
import (
"context"
"fs/app"
"os"
"os/signal"
"syscall"
)
// Execute is the CLI entry point. With no arguments the server runs directly;
// otherwise the cobra root command dispatches to the matching subcommand.
func Execute(build BuildInfo) error {
	// Fill empty build fields with placeholder defaults before wiring them in.
	build = build.normalized()
	if len(os.Args) == 1 {
		// No subcommand given: behave as a plain server binary.
		return runServerWithSignals()
	}
	root := newRootCommand(build)
	return root.Execute()
}
// runServerWithSignals runs the server with a context that is cancelled on
// SIGINT or SIGTERM, giving app.RunServer a shutdown signal to act on.
func runServerWithSignals() error {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	return app.RunServer(ctx)
}

36
cmd/root.go Normal file
View File

@@ -0,0 +1,36 @@
package cmd
import (
"fmt"
"runtime"
"github.com/spf13/cobra"
)
// newRootCommand builds the top-level "fs" command with the server and admin
// subcommands, plus version reporting via both --version and a "version"
// subcommand (the latter also prints the Go runtime version).
func newRootCommand(build BuildInfo) *cobra.Command {
	root := &cobra.Command{
		Use:   "fs",
		Short: "fs object storage server and admin CLI",
		// Suppress cobra's own usage/error printing; the caller reports errors.
		SilenceUsage:  true,
		SilenceErrors: true,
		Version:       build.Version,
	}
	root.SetVersionTemplate(versionTemplate(build))
	root.AddCommand(newServerCommand())
	root.AddCommand(newAdminCommand(build))
	root.AddCommand(&cobra.Command{
		Use:   "version",
		Short: "Print build and runtime version information",
		RunE: func(cmd *cobra.Command, args []string) error {
			_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n", build.Version, build.Commit, build.Date, runtime.Version())
			return err
		},
	})
	return root
}
// versionTemplate renders the fixed template string cobra prints for
// --version on the root command.
func versionTemplate(build BuildInfo) string {
	return "version=" + build.Version + " commit=" + build.Commit + " date=" + build.Date + "\n"
}

15
cmd/server.go Normal file
View File

@@ -0,0 +1,15 @@
package cmd
import (
"github.com/spf13/cobra"
)
// newServerCommand returns the "server" subcommand, which runs the object
// storage server until an interrupt or termination signal stops it.
func newServerCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "server",
		Short: "Run fs object storage server",
		RunE: func(cmd *cobra.Command, args []string) error {
			return runServerWithSignals()
		},
	}
}

336
docs/admin-api-openapi.yaml Normal file
View File

@@ -0,0 +1,336 @@
openapi: 3.1.0
info:
title: fs Admin API
version: 1.0.0
description: |
JSON admin API for managing local users and policies.
Notes:
- Base path is `/_admin/v1`.
- Requests must be AWS SigV4 signed.
- Only the bootstrap access key is authorized for admin endpoints.
servers:
- url: http://localhost:2600
description: Local development
security:
- AwsSigV4: []
paths:
/_admin/v1/users:
post:
summary: Create user
operationId: createUser
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/CreateUserRequest'
responses:
'201':
description: User created
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'409':
$ref: '#/components/responses/UserAlreadyExists'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
get:
summary: List users
operationId: listUsers
parameters:
- name: limit
in: query
required: false
schema:
type: integer
minimum: 1
maximum: 1000
default: 100
- name: cursor
in: query
required: false
schema:
type: string
responses:
'200':
description: User summaries
content:
application/json:
schema:
$ref: '#/components/schemas/UserListResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}:
get:
summary: Get user with policy
operationId: getUser
parameters:
- $ref: '#/components/parameters/AccessKeyId'
responses:
'200':
description: User details
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
delete:
summary: Delete user
operationId: deleteUser
parameters:
- $ref: '#/components/parameters/AccessKeyId'
responses:
'204':
description: User deleted
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}/policy:
put:
summary: Replace user policy
operationId: setUserPolicy
parameters:
- $ref: '#/components/parameters/AccessKeyId'
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/SetPolicyRequest'
responses:
'200':
description: User details with updated policy
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
/_admin/v1/users/{accessKeyId}/status:
put:
summary: Set user status
operationId: setUserStatus
parameters:
- $ref: '#/components/parameters/AccessKeyId'
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/SetStatusRequest'
responses:
'200':
description: User details with updated status
content:
application/json:
schema:
$ref: '#/components/schemas/UserResponse'
'400':
$ref: '#/components/responses/InvalidRequest'
'403':
$ref: '#/components/responses/Forbidden'
'404':
$ref: '#/components/responses/UserNotFound'
'503':
$ref: '#/components/responses/AuthDisabled'
'500':
$ref: '#/components/responses/InternalError'
components:
securitySchemes:
AwsSigV4:
type: apiKey
in: header
name: Authorization
description: |
AWS Signature Version 4 headers are required (`Authorization`, `x-amz-date`,
and for payload-signed requests `x-amz-content-sha256`).
Only bootstrap credential is authorized for admin endpoints.
parameters:
AccessKeyId:
name: accessKeyId
in: path
required: true
schema:
type: string
description: User access key ID
responses:
InvalidRequest:
description: Invalid request input
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
Forbidden:
description: Authenticated but not allowed
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
UserAlreadyExists:
description: User already exists
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
UserNotFound:
description: User not found
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
AuthDisabled:
description: Authentication subsystem disabled
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
InternalError:
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/AdminError'
schemas:
AdminError:
type: object
properties:
code:
type: string
message:
type: string
requestId:
type: string
required: [code, message]
PolicyStatement:
type: object
properties:
effect:
type: string
enum: [allow, deny]
actions:
type: array
items:
type: string
minItems: 1
bucket:
type: string
default: "*"
prefix:
type: string
default: "*"
required: [effect, actions]
Policy:
type: object
properties:
principal:
type: string
description: Server-managed; overwritten with target access key ID.
statements:
type: array
items:
$ref: '#/components/schemas/PolicyStatement'
minItems: 1
required: [statements]
CreateUserRequest:
type: object
properties:
accessKeyId:
type: string
secretKey:
type: string
description: If omitted, server generates one.
status:
type: string
enum: [active, disabled]
default: active
policy:
$ref: '#/components/schemas/Policy'
required: [accessKeyId, policy]
SetPolicyRequest:
type: object
properties:
policy:
$ref: '#/components/schemas/Policy'
required: [policy]
SetStatusRequest:
type: object
properties:
status:
type: string
enum: [active, disabled]
required: [status]
UserListItem:
type: object
properties:
accessKeyId:
type: string
status:
type: string
enum: [active, disabled]
createdAt:
type: integer
format: int64
updatedAt:
type: integer
format: int64
required: [accessKeyId, status, createdAt, updatedAt]
UserListResponse:
type: object
properties:
items:
type: array
items:
$ref: '#/components/schemas/UserListItem'
nextCursor:
type: string
required: [items]
UserResponse:
allOf:
- $ref: '#/components/schemas/UserListItem'
- type: object
properties:
policy:
$ref: '#/components/schemas/Policy'
secretKey:
type: string
description: Returned only on create.

53
docs/s3-compatibility.md Normal file
View File

@@ -0,0 +1,53 @@
# S3 Compatibility Matrix
This project is S3-compatible for a focused subset of operations.
## Implemented
### Service and account
- `GET /` list buckets
### Bucket
- `PUT /{bucket}` create bucket
- `HEAD /{bucket}` head bucket
- `DELETE /{bucket}` delete bucket (must be empty)
- `GET /{bucket}?list-type=2...` list objects v2
- `GET /{bucket}?location` get bucket location
- `POST /{bucket}?delete` delete multiple objects
### Object
- `PUT /{bucket}/{key}` put object
- `GET /{bucket}/{key}` get object
- `HEAD /{bucket}/{key}` head object
- `DELETE /{bucket}/{key}` delete object
- `GET /{bucket}/{key}` with a `Range` header (a single byte range per request)
### Multipart upload
- `POST /{bucket}/{key}?uploads` initiate
- `PUT /{bucket}/{key}?uploadId=...&partNumber=N` upload part
- `GET /{bucket}/{key}?uploadId=...` list parts
- `POST /{bucket}/{key}?uploadId=...` complete
- `DELETE /{bucket}/{key}?uploadId=...` abort
### Authentication
- AWS SigV4 header auth
- AWS SigV4 presigned query auth
- `aws-chunked` payload decode for streaming uploads
## Partially Implemented / Differences
- Exact parity with AWS S3 error codes/headers is still evolving.
- Some S3 edge-case behaviors may differ (especially uncommon query/header combinations).
- Admin API is custom JSON (`/_admin/v1/*`).
## Not Implemented (Current)
- Bucket versioning
- Lifecycle rules
- Replication
- Object lock / legal hold / retention
- SSE-S3 / SSE-KMS / SSE-C
- ACL APIs and IAM-compatible policy APIs
- STS / temporary credentials
- Event notifications
- Tagging APIs
- CORS APIs
- Website hosting APIs

5
go.mod
View File

@@ -5,10 +5,13 @@ go 1.25.7
require ( require (
github.com/go-chi/chi/v5 v5.2.5 github.com/go-chi/chi/v5 v5.2.5
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/joho/godotenv v1.5.1
github.com/spf13/cobra v1.10.1
go.etcd.io/bbolt v1.4.3 go.etcd.io/bbolt v1.4.3
) )
require ( require (
github.com/joho/godotenv v1.5.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
golang.org/x/sys v0.41.0 // indirect golang.org/x/sys v0.41.0 // indirect
) )

9
go.sum
View File

@@ -1,13 +1,21 @@
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
@@ -16,5 +24,6 @@ golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,6 +1,7 @@
package logging package logging
import ( import (
"fs/metrics"
"log/slog" "log/slog"
"net/http" "net/http"
"os" "os"
@@ -9,6 +10,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware" "github.com/go-chi/chi/v5/middleware"
) )
@@ -86,6 +88,11 @@ func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Han
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now() start := time.Now()
ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor) ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
op := metricOperationLabel(r)
metrics.Default.IncHTTPInFlightOp(op)
defer func() {
metrics.Default.DecHTTPInFlightOp(op)
}()
requestID := middleware.GetReqID(r.Context()) requestID := middleware.GetReqID(r.Context())
if requestID != "" { if requestID != "" {
ww.Header().Set("x-amz-request-id", requestID) ww.Header().Set("x-amz-request-id", requestID)
@@ -93,15 +100,18 @@ func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Han
next.ServeHTTP(ww, r) next.ServeHTTP(ww, r)
if !cfg.Audit && !cfg.DebugMode {
return
}
elapsed := time.Since(start) elapsed := time.Since(start)
status := ww.Status() status := ww.Status()
if status == 0 { if status == 0 {
status = http.StatusOK status = http.StatusOK
} }
route := metricRouteLabel(r)
metrics.Default.ObserveHTTPRequestDetailed(r.Method, route, op, status, elapsed, ww.BytesWritten())
if !cfg.Audit && !cfg.DebugMode {
return
}
attrs := []any{ attrs := []any{
"method", r.Method, "method", r.Method,
"path", r.URL.Path, "path", r.URL.Path,
@@ -131,6 +141,46 @@ func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Han
} }
} }
func metricRouteLabel(r *http.Request) string {
if r == nil || r.URL == nil {
return "/unknown"
}
if routeCtx := chi.RouteContext(r.Context()); routeCtx != nil {
if pattern := strings.TrimSpace(routeCtx.RoutePattern()); pattern != "" {
return pattern
}
}
path := strings.TrimSpace(r.URL.Path)
if path == "" || path == "/" {
return "/"
}
if path == "/healthz" || path == "/metrics" {
return path
}
trimmed := strings.Trim(path, "/")
if trimmed == "" {
return "/"
}
if !strings.Contains(trimmed, "/") {
return "/{bucket}"
}
return "/{bucket}/*"
}
func metricOperationLabel(r *http.Request) string {
if r == nil {
return "other"
}
isDeletePost := false
if r.Method == http.MethodPost && r.URL != nil {
_, isDeletePost = r.URL.Query()["delete"]
}
return metrics.NormalizeHTTPOperation(r.Method, isDeletePost)
}
func envBool(key string, defaultValue bool) bool { func envBool(key string, defaultValue bool) bool {
raw := os.Getenv(key) raw := os.Getenv(key)
if raw == "" { if raw == "" {

View File

@@ -0,0 +1,30 @@
package logging
import (
"net/http/httptest"
"testing"
)
// TestMetricRouteLabelFallbacks exercises metricRouteLabel's path-based
// fallbacks (used when no chi route pattern is attached to the request):
// fixed paths map to themselves, one-segment paths collapse to "/{bucket}",
// and deeper paths to "/{bucket}/*", which keeps the metric label set small.
func TestMetricRouteLabelFallbacks(t *testing.T) {
	testCases := []struct {
		name string // subtest name
		path string // request path fed to metricRouteLabel
		want string // expected route label
	}{
		{name: "root", path: "/", want: "/"},
		{name: "health", path: "/healthz", want: "/healthz"},
		{name: "metrics", path: "/metrics", want: "/metrics"},
		{name: "bucket", path: "/some-bucket", want: "/{bucket}"},
		{name: "object", path: "/some-bucket/private/path/file.jpg", want: "/{bucket}/*"},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", tc.path, nil)
			got := metricRouteLabel(req)
			if got != tc.want {
				t.Fatalf("metricRouteLabel(%q) = %q, want %q", tc.path, got, tc.want)
			}
		})
	}
}

93
main.go
View File

@@ -1,89 +1,24 @@
package main package main
import ( import (
"context" "fs/cmd"
"fs/api"
"fs/auth"
"fs/logging"
"fs/metadata"
"fs/service"
"fs/storage"
"fs/utils"
"os" "os"
"os/signal" )
"path/filepath"
"strconv" var (
"syscall" version = "dev"
"time" commit = "none"
date = "unknown"
) )
func main() { func main() {
config := utils.NewConfig() build := cmd.BuildInfo{
logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog) Version: version,
authConfig := auth.ConfigFromValues( Commit: commit,
config.AuthEnabled, Date: date,
config.AuthRegion,
config.AuthSkew,
config.AuthMaxPresign,
config.AuthMasterKey,
config.AuthBootstrapAccessKey,
config.AuthBootstrapSecretKey,
config.AuthBootstrapPolicy,
)
logger := logging.NewLogger(logConfig)
logger.Info("boot",
"log_level", logConfig.LevelName,
"log_format", logConfig.Format,
"audit_log", logConfig.Audit,
"data_path", config.DataPath,
"multipart_retention_hours", int(config.MultipartCleanupRetention/time.Hour),
"auth_enabled", authConfig.Enabled,
"auth_region", authConfig.Region,
)
if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
return
} }
if err := cmd.Execute(build); err != nil {
dbPath := filepath.Join(config.DataPath, "metadata.db") _, _ = os.Stderr.WriteString(err.Error() + "\n")
metadataHandler, err := metadata.NewMetadataHandler(dbPath) os.Exit(1)
if err != nil {
logger.Error("failed_to_initialize_metadata_handler", "error", err)
return
} }
blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
if err != nil {
_ = metadataHandler.Close()
logger.Error("failed_to_initialize_blob_store", "error", err)
return
}
objectService := service.NewObjectService(metadataHandler, blobHandler, config.MultipartCleanupRetention)
authService, err := auth.NewService(authConfig, metadataHandler)
if err != nil {
_ = metadataHandler.Close()
logger.Error("failed_to_initialize_auth_service", "error", err)
return
}
if err := authService.EnsureBootstrap(); err != nil {
_ = metadataHandler.Close()
logger.Error("failed_to_ensure_bootstrap_auth_identity", "error", err)
return
}
handler := api.NewHandler(objectService, logger, logConfig, authService)
addr := config.Address + ":" + strconv.Itoa(config.Port)
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()
if config.GcEnabled {
go objectService.RunGC(ctx, config.GcInterval)
}
if err = handler.Start(ctx, addr); err != nil {
logger.Error("server_stopped_with_error", "error", err)
return
}
} }

View File

@@ -4,6 +4,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"fs/metrics"
"fs/models" "fs/models"
"net" "net"
"regexp" "regexp"
@@ -47,7 +48,7 @@ func NewMetadataHandler(dbPath string) (*MetadataHandler, error) {
} }
h := &MetadataHandler{db: db} h := &MetadataHandler{db: db}
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(systemIndex) _, err := tx.CreateBucketIfNotExists(systemIndex)
return err return err
}) })
@@ -55,7 +56,7 @@ func NewMetadataHandler(dbPath string) (*MetadataHandler, error) {
_ = db.Close() _ = db.Close()
return nil, err return nil, err
} }
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(multipartUploadIndex) _, err := tx.CreateBucketIfNotExists(multipartUploadIndex)
return err return err
}) })
@@ -63,7 +64,7 @@ func NewMetadataHandler(dbPath string) (*MetadataHandler, error) {
_ = db.Close() _ = db.Close()
return nil, err return nil, err
} }
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(multipartUploadPartsIndex) _, err := tx.CreateBucketIfNotExists(multipartUploadPartsIndex)
return err return err
}) })
@@ -71,7 +72,7 @@ func NewMetadataHandler(dbPath string) (*MetadataHandler, error) {
_ = db.Close() _ = db.Close()
return nil, err return nil, err
} }
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(authIdentitiesIndex) _, err := tx.CreateBucketIfNotExists(authIdentitiesIndex)
return err return err
}) })
@@ -79,7 +80,7 @@ func NewMetadataHandler(dbPath string) (*MetadataHandler, error) {
_ = db.Close() _ = db.Close()
return nil, err return nil, err
} }
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(authPoliciesIndex) _, err := tx.CreateBucketIfNotExists(authPoliciesIndex)
return err return err
}) })
@@ -119,6 +120,20 @@ func (h *MetadataHandler) Close() error {
return h.db.Close() return h.db.Close()
} }
// view runs a read-only bbolt transaction and records its latency and
// outcome in the default metrics registry.
func (h *MetadataHandler) view(fn func(tx *bbolt.Tx) error) (err error) {
	began := time.Now()
	defer func() {
		metrics.Default.ObserveMetadataTx("view", time.Since(began), err == nil)
	}()
	err = h.db.View(fn)
	return err
}

// update runs a read-write bbolt transaction and records its latency and
// outcome in the default metrics registry.
func (h *MetadataHandler) update(fn func(tx *bbolt.Tx) error) (err error) {
	began := time.Now()
	defer func() {
		metrics.Default.ObserveMetadataTx("update", time.Since(began), err == nil)
	}()
	err = h.db.Update(fn)
	return err
}
func (h *MetadataHandler) PutAuthIdentity(identity *models.AuthIdentity) error { func (h *MetadataHandler) PutAuthIdentity(identity *models.AuthIdentity) error {
if identity == nil { if identity == nil {
return errors.New("auth identity is required") return errors.New("auth identity is required")
@@ -126,7 +141,7 @@ func (h *MetadataHandler) PutAuthIdentity(identity *models.AuthIdentity) error {
if strings.TrimSpace(identity.AccessKeyID) == "" { if strings.TrimSpace(identity.AccessKeyID) == "" {
return errors.New("access key id is required") return errors.New("access key id is required")
} }
return h.db.Update(func(tx *bbolt.Tx) error { return h.update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(authIdentitiesIndex) bucket := tx.Bucket(authIdentitiesIndex)
if bucket == nil { if bucket == nil {
return errors.New("auth identities index not found") return errors.New("auth identities index not found")
@@ -139,6 +154,23 @@ func (h *MetadataHandler) PutAuthIdentity(identity *models.AuthIdentity) error {
}) })
} }
// DeleteAuthIdentity removes the stored auth identity for the given access
// key. It returns a wrapped ErrAuthIdentityNotFound when no identity with
// that key exists.
func (h *MetadataHandler) DeleteAuthIdentity(accessKeyID string) error {
	key := strings.TrimSpace(accessKeyID)
	if key == "" {
		return errors.New("access key id is required")
	}
	return h.update(func(tx *bbolt.Tx) error {
		idx := tx.Bucket(authIdentitiesIndex)
		if idx == nil {
			return errors.New("auth identities index not found")
		}
		raw := []byte(key)
		// Probe first so a missing key yields a typed not-found error
		// instead of a silent no-op delete.
		if idx.Get(raw) == nil {
			return fmt.Errorf("%w: %s", ErrAuthIdentityNotFound, key)
		}
		return idx.Delete(raw)
	})
}
func (h *MetadataHandler) GetAuthIdentity(accessKeyID string) (*models.AuthIdentity, error) { func (h *MetadataHandler) GetAuthIdentity(accessKeyID string) (*models.AuthIdentity, error) {
accessKeyID = strings.TrimSpace(accessKeyID) accessKeyID = strings.TrimSpace(accessKeyID)
if accessKeyID == "" { if accessKeyID == "" {
@@ -146,7 +178,7 @@ func (h *MetadataHandler) GetAuthIdentity(accessKeyID string) (*models.AuthIdent
} }
var identity *models.AuthIdentity var identity *models.AuthIdentity
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(authIdentitiesIndex) bucket := tx.Bucket(authIdentitiesIndex)
if bucket == nil { if bucket == nil {
return errors.New("auth identities index not found") return errors.New("auth identities index not found")
@@ -177,7 +209,7 @@ func (h *MetadataHandler) PutAuthPolicy(policy *models.AuthPolicy) error {
return errors.New("auth policy principal is required") return errors.New("auth policy principal is required")
} }
policy.Principal = principal policy.Principal = principal
return h.db.Update(func(tx *bbolt.Tx) error { return h.update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(authPoliciesIndex) bucket := tx.Bucket(authPoliciesIndex)
if bucket == nil { if bucket == nil {
return errors.New("auth policies index not found") return errors.New("auth policies index not found")
@@ -190,6 +222,23 @@ func (h *MetadataHandler) PutAuthPolicy(policy *models.AuthPolicy) error {
}) })
} }
// DeleteAuthPolicy removes the stored auth policy for the given access
// key. It returns a wrapped ErrAuthPolicyNotFound when no policy with
// that key exists.
func (h *MetadataHandler) DeleteAuthPolicy(accessKeyID string) error {
	key := strings.TrimSpace(accessKeyID)
	if key == "" {
		return errors.New("access key id is required")
	}
	return h.update(func(tx *bbolt.Tx) error {
		idx := tx.Bucket(authPoliciesIndex)
		if idx == nil {
			return errors.New("auth policies index not found")
		}
		raw := []byte(key)
		// Probe first so a missing key yields a typed not-found error
		// instead of a silent no-op delete.
		if idx.Get(raw) == nil {
			return fmt.Errorf("%w: %s", ErrAuthPolicyNotFound, key)
		}
		return idx.Delete(raw)
	})
}
func (h *MetadataHandler) GetAuthPolicy(accessKeyID string) (*models.AuthPolicy, error) { func (h *MetadataHandler) GetAuthPolicy(accessKeyID string) (*models.AuthPolicy, error) {
accessKeyID = strings.TrimSpace(accessKeyID) accessKeyID = strings.TrimSpace(accessKeyID)
if accessKeyID == "" { if accessKeyID == "" {
@@ -197,7 +246,7 @@ func (h *MetadataHandler) GetAuthPolicy(accessKeyID string) (*models.AuthPolicy,
} }
var policy *models.AuthPolicy var policy *models.AuthPolicy
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(authPoliciesIndex) bucket := tx.Bucket(authPoliciesIndex)
if bucket == nil { if bucket == nil {
return errors.New("auth policies index not found") return errors.New("auth policies index not found")
@@ -219,12 +268,59 @@ func (h *MetadataHandler) GetAuthPolicy(accessKeyID string) (*models.AuthPolicy,
return policy, nil return policy, nil
} }
// ListAuthIdentities returns up to limit auth identities in key order,
// starting strictly after the cursor key `after` (empty cursor starts at
// the beginning). A non-positive limit defaults to 100. The second return
// value is the cursor for the next page: the key of the first record NOT
// included in this page, or "" when the listing is exhausted.
func (h *MetadataHandler) ListAuthIdentities(limit int, after string) ([]models.AuthIdentity, string, error) {
	if limit <= 0 {
		limit = 100
	}
	after = strings.TrimSpace(after)
	identities := make([]models.AuthIdentity, 0, limit)
	nextCursor := ""
	err := h.view(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(authIdentitiesIndex)
		if bucket == nil {
			return errors.New("auth identities index not found")
		}
		cursor := bucket.Cursor()
		var k, v []byte
		if after == "" {
			k, v = cursor.First()
		} else {
			// Seek lands on the cursor key itself when it exists;
			// skip it so pagination is exclusive of `after`.
			k, v = cursor.Seek([]byte(after))
			if k != nil && string(k) == after {
				k, v = cursor.Next()
			}
		}
		count := 0
		for ; k != nil; k, v = cursor.Next() {
			// We deliberately read one key past the limit: that extra
			// key becomes the next-page cursor without being returned.
			if count >= limit {
				nextCursor = string(k)
				break
			}
			record := models.AuthIdentity{}
			if err := json.Unmarshal(v, &record); err != nil {
				return err
			}
			identities = append(identities, record)
			count++
		}
		return nil
	})
	if err != nil {
		return nil, "", err
	}
	return identities, nextCursor, nil
}
func (h *MetadataHandler) CreateBucket(bucketName string) error { func (h *MetadataHandler) CreateBucket(bucketName string) error {
if !isValidBucketName(bucketName) { if !isValidBucketName(bucketName) {
return fmt.Errorf("%w: %s", ErrInvalidBucketName, bucketName) return fmt.Errorf("%w: %s", ErrInvalidBucketName, bucketName)
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
indexBucket, err := tx.CreateBucketIfNotExists([]byte(systemIndex)) indexBucket, err := tx.CreateBucketIfNotExists([]byte(systemIndex))
if err != nil { if err != nil {
return err return err
@@ -256,7 +352,7 @@ func (h *MetadataHandler) DeleteBucket(bucketName string) error {
return fmt.Errorf("%w: %s", ErrInvalidBucketName, bucketName) return fmt.Errorf("%w: %s", ErrInvalidBucketName, bucketName)
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
indexBucket, err := tx.CreateBucketIfNotExists([]byte(systemIndex)) indexBucket, err := tx.CreateBucketIfNotExists([]byte(systemIndex))
if err != nil { if err != nil {
return err return err
@@ -303,7 +399,7 @@ func (h *MetadataHandler) DeleteBucket(bucketName string) error {
func (h *MetadataHandler) ListBuckets() ([]string, error) { func (h *MetadataHandler) ListBuckets() ([]string, error) {
buckets := []string{} buckets := []string{}
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")
@@ -323,7 +419,7 @@ func (h *MetadataHandler) ListBuckets() ([]string, error) {
func (h *MetadataHandler) GetBucketManifest(bucketName string) (*models.BucketManifest, error) { func (h *MetadataHandler) GetBucketManifest(bucketName string) (*models.BucketManifest, error) {
var manifest *models.BucketManifest var manifest *models.BucketManifest
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")
@@ -353,7 +449,7 @@ func (h *MetadataHandler) PutManifest(manifest *models.ObjectManifest) error {
return err return err
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
data, err := json.Marshal(manifest) data, err := json.Marshal(manifest)
if err != nil { if err != nil {
return err return err
@@ -373,7 +469,7 @@ func (h *MetadataHandler) PutManifest(manifest *models.ObjectManifest) error {
func (h *MetadataHandler) GetManifest(bucket, key string) (*models.ObjectManifest, error) { func (h *MetadataHandler) GetManifest(bucket, key string) (*models.ObjectManifest, error) {
var manifest *models.ObjectManifest var manifest *models.ObjectManifest
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
metadataBucket := tx.Bucket([]byte(bucket)) metadataBucket := tx.Bucket([]byte(bucket))
if metadataBucket == nil { if metadataBucket == nil {
return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket) return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket)
@@ -400,7 +496,7 @@ func (h *MetadataHandler) ListObjects(bucket, prefix string) ([]*models.ObjectMa
var objects []*models.ObjectManifest var objects []*models.ObjectManifest
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")
@@ -440,7 +536,7 @@ func (h *MetadataHandler) ForEachObjectFrom(bucket, startKey string, fn func(*mo
return errors.New("object callback is required") return errors.New("object callback is required")
} }
return h.db.View(func(tx *bbolt.Tx) error { return h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")
@@ -480,7 +576,7 @@ func (h *MetadataHandler) DeleteManifest(bucket, key string) error {
return err return err
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
metadataBucket := tx.Bucket([]byte(bucket)) metadataBucket := tx.Bucket([]byte(bucket))
if metadataBucket == nil { if metadataBucket == nil {
return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket) return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket)
@@ -497,7 +593,7 @@ func (h *MetadataHandler) DeleteManifest(bucket, key string) error {
func (h *MetadataHandler) DeleteManifests(bucket string, keys []string) ([]string, error) { func (h *MetadataHandler) DeleteManifests(bucket string, keys []string) ([]string, error) {
deleted := make([]string, 0, len(keys)) deleted := make([]string, 0, len(keys))
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
metadataBucket := tx.Bucket([]byte(bucket)) metadataBucket := tx.Bucket([]byte(bucket))
if metadataBucket == nil { if metadataBucket == nil {
return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket) return fmt.Errorf("%w: %s", ErrBucketNotFound, bucket)
@@ -525,7 +621,7 @@ func (h *MetadataHandler) DeleteManifests(bucket string, keys []string) ([]strin
func (h *MetadataHandler) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) { func (h *MetadataHandler) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) {
var upload *models.MultipartUpload var upload *models.MultipartUpload
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")
@@ -548,7 +644,7 @@ func (h *MetadataHandler) CreateMultipartUpload(bucket, key string) (*models.Mul
State: "pending", State: "pending",
} }
err = h.db.Update(func(tx *bbolt.Tx) error { err = h.update(func(tx *bbolt.Tx) error {
multipartUploadBucket := tx.Bucket([]byte(multipartUploadIndex)) multipartUploadBucket := tx.Bucket([]byte(multipartUploadIndex))
if multipartUploadBucket == nil { if multipartUploadBucket == nil {
return errors.New("multipart upload index not found") return errors.New("multipart upload index not found")
@@ -643,7 +739,7 @@ func deleteMultipartPartsByUploadID(tx *bbolt.Tx, uploadID string) error {
func (h *MetadataHandler) GetMultipartUpload(uploadID string) (*models.MultipartUpload, error) { func (h *MetadataHandler) GetMultipartUpload(uploadID string) (*models.MultipartUpload, error) {
var upload *models.MultipartUpload var upload *models.MultipartUpload
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
var err error var err error
upload, _, err = getMultipartUploadFromTx(tx, uploadID) upload, _, err = getMultipartUploadFromTx(tx, uploadID)
if err != nil { if err != nil {
@@ -661,7 +757,7 @@ func (h *MetadataHandler) PutMultipartPart(uploadID string, part models.Uploaded
return fmt.Errorf("invalid part number: %d", part.PartNumber) return fmt.Errorf("invalid part number: %d", part.PartNumber)
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
upload, _, err := getMultipartUploadFromTx(tx, uploadID) upload, _, err := getMultipartUploadFromTx(tx, uploadID)
if err != nil { if err != nil {
return err return err
@@ -690,7 +786,7 @@ func (h *MetadataHandler) PutMultipartPart(uploadID string, part models.Uploaded
func (h *MetadataHandler) ListMultipartParts(uploadID string) ([]models.UploadedPart, error) { func (h *MetadataHandler) ListMultipartParts(uploadID string) ([]models.UploadedPart, error) {
parts := make([]models.UploadedPart, 0) parts := make([]models.UploadedPart, 0)
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
if _, _, err := getMultipartUploadFromTx(tx, uploadID); err != nil { if _, _, err := getMultipartUploadFromTx(tx, uploadID); err != nil {
return err return err
} }
@@ -724,7 +820,7 @@ func (h *MetadataHandler) CompleteMultipartUpload(uploadID string, final *models
return errors.New("final object manifest is required") return errors.New("final object manifest is required")
} }
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
upload, multipartUploadBucket, err := getMultipartUploadFromTx(tx, uploadID) upload, multipartUploadBucket, err := getMultipartUploadFromTx(tx, uploadID)
if err != nil { if err != nil {
return err return err
@@ -763,7 +859,7 @@ func (h *MetadataHandler) CompleteMultipartUpload(uploadID string, final *models
return nil return nil
} }
func (h *MetadataHandler) AbortMultipartUpload(uploadID string) error { func (h *MetadataHandler) AbortMultipartUpload(uploadID string) error {
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
upload, multipartUploadBucket, err := getMultipartUploadFromTx(tx, uploadID) upload, multipartUploadBucket, err := getMultipartUploadFromTx(tx, uploadID)
if err != nil { if err != nil {
return err return err
@@ -793,7 +889,7 @@ func (h *MetadataHandler) CleanupMultipartUploads(retention time.Duration) (int,
} }
cleaned := 0 cleaned := 0
err := h.db.Update(func(tx *bbolt.Tx) error { err := h.update(func(tx *bbolt.Tx) error {
uploadsBucket, err := getMultipartUploadBucket(tx) uploadsBucket, err := getMultipartUploadBucket(tx)
if err != nil { if err != nil {
return err return err
@@ -843,7 +939,7 @@ func (h *MetadataHandler) GetReferencedChunkSet() (map[string]struct{}, error) {
chunkSet := make(map[string]struct{}) chunkSet := make(map[string]struct{})
pendingUploadSet := make(map[string]struct{}) pendingUploadSet := make(map[string]struct{})
err := h.db.View(func(tx *bbolt.Tx) error { err := h.view(func(tx *bbolt.Tx) error {
systemIndexBucket := tx.Bucket([]byte(systemIndex)) systemIndexBucket := tx.Bucket([]byte(systemIndex))
if systemIndexBucket == nil { if systemIndexBucket == nil {
return errors.New("system index not found") return errors.New("system index not found")

795
metrics/metrics.go Normal file
View File

@@ -0,0 +1,795 @@
package metrics
import (
"fmt"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
// defaultBuckets spans typical request/transaction latencies in seconds
// (0.5ms through 10s).
var defaultBuckets = []float64{
	0.0005, 0.001, 0.0025, 0.005, 0.01,
	0.025, 0.05, 0.1, 0.25, 0.5,
	1, 2.5, 5, 10,
}

// lockBuckets uses a much finer scale (1µs through 1s) suited to lock
// wait/hold durations.
var lockBuckets = []float64{
	0.000001, 0.000005, 0.00001, 0.00005,
	0.0001, 0.0005, 0.001, 0.005, 0.01,
	0.025, 0.05, 0.1, 0.25, 0.5, 1,
}

// batchBuckets counts items per batch rather than seconds.
var batchBuckets = []float64{1, 2, 4, 8, 16, 32, 64, 100, 128, 256, 512, 1000, 5000}

// Default is the process-wide metrics registry used by the rest of the server.
var Default = NewRegistry()
// histogram is a minimal fixed-bucket histogram. counts holds one slot per
// bound plus a trailing overflow slot; the bucket slots are per-bin counts,
// not cumulative. The type carries no locking — callers synchronize access.
type histogram struct {
	bounds []float64
	counts []uint64
	sum    float64
	count  uint64
}

// newHistogram builds a histogram over a private copy of bounds.
func newHistogram(bounds []float64) *histogram {
	owned := append([]float64(nil), bounds...)
	return &histogram{
		bounds: owned,
		counts: make([]uint64, len(owned)+1),
	}
}

// observe records v into the first bucket whose upper bound is >= v, or
// into the overflow slot when v exceeds every bound.
func (h *histogram) observe(v float64) {
	h.count++
	h.sum += v
	for i, upper := range h.bounds {
		if v <= upper {
			h.counts[i]++
			return
		}
	}
	h.counts[len(h.counts)-1]++
}

// snapshot returns defensive copies of the bounds and bucket counts along
// with the running sum and observation count.
func (h *histogram) snapshot() (bounds []float64, counts []uint64, sum float64, count uint64) {
	bounds = append([]float64(nil), h.bounds...)
	counts = append([]uint64(nil), h.counts...)
	return bounds, counts, h.sum, h.count
}
// Registry accumulates all process metrics. Gauge-like values use atomics;
// every map and histogram below is guarded by mu. Map keys encode label
// tuples joined with "|" (e.g. "method|route|status").
type Registry struct {
	startedAt time.Time
	// Atomic gauges, updated without taking mu.
	httpInFlight         atomic.Int64
	connectionPoolActive atomic.Int64
	connectionPoolMax    atomic.Int64
	connectionPoolWaits  atomic.Uint64
	requestQueueLength   atomic.Int64
	// mu guards every field below.
	mu sync.Mutex
	// HTTP series keyed by route ("method|route|status" counters,
	// "method|route" histograms) and by operation ("op|result").
	httpRequestsRoute      map[string]uint64
	httpResponseBytesRoute map[string]uint64
	httpDurationRoute      map[string]*histogram
	httpRequestsOp         map[string]uint64
	httpDurationOp         map[string]*histogram
	httpInFlightOp         map[string]int64
	// Auth attempts keyed "result|auth_type|reason".
	authRequests map[string]uint64
	// Service-layer calls: counters "operation|result", histograms "operation".
	serviceOps      map[string]uint64
	serviceDuration map[string]*histogram
	// Metadata (bbolt) transactions: counters "type|result", histograms "type".
	dbTxTotal    map[string]uint64
	dbTxDuration map[string]*histogram
	// Blob-store series: ops/durations "op|backend|result", bytes "op".
	blobOps      map[string]uint64
	blobBytes    map[string]uint64
	blobDuration map[string]*histogram
	// Lock wait/hold latency histograms keyed by lock name.
	lockWait map[string]*histogram
	lockHold map[string]*histogram
	// Cache hit/miss counters keyed by cache name.
	cacheHits   map[string]uint64
	cacheMisses map[string]uint64
	// Distribution of batch sizes (item counts, not seconds).
	batchSize *histogram
	// Retry and error counters keyed "op|reason".
	retries map[string]uint64
	errors  map[string]uint64
	// Garbage-collection stats: runs keyed by result plus aggregate totals.
	gcRuns          map[string]uint64
	gcDuration      *histogram
	gcDeletedChunks uint64
	gcDeleteErrors  uint64
	gcCleanedUpload uint64
}
// NewRegistry returns an empty Registry with every label map initialized
// and the start time recorded for uptime reporting.
func NewRegistry() *Registry {
	return &Registry{
		startedAt:              time.Now(),
		httpRequestsRoute:      make(map[string]uint64),
		httpResponseBytesRoute: make(map[string]uint64),
		httpDurationRoute:      make(map[string]*histogram),
		httpRequestsOp:         make(map[string]uint64),
		httpDurationOp:         make(map[string]*histogram),
		httpInFlightOp:         make(map[string]int64),
		authRequests:           make(map[string]uint64),
		serviceOps:             make(map[string]uint64),
		serviceDuration:        make(map[string]*histogram),
		dbTxTotal:              make(map[string]uint64),
		dbTxDuration:           make(map[string]*histogram),
		blobOps:                make(map[string]uint64),
		blobBytes:              make(map[string]uint64),
		blobDuration:           make(map[string]*histogram),
		lockWait:               make(map[string]*histogram),
		lockHold:               make(map[string]*histogram),
		cacheHits:              make(map[string]uint64),
		cacheMisses:            make(map[string]uint64),
		batchSize:              newHistogram(batchBuckets),
		retries:                make(map[string]uint64),
		errors:                 make(map[string]uint64),
		gcRuns:                 make(map[string]uint64),
		gcDuration:             newHistogram(defaultBuckets),
	}
}
// NormalizeHTTPOperation maps an HTTP method onto the small set of
// operation labels used by the metrics: "get", "put", "delete", "head",
// or "other". POST is treated as "put" unless isDeletePost marks it as a
// batch-delete request, in which case it counts as "delete".
func NormalizeHTTPOperation(method string, isDeletePost bool) string {
	m := strings.ToUpper(strings.TrimSpace(method))
	if m == "POST" {
		if isDeletePost {
			return "delete"
		}
		return "put"
	}
	switch m {
	case "GET", "PUT", "DELETE", "HEAD":
		return strings.ToLower(m)
	}
	return "other"
}
// statusResult buckets an HTTP status code into "ok" (2xx and 3xx) or
// "error" (everything else, including 1xx).
func statusResult(status int) string {
	switch {
	case status < 200, status >= 400:
		return "error"
	default:
		return "ok"
	}
}
// normalizeRoute trims a route label, substituting "/unknown" for blanks.
func normalizeRoute(route string) string {
	if trimmed := strings.TrimSpace(route); trimmed != "" {
		return trimmed
	}
	return "/unknown"
}
// normalizeOp lowercases and trims an operation label, defaulting blanks
// to "other".
func normalizeOp(op string) string {
	if cleaned := strings.ToLower(strings.TrimSpace(op)); cleaned != "" {
		return cleaned
	}
	return "other"
}
// IncHTTPInFlight increments the global in-flight HTTP request gauge.
func (r *Registry) IncHTTPInFlight() {
	r.httpInFlight.Add(1)
}

// DecHTTPInFlight decrements the global in-flight HTTP request gauge.
func (r *Registry) DecHTTPInFlight() {
	r.httpInFlight.Add(-1)
}
// IncHTTPInFlightOp bumps both the global in-flight gauge and the
// per-operation in-flight gauge for op.
func (r *Registry) IncHTTPInFlightOp(op string) {
	r.httpInFlight.Add(1)
	key := normalizeOp(op)
	r.mu.Lock()
	defer r.mu.Unlock()
	r.httpInFlightOp[key]++
}

// DecHTTPInFlightOp decrements both gauges, clamping the per-operation
// gauge at zero so an unmatched decrement cannot drive it negative.
func (r *Registry) DecHTTPInFlightOp(op string) {
	r.httpInFlight.Add(-1)
	key := normalizeOp(op)
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.httpInFlightOp[key]--; r.httpInFlightOp[key] < 0 {
		r.httpInFlightOp[key] = 0
	}
}
// ObserveHTTPRequest records a completed HTTP request, deriving the
// operation label from the method alone (POST maps to "put" here; callers
// that need POST-as-delete semantics use ObserveHTTPRequestDetailed).
func (r *Registry) ObserveHTTPRequest(method, route string, status int, d time.Duration, responseBytes int) {
	op := NormalizeHTTPOperation(method, false)
	r.ObserveHTTPRequestDetailed(method, route, op, status, d, responseBytes)
}
// ObserveHTTPRequestDetailed records one completed HTTP request under both
// the per-route and per-operation metric families: request counters,
// response-byte totals (only when responseBytes > 0), and latency
// histograms created lazily per label set.
func (r *Registry) ObserveHTTPRequestDetailed(method, route, op string, status int, d time.Duration, responseBytes int) {
	route = normalizeRoute(route)
	op = normalizeOp(op)
	result := statusResult(status)
	// Route counters are keyed method|route|status, route histograms
	// method|route, and operation series op|result.
	statusKey := method + "|" + route + "|" + strconv.Itoa(status)
	durationKey := method + "|" + route
	opKey := op + "|" + result
	seconds := d.Seconds()
	r.mu.Lock()
	defer r.mu.Unlock()
	r.httpRequestsRoute[statusKey]++
	if responseBytes > 0 {
		r.httpResponseBytesRoute[statusKey] += uint64(responseBytes)
	}
	routeHist := r.httpDurationRoute[durationKey]
	if routeHist == nil {
		routeHist = newHistogram(defaultBuckets)
		r.httpDurationRoute[durationKey] = routeHist
	}
	routeHist.observe(seconds)
	r.httpRequestsOp[opKey]++
	opHist := r.httpDurationOp[opKey]
	if opHist == nil {
		opHist = newHistogram(defaultBuckets)
		r.httpDurationOp[opKey] = opHist
	}
	opHist.observe(seconds)
}
// ObserveAuth counts one authentication attempt labelled by result, auth
// type, and reason. A blank auth type becomes "unknown" and a blank
// reason becomes "none".
func (r *Registry) ObserveAuth(result, authType, reason string) {
	at := strings.TrimSpace(authType)
	if at == "" {
		at = "unknown"
	}
	why := strings.TrimSpace(reason)
	if why == "" {
		why = "none"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.authRequests[result+"|"+at+"|"+why]++
}
// ObserveService records one service-layer call: a counter keyed by
// operation|result and a latency histogram keyed by operation, created
// lazily on first use.
func (r *Registry) ObserveService(operation string, d time.Duration, ok bool) {
	outcome := "ok"
	if !ok {
		outcome = "error"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.serviceOps[operation+"|"+outcome]++
	hist := r.serviceDuration[operation]
	if hist == nil {
		hist = newHistogram(defaultBuckets)
		r.serviceDuration[operation] = hist
	}
	hist.observe(d.Seconds())
}
// ObserveMetadataTx records one metadata-store transaction: a counter
// keyed by txType|result and a latency histogram keyed by txType, created
// lazily on first use.
func (r *Registry) ObserveMetadataTx(txType string, d time.Duration, ok bool) {
	outcome := "ok"
	if !ok {
		outcome = "error"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.dbTxTotal[txType+"|"+outcome]++
	hist := r.dbTxDuration[txType]
	if hist == nil {
		hist = newHistogram(defaultBuckets)
		r.dbTxDuration[txType] = hist
	}
	hist.observe(d.Seconds())
}
// ObserveBlob records one blob-store operation. The optional backend
// defaults to "disk". Counter and histogram series are keyed by
// op|backend|result; byte totals are keyed by op alone and only counted
// when bytes > 0.
func (r *Registry) ObserveBlob(operation string, d time.Duration, bytes int64, ok bool, backend ...string) {
	store := "disk"
	if len(backend) > 0 {
		if trimmed := strings.TrimSpace(backend[0]); trimmed != "" {
			store = strings.ToLower(trimmed)
		}
	}
	outcome := "ok"
	if !ok {
		outcome = "error"
	}
	op := strings.ToLower(strings.TrimSpace(operation))
	if op == "" {
		op = "unknown"
	}
	key := op + "|" + store + "|" + outcome
	r.mu.Lock()
	defer r.mu.Unlock()
	r.blobOps[key]++
	hist := r.blobDuration[key]
	if hist == nil {
		hist = newHistogram(defaultBuckets)
		r.blobDuration[key] = hist
	}
	hist.observe(d.Seconds())
	if bytes > 0 {
		r.blobBytes[op] += uint64(bytes)
	}
}
// SetConnectionPoolMax records the configured pool capacity, clamping
// negative values to zero.
func (r *Registry) SetConnectionPoolMax(max int) {
	if max < 0 {
		max = 0
	}
	r.connectionPoolMax.Store(int64(max))
}

// IncConnectionPoolActive increments the active-connection gauge.
func (r *Registry) IncConnectionPoolActive() {
	r.connectionPoolActive.Add(1)
}

// DecConnectionPoolActive decrements the active-connection gauge.
func (r *Registry) DecConnectionPoolActive() {
	r.connectionPoolActive.Add(-1)
}

// IncConnectionPoolWait counts one caller that had to wait for a pooled
// connection.
func (r *Registry) IncConnectionPoolWait() {
	r.connectionPoolWaits.Add(1)
}

// IncRequestQueueLength increments the request-queue-length gauge.
func (r *Registry) IncRequestQueueLength() {
	r.requestQueueLength.Add(1)
}

// DecRequestQueueLength decrements the request-queue-length gauge.
func (r *Registry) DecRequestQueueLength() {
	r.requestQueueLength.Add(-1)
}
// observeLockMetric folds d into the named histogram of m, lazily creating
// it with the fine-grained lock buckets. Blank names map to "unknown".
func (r *Registry) observeLockMetric(m map[string]*histogram, lockName string, d time.Duration) {
	name := strings.TrimSpace(lockName)
	if name == "" {
		name = "unknown"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	h := m[name]
	if h == nil {
		h = newHistogram(lockBuckets)
		m[name] = h
	}
	h.observe(d.Seconds())
}

// ObserveLockWait records how long a caller waited to acquire the named lock.
func (r *Registry) ObserveLockWait(lockName string, d time.Duration) {
	r.observeLockMetric(r.lockWait, lockName, d)
}

// ObserveLockHold records how long the named lock was held.
func (r *Registry) ObserveLockHold(lockName string, d time.Duration) {
	r.observeLockMetric(r.lockHold, lockName, d)
}
// ObserveCacheHit counts one hit for the named cache ("unknown" when blank).
func (r *Registry) ObserveCacheHit(cache string) {
	name := strings.TrimSpace(cache)
	if name == "" {
		name = "unknown"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.cacheHits[name]++
}

// ObserveCacheMiss counts one miss for the named cache ("unknown" when blank).
func (r *Registry) ObserveCacheMiss(cache string) {
	name := strings.TrimSpace(cache)
	if name == "" {
		name = "unknown"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.cacheMisses[name]++
}
// ObserveBatchSize records the item count of one batch, clamping negative
// sizes to zero.
func (r *Registry) ObserveBatchSize(size int) {
	n := size
	if n < 0 {
		n = 0
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.batchSize.observe(float64(n))
}
// countOpReason bumps m[op|reason] under the registry lock, normalizing
// the operation label and defaulting a blank reason to "unknown".
func (r *Registry) countOpReason(m map[string]uint64, op, reason string) {
	why := strings.TrimSpace(reason)
	if why == "" {
		why = "unknown"
	}
	key := normalizeOp(op) + "|" + why
	r.mu.Lock()
	defer r.mu.Unlock()
	m[key]++
}

// ObserveRetry counts one retry of op attributed to reason.
func (r *Registry) ObserveRetry(op, reason string) {
	r.countOpReason(r.retries, op, reason)
}

// ObserveError counts one error for op attributed to reason.
func (r *Registry) ObserveError(op, reason string) {
	r.countOpReason(r.errors, op, reason)
}
// ObserveGC records one garbage-collection run: its result and duration,
// plus the totals of chunks deleted, delete errors, and multipart uploads
// cleaned. Non-positive counts are ignored rather than subtracted.
func (r *Registry) ObserveGC(d time.Duration, deletedChunks, deleteErrors, cleanedUploads int, ok bool) {
	outcome := "ok"
	if !ok {
		outcome = "error"
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.gcRuns[outcome]++
	r.gcDuration.observe(d.Seconds())
	if deletedChunks > 0 {
		r.gcDeletedChunks += uint64(deletedChunks)
	}
	if deleteErrors > 0 {
		r.gcDeleteErrors += uint64(deleteErrors)
	}
	if cleanedUploads > 0 {
		r.gcCleanedUpload += uint64(cleanedUploads)
	}
}
// RenderPrometheus serializes all registered metrics into the Prometheus
// text exposition format and returns the result as a single string.
//
// All mutex-guarded maps and histograms are copied into local snapshots
// while holding r.mu; the (comparatively expensive) formatting then runs
// entirely after the unlock. Atomic gauges and OS-level readings are taken
// lock-free afterwards.
func (r *Registry) RenderPrometheus() string {
	now := time.Now()
	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	// --- snapshot phase: copy everything guarded by r.mu ---
	r.mu.Lock()
	httpReqRoute := copyCounterMap(r.httpRequestsRoute)
	httpRespRoute := copyCounterMap(r.httpResponseBytesRoute)
	httpDurRoute := copyHistogramMap(r.httpDurationRoute)
	httpReqOp := copyCounterMap(r.httpRequestsOp)
	httpDurOp := copyHistogramMap(r.httpDurationOp)
	httpInFlightOp := copyIntGaugeMap(r.httpInFlightOp)
	authReq := copyCounterMap(r.authRequests)
	serviceOps := copyCounterMap(r.serviceOps)
	serviceDur := copyHistogramMap(r.serviceDuration)
	dbTx := copyCounterMap(r.dbTxTotal)
	dbTxDur := copyHistogramMap(r.dbTxDuration)
	blobOps := copyCounterMap(r.blobOps)
	blobBytes := copyCounterMap(r.blobBytes)
	blobDur := copyHistogramMap(r.blobDuration)
	lockWait := copyHistogramMap(r.lockWait)
	lockHold := copyHistogramMap(r.lockHold)
	cacheHits := copyCounterMap(r.cacheHits)
	cacheMisses := copyCounterMap(r.cacheMisses)
	batchBounds, batchCounts, batchSum, batchCount := r.batchSize.snapshot()
	retries := copyCounterMap(r.retries)
	errorsTotal := copyCounterMap(r.errors)
	gcRuns := copyCounterMap(r.gcRuns)
	gcDurBounds, gcDurCounts, gcDurSum, gcDurCount := r.gcDuration.snapshot()
	gcDeletedChunks := r.gcDeletedChunks
	gcDeleteErrors := r.gcDeleteErrors
	gcCleanedUploads := r.gcCleanedUpload
	r.mu.Unlock()
	// Atomic gauges need no lock.
	connectionActive := float64(r.connectionPoolActive.Load())
	connectionMax := float64(r.connectionPoolMax.Load())
	connectionWaits := r.connectionPoolWaits.Load()
	queueLength := float64(r.requestQueueLength.Load())
	// Process-level readings; both report false on platforms where the
	// underlying source (procfs / getrusage) is unavailable.
	resident, hasResident := readResidentMemoryBytes()
	cpuSeconds, hasCPU := readProcessCPUSeconds()
	// --- render phase: format the snapshots into exposition text ---
	var b strings.Builder
	// Expose the aggregate in-flight counter under the synthetic "all" label
	// alongside the per-operation gauges.
	httpInFlightOp["all"] = r.httpInFlight.Load()
	writeGaugeVecFromInt64(&b, "fs_http_inflight_requests", "Current in-flight HTTP requests by operation.", httpInFlightOp, "op")
	writeCounterVecKV(&b, "fs_http_requests_total", "Total HTTP requests by operation and result.", httpReqOp, []string{"op", "result"})
	writeHistogramVecKV(&b, "fs_http_request_duration_seconds", "HTTP request latency by operation and result.", httpDurOp, []string{"op", "result"})
	writeCounterVecKV(&b, "fs_http_requests_by_route_total", "Total HTTP requests by method/route/status.", httpReqRoute, []string{"method", "route", "status"})
	writeCounterVecKV(&b, "fs_http_response_bytes_total", "Total HTTP response bytes written.", httpRespRoute, []string{"method", "route", "status"})
	writeHistogramVecKV(&b, "fs_http_request_duration_by_route_seconds", "HTTP request latency by method/route.", httpDurRoute, []string{"method", "route"})
	writeCounterVecKV(&b, "fs_auth_requests_total", "Authentication attempts by result.", authReq, []string{"result", "auth_type", "reason"})
	writeCounterVecKV(&b, "fs_service_operations_total", "Service-level operation calls.", serviceOps, []string{"operation", "result"})
	writeHistogramVecKV(&b, "fs_service_operation_duration_seconds", "Service-level operation latency.", serviceDur, []string{"operation"})
	writeCounterVecKV(&b, "fs_metadata_tx_total", "Metadata transaction calls.", dbTx, []string{"type", "result"})
	writeHistogramVecKV(&b, "fs_metadata_tx_duration_seconds", "Metadata transaction latency.", dbTxDur, []string{"type"})
	writeHistogramVecKV(&b, "fs_blob_operation_duration_seconds", "Blob backend operation latency.", blobDur, []string{"op", "backend", "result"})
	writeCounterVecKV(&b, "fs_blob_operations_total", "Blob store operations.", blobOps, []string{"op", "backend", "result"})
	writeCounterVecKV(&b, "fs_blob_bytes_total", "Blob bytes processed by operation.", blobBytes, []string{"op"})
	writeGauge(&b, "fs_connection_pool_active", "Active pooled connections.", connectionActive)
	writeGauge(&b, "fs_connection_pool_max", "Maximum pooled connections.", connectionMax)
	writeCounter(&b, "fs_connection_pool_waits_total", "Number of waits due to pool saturation.", connectionWaits)
	writeGauge(&b, "fs_request_queue_length", "Requests waiting for an execution slot.", queueLength)
	writeHistogramVecKV(&b, "fs_lock_wait_seconds", "Time spent waiting for locks.", lockWait, []string{"lock_name"})
	writeHistogramVecKV(&b, "fs_lock_hold_seconds", "Time locks were held.", lockHold, []string{"lock_name"})
	writeCounterVecKV(&b, "fs_cache_hits_total", "Cache hits by cache name.", cacheHits, []string{"cache"})
	writeCounterVecKV(&b, "fs_cache_misses_total", "Cache misses by cache name.", cacheMisses, []string{"cache"})
	writeHistogram(&b, "fs_batch_size_histogram", "Observed batch sizes.", nil, batchBounds, batchCounts, batchSum, batchCount)
	writeCounterVecKV(&b, "fs_retries_total", "Retries by operation and reason.", retries, []string{"op", "reason"})
	writeCounterVecKV(&b, "fs_errors_total", "Errors by operation and reason.", errorsTotal, []string{"op", "reason"})
	writeCounterVecKV(&b, "fs_gc_runs_total", "Garbage collection runs.", gcRuns, []string{"result"})
	writeHistogram(&b, "fs_gc_duration_seconds", "Garbage collection runtime.", nil, gcDurBounds, gcDurCounts, gcDurSum, gcDurCount)
	writeCounter(&b, "fs_gc_deleted_chunks_total", "Deleted chunks during GC.", gcDeletedChunks)
	writeCounter(&b, "fs_gc_delete_errors_total", "Chunk delete errors during GC.", gcDeleteErrors)
	writeCounter(&b, "fs_gc_cleaned_uploads_total", "Cleaned multipart uploads during GC.", gcCleanedUploads)
	// Process/runtime self-metrics.
	writeGauge(&b, "fs_uptime_seconds", "Process uptime in seconds.", now.Sub(r.startedAt).Seconds())
	writeGauge(&b, "fs_runtime_goroutines", "Number of goroutines.", float64(runtime.NumGoroutine()))
	writeGaugeVec(&b, "fs_runtime_memory_bytes", "Runtime memory in bytes.", map[string]float64{
		"alloc":      float64(mem.Alloc),
		"total":      float64(mem.TotalAlloc),
		"sys":        float64(mem.Sys),
		"heap_alloc": float64(mem.HeapAlloc),
		"heap_sys":   float64(mem.HeapSys),
		"stack_sys":  float64(mem.StackSys),
	}, "type")
	writeCounter(&b, "fs_runtime_gc_cycles_total", "Completed GC cycles.", uint64(mem.NumGC))
	writeCounterFloat(&b, "fs_runtime_gc_pause_seconds_total", "Total GC pause time in seconds.", float64(mem.PauseTotalNs)/1e9)
	// Standard process_* metrics are only emitted when the platform supports
	// reading them.
	if hasCPU {
		writeCounterFloat(&b, "process_cpu_seconds_total", "Total user and system CPU time spent in seconds.", cpuSeconds)
	}
	if hasResident {
		writeGauge(&b, "process_resident_memory_bytes", "Resident memory size in bytes.", resident)
	}
	return b.String()
}
// histogramSnapshot is an immutable copy of a histogram's state, taken under
// the registry lock so rendering can proceed after the lock is released.
type histogramSnapshot struct {
	bounds []float64 // upper bucket bounds; the renderer treats any extra trailing count as the +Inf bucket
	counts []uint64  // per-bucket (non-cumulative) observation counts
	sum    float64   // sum of all observed values
	count  uint64    // total number of observations
}
// copyCounterMap returns an independent shallow copy of a counter map so it
// can be read safely after the registry lock has been released.
func copyCounterMap(src map[string]uint64) map[string]uint64 {
	dup := make(map[string]uint64, len(src))
	for name, total := range src {
		dup[name] = total
	}
	return dup
}
// copyIntGaugeMap returns an independent shallow copy of an int64 gauge map
// so it can be read (and extended) after the registry lock is released.
func copyIntGaugeMap(src map[string]int64) map[string]int64 {
	dup := make(map[string]int64, len(src))
	for name, val := range src {
		dup[name] = val
	}
	return dup
}
// copyHistogramMap snapshots every histogram in src into plain value structs
// so the data can be rendered after the registry lock is released.
func copyHistogramMap(src map[string]*histogram) map[string]histogramSnapshot {
	snapshots := make(map[string]histogramSnapshot, len(src))
	for key, h := range src {
		b, c, s, n := h.snapshot()
		snapshots[key] = histogramSnapshot{bounds: b, counts: c, sum: s, count: n}
	}
	return snapshots
}
func writeCounter(b *strings.Builder, name, help string, value uint64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s counter\n", name)
fmt.Fprintf(b, "%s %d\n", name, value)
}
func writeCounterFloat(b *strings.Builder, name, help string, value float64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s counter\n", name)
fmt.Fprintf(b, "%s %.9f\n", name, value)
}
func writeGauge(b *strings.Builder, name, help string, value float64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s gauge\n", name)
fmt.Fprintf(b, "%s %.9f\n", name, value)
}
// writeGaugeVec emits a gauge carrying a single label, one sample per map
// entry, sorted by label value for deterministic output.
func writeGaugeVec(sb *strings.Builder, name, help string, values map[string]float64, labelName string) {
	fmt.Fprintf(sb, "# HELP %s %s\n", name, help)
	fmt.Fprintf(sb, "# TYPE %s gauge\n", name)
	ordered := make([]string, 0, len(values))
	for lv := range values {
		ordered = append(ordered, lv)
	}
	sort.Strings(ordered)
	for _, lv := range ordered {
		fmt.Fprintf(sb, "%s{%s=\"%s\"} %.9f\n", name, labelName, escapeLabelValue(lv), values[lv])
	}
}
// writeGaugeVecFromInt64 is writeGaugeVec for int64-valued gauges; each
// value is converted to float64 at render time.
func writeGaugeVecFromInt64(sb *strings.Builder, name, help string, values map[string]int64, labelName string) {
	fmt.Fprintf(sb, "# HELP %s %s\n", name, help)
	fmt.Fprintf(sb, "# TYPE %s gauge\n", name)
	ordered := make([]string, 0, len(values))
	for lv := range values {
		ordered = append(ordered, lv)
	}
	sort.Strings(ordered)
	for _, lv := range ordered {
		fmt.Fprintf(sb, "%s{%s=\"%s\"} %.9f\n", name, labelName, escapeLabelValue(lv), float64(values[lv]))
	}
}
// writeCounterVecKV emits a labeled counter family. Each map key is a
// "|"-separated tuple of label values matched positionally against labels;
// output is sorted by key for deterministic scrapes.
func writeCounterVecKV(sb *strings.Builder, name, help string, values map[string]uint64, labels []string) {
	fmt.Fprintf(sb, "# HELP %s %s\n", name, help)
	fmt.Fprintf(sb, "# TYPE %s counter\n", name)
	ordered := make([]string, 0, len(values))
	for k := range values {
		ordered = append(ordered, k)
	}
	sort.Strings(ordered)
	for _, k := range ordered {
		labelValues := strings.Split(k, "|")
		fmt.Fprintf(sb, "%s{%s} %d\n", name, formatLabels(labels, labelValues), values[k])
	}
}
// writeHistogramVecKV emits one histogram per map entry. Each map key is a
// "|"-separated tuple of label values matched positionally against labels;
// positions missing from the key become empty label values. Output is
// sorted by key for deterministic scrapes.
func writeHistogramVecKV(sb *strings.Builder, name, help string, values map[string]histogramSnapshot, labels []string) {
	fmt.Fprintf(sb, "# HELP %s %s\n", name, help)
	fmt.Fprintf(sb, "# TYPE %s histogram\n", name)
	ordered := make([]string, 0, len(values))
	for k := range values {
		ordered = append(ordered, k)
	}
	sort.Strings(ordered)
	for _, k := range ordered {
		labelValues := strings.Split(k, "|")
		labelSet := make(map[string]string, len(labels))
		for i, label := range labels {
			v := ""
			if i < len(labelValues) {
				v = labelValues[i]
			}
			labelSet[label] = v
		}
		writeHistogramWithLabelsMap(sb, name, labelSet, values[k])
	}
}
func writeHistogram(b *strings.Builder, name, help string, labels map[string]string, bounds []float64, counts []uint64, sum float64, count uint64) {
fmt.Fprintf(b, "# HELP %s %s\n", name, help)
fmt.Fprintf(b, "# TYPE %s histogram\n", name)
writeHistogramWithLabelsMap(b, name, labels, histogramSnapshot{bounds: bounds, counts: counts, sum: sum, count: count})
}
// writeHistogramWithLabelsMap renders a histogram snapshot's _bucket lines
// (with cumulative counts and an "le" label per bucket; counts beyond the
// last bound go to the "+Inf" bucket) followed by the _sum and _count
// samples under the same label set.
func writeHistogramWithLabelsMap(sb *strings.Builder, name string, labels map[string]string, s histogramSnapshot) {
	cumulative := uint64(0)
	for i, n := range s.counts {
		cumulative += n
		bucketLabels := cloneLabels(labels)
		le := "+Inf"
		if i < len(s.bounds) {
			le = trimFloat(s.bounds[i])
		}
		bucketLabels["le"] = le
		fmt.Fprintf(sb, "%s_bucket{%s} %d\n", name, labelsToString(bucketLabels), cumulative)
	}
	suffix := formatLabelsSuffix(labels)
	fmt.Fprintf(sb, "%s_sum%s %.9f\n", name, suffix, s.sum)
	fmt.Fprintf(sb, "%s_count%s %d\n", name, suffix, s.count)
}
// formatLabelsSuffix renders labels as a braced label set for appending to a
// metric name, or the empty string when there are no labels (so unlabeled
// series never get an empty "{}").
func formatLabelsSuffix(labels map[string]string) string {
	if len(labels) > 0 {
		return "{" + labelsToString(labels) + "}"
	}
	return ""
}
// formatLabels pairs label names with positional values (positions missing
// from values become empty) and renders them as a comma-separated list of
// escaped name="value" pairs, preserving the order of keys.
func formatLabels(keys, values []string) string {
	rendered := make([]string, 0, len(keys))
	for i, name := range keys {
		v := ""
		if i < len(values) {
			v = values[i]
		}
		rendered = append(rendered, fmt.Sprintf("%s=\"%s\"", name, escapeLabelValue(v)))
	}
	return strings.Join(rendered, ",")
}
// labelsToString renders a label map as a comma-separated list of escaped
// name="value" pairs with names sorted for deterministic output.
func labelsToString(labels map[string]string) string {
	if len(labels) == 0 {
		return ""
	}
	names := make([]string, 0, len(labels))
	for n := range labels {
		names = append(names, n)
	}
	sort.Strings(names)
	rendered := make([]string, 0, len(names))
	for _, n := range names {
		rendered = append(rendered, fmt.Sprintf("%s=\"%s\"", n, escapeLabelValue(labels[n])))
	}
	return strings.Join(rendered, ",")
}
// cloneLabels returns a mutable copy of in, sized with room for one extra
// entry (the "le" label that histogram rendering adds). A nil or empty
// input yields a fresh, writable empty map.
func cloneLabels(in map[string]string) map[string]string {
	out := make(map[string]string, len(in)+1)
	for name, val := range in {
		out[name] = val
	}
	return out
}
// trimFloat formats a float with the fewest digits that round-trip exactly
// (e.g. 0.5 -> "0.5", 1 -> "1"), used for histogram bucket bounds.
func trimFloat(v float64) string {
	return strconv.FormatFloat(v, 'f', -1, 64)
}
// escapeLabelValue escapes a label value for the Prometheus text format.
// Backslash is replaced first so the backslashes introduced for newline and
// double quote are not doubled again.
func escapeLabelValue(value string) string {
	for _, sub := range [][2]string{{`\`, `\\`}, {"\n", `\n`}, {`"`, `\"`}} {
		value = strings.ReplaceAll(value, sub[0], sub[1])
	}
	return value
}
// readResidentMemoryBytes reports the process resident set size in bytes by
// parsing the second field (resident pages) of /proc/self/statm and scaling
// by the system page size. Linux-specific: on platforms without procfs, or
// on any parse failure, it returns (0, false).
func readResidentMemoryBytes() (float64, bool) {
	raw, err := os.ReadFile("/proc/self/statm")
	if err != nil {
		return 0, false
	}
	cols := strings.Fields(string(raw))
	if len(cols) < 2 {
		return 0, false
	}
	pages, err := strconv.ParseInt(cols[1], 10, 64)
	if err != nil || pages < 0 {
		return 0, false
	}
	return float64(pages * int64(os.Getpagesize())), true
}
// readProcessCPUSeconds returns the combined user+system CPU time consumed
// by this process, in seconds, via getrusage(RUSAGE_SELF). Returns
// (0, false) if the syscall fails.
func readProcessCPUSeconds() (float64, bool) {
	var ru syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru); err != nil {
		return 0, false
	}
	seconds := func(tv syscall.Timeval) float64 {
		return float64(tv.Sec) + float64(tv.Usec)/1e6
	}
	return seconds(ru.Utime) + seconds(ru.Stime), true
}

34
metrics/metrics_test.go Normal file
View File

@@ -0,0 +1,34 @@
package metrics
import (
"strings"
"testing"
)
func TestRenderPrometheusHistogramNoEmptyLabelSet(t *testing.T) {
reg := NewRegistry()
reg.ObserveBatchSize(3)
reg.ObserveGC(0, 0, 0, 0, true)
out := reg.RenderPrometheus()
if strings.Contains(out, "fs_batch_size_histogram_sum{}") {
t.Fatalf("unexpected empty label set for batch sum metric")
}
if strings.Contains(out, "fs_batch_size_histogram_count{}") {
t.Fatalf("unexpected empty label set for batch count metric")
}
if strings.Contains(out, "fs_gc_duration_seconds_sum{}") {
t.Fatalf("unexpected empty label set for gc sum metric")
}
if strings.Contains(out, "fs_gc_duration_seconds_count{}") {
t.Fatalf("unexpected empty label set for gc count metric")
}
}
func TestEscapeLabelValueEscapesSingleBackslash(t *testing.T) {
got := escapeLabelValue(`a\b`)
want := `a\\b`
if got != want {
t.Fatalf("escapeLabelValue returned %q, want %q", got, want)
}
}

View File

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"fs/metadata" "fs/metadata"
"fs/metrics"
"fs/models" "fs/models"
"fs/storage" "fs/storage"
"io" "io"
@@ -41,9 +42,37 @@ func NewObjectService(metadataHandler *metadata.MetadataHandler, blobHandler *st
} }
} }
func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Reader) (*models.ObjectManifest, error) { func (s *ObjectService) acquireGCRLock() func() {
waitStart := time.Now()
s.gcMu.RLock() s.gcMu.RLock()
defer s.gcMu.RUnlock() metrics.Default.ObserveLockWait("gc_mu_read", time.Since(waitStart))
holdStart := time.Now()
return func() {
metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
s.gcMu.RUnlock()
}
}
func (s *ObjectService) acquireGCLock() func() {
waitStart := time.Now()
s.gcMu.Lock()
metrics.Default.ObserveLockWait("gc_mu_write", time.Since(waitStart))
holdStart := time.Now()
return func() {
metrics.Default.ObserveLockHold("gc_mu_write", time.Since(holdStart))
s.gcMu.Unlock()
}
}
func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Reader) (*models.ObjectManifest, error) {
start := time.Now()
success := false
defer func() {
metrics.Default.ObserveService("put_object", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
chunks, size, etag, err := s.blob.IngestStream(input) chunks, size, etag, err := s.blob.IngestStream(input)
if err != nil { if err != nil {
@@ -71,110 +100,171 @@ func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Read
return nil, err return nil, err
} }
success = true
return manifest, nil return manifest, nil
} }
func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) { func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) {
start := time.Now()
waitStart := time.Now()
s.gcMu.RLock() s.gcMu.RLock()
metrics.Default.ObserveLockWait("gc_mu_read", time.Since(waitStart))
holdStart := time.Now()
manifest, err := s.metadata.GetManifest(bucket, key) manifest, err := s.metadata.GetManifest(bucket, key)
if err != nil { if err != nil {
metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
s.gcMu.RUnlock() s.gcMu.RUnlock()
metrics.Default.ObserveService("get_object", time.Since(start), false)
return nil, nil, err return nil, nil, err
} }
pr, pw := io.Pipe() pr, pw := io.Pipe()
go func() { go func() {
streamOK := false
defer func() {
metrics.Default.ObserveService("get_object", time.Since(start), streamOK)
}()
defer metrics.Default.ObserveLockHold("gc_mu_read", time.Since(holdStart))
defer s.gcMu.RUnlock() defer s.gcMu.RUnlock()
if err := s.blob.AssembleStream(manifest.Chunks, pw); err != nil { if err := s.blob.AssembleStream(manifest.Chunks, pw); err != nil {
_ = pw.CloseWithError(err) _ = pw.CloseWithError(err)
return return
} }
_ = pw.Close() if err := pw.Close(); err != nil {
return
}
streamOK = true
}() }()
return pr, manifest, nil return pr, manifest, nil
} }
func (s *ObjectService) HeadObject(bucket, key string) (models.ObjectManifest, error) { func (s *ObjectService) HeadObject(bucket, key string) (models.ObjectManifest, error) {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
defer func() {
metrics.Default.ObserveService("head_object", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
manifest, err := s.metadata.GetManifest(bucket, key) manifest, err := s.metadata.GetManifest(bucket, key)
if err != nil { if err != nil {
return models.ObjectManifest{}, err return models.ObjectManifest{}, err
} }
success = true
return *manifest, nil return *manifest, nil
} }
func (s *ObjectService) DeleteObject(bucket, key string) error { func (s *ObjectService) DeleteObject(bucket, key string) error {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
return s.metadata.DeleteManifest(bucket, key) defer func() {
metrics.Default.ObserveService("delete_object", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
err := s.metadata.DeleteManifest(bucket, key)
success = err == nil
return err
} }
func (s *ObjectService) ListObjects(bucket, prefix string) ([]*models.ObjectManifest, error) { func (s *ObjectService) ListObjects(bucket, prefix string) ([]*models.ObjectManifest, error) {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
return s.metadata.ListObjects(bucket, prefix) return s.metadata.ListObjects(bucket, prefix)
} }
func (s *ObjectService) ForEachObjectFrom(bucket, startKey string, fn func(*models.ObjectManifest) error) error { func (s *ObjectService) ForEachObjectFrom(bucket, startKey string, fn func(*models.ObjectManifest) error) error {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
defer func() {
metrics.Default.ObserveService("for_each_object_from", time.Since(start), success)
}()
return s.metadata.ForEachObjectFrom(bucket, startKey, fn) unlock := s.acquireGCRLock()
defer unlock()
err := s.metadata.ForEachObjectFrom(bucket, startKey, fn)
success = err == nil
return err
} }
func (s *ObjectService) CreateBucket(bucket string) error { func (s *ObjectService) CreateBucket(bucket string) error {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
return s.metadata.CreateBucket(bucket) defer func() {
metrics.Default.ObserveService("create_bucket", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
err := s.metadata.CreateBucket(bucket)
success = err == nil
return err
} }
func (s *ObjectService) HeadBucket(bucket string) error { func (s *ObjectService) HeadBucket(bucket string) error {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
_, err := s.metadata.GetBucketManifest(bucket) _, err := s.metadata.GetBucketManifest(bucket)
return err return err
} }
func (s *ObjectService) GetBucketManifest(bucket string) (*models.BucketManifest, error) { func (s *ObjectService) GetBucketManifest(bucket string) (*models.BucketManifest, error) {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
return s.metadata.GetBucketManifest(bucket) return s.metadata.GetBucketManifest(bucket)
} }
func (s *ObjectService) DeleteBucket(bucket string) error { func (s *ObjectService) DeleteBucket(bucket string) error {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
return s.metadata.DeleteBucket(bucket) return s.metadata.DeleteBucket(bucket)
} }
func (s *ObjectService) ListBuckets() ([]string, error) { func (s *ObjectService) ListBuckets() ([]string, error) {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
defer func() {
metrics.Default.ObserveService("list_buckets", time.Since(start), success)
}()
return s.metadata.ListBuckets() unlock := s.acquireGCRLock()
defer unlock()
buckets, err := s.metadata.ListBuckets()
success = err == nil
return buckets, err
} }
func (s *ObjectService) DeleteObjects(bucket string, keys []string) ([]string, error) { func (s *ObjectService) DeleteObjects(bucket string, keys []string) ([]string, error) {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
return s.metadata.DeleteManifests(bucket, keys) return s.metadata.DeleteManifests(bucket, keys)
} }
func (s *ObjectService) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) { func (s *ObjectService) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
return s.metadata.CreateMultipartUpload(bucket, key) return s.metadata.CreateMultipartUpload(bucket, key)
} }
func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int, input io.Reader) (string, error) { func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int, input io.Reader) (string, error) {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
defer func() {
metrics.Default.ObserveService("upload_part", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
if partNumber < 1 || partNumber > 10000 { if partNumber < 1 || partNumber > 10000 {
return "", ErrInvalidPart return "", ErrInvalidPart
@@ -204,12 +294,13 @@ func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int,
if err != nil { if err != nil {
return "", err return "", err
} }
success = true
return etag, nil return etag, nil
} }
func (s *ObjectService) ListMultipartParts(bucket, key, uploadID string) ([]models.UploadedPart, error) { func (s *ObjectService) ListMultipartParts(bucket, key, uploadID string) ([]models.UploadedPart, error) {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
upload, err := s.metadata.GetMultipartUpload(uploadID) upload, err := s.metadata.GetMultipartUpload(uploadID)
if err != nil { if err != nil {
@@ -222,8 +313,14 @@ func (s *ObjectService) ListMultipartParts(bucket, key, uploadID string) ([]mode
} }
func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, completed []models.CompletedPart) (*models.ObjectManifest, error) { func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, completed []models.CompletedPart) (*models.ObjectManifest, error) {
s.gcMu.RLock() start := time.Now()
defer s.gcMu.RUnlock() success := false
defer func() {
metrics.Default.ObserveService("complete_multipart_upload", time.Since(start), success)
}()
unlock := s.acquireGCRLock()
defer unlock()
if len(completed) == 0 { if len(completed) == 0 {
return nil, ErrInvalidCompleteRequest return nil, ErrInvalidCompleteRequest
@@ -288,12 +385,13 @@ func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, co
return nil, err return nil, err
} }
success = true
return manifest, nil return manifest, nil
} }
func (s *ObjectService) AbortMultipartUpload(bucket, key, uploadID string) error { func (s *ObjectService) AbortMultipartUpload(bucket, key, uploadID string) error {
s.gcMu.RLock() unlock := s.acquireGCRLock()
defer s.gcMu.RUnlock() defer unlock()
upload, err := s.metadata.GetMultipartUpload(uploadID) upload, err := s.metadata.GetMultipartUpload(uploadID)
if err != nil { if err != nil {
@@ -327,8 +425,17 @@ func (s *ObjectService) Close() error {
} }
func (s *ObjectService) GarbageCollect() error { func (s *ObjectService) GarbageCollect() error {
s.gcMu.Lock() start := time.Now()
defer s.gcMu.Unlock() success := false
deletedChunks := 0
deleteErrors := 0
cleanedUploads := 0
defer func() {
metrics.Default.ObserveGC(time.Since(start), deletedChunks, deleteErrors, cleanedUploads, success)
}()
unlock := s.acquireGCLock()
defer unlock()
referencedChunkSet, err := s.metadata.GetReferencedChunkSet() referencedChunkSet, err := s.metadata.GetReferencedChunkSet()
if err != nil { if err != nil {
@@ -336,9 +443,6 @@ func (s *ObjectService) GarbageCollect() error {
} }
totalChunks := 0 totalChunks := 0
deletedChunks := 0
deleteErrors := 0
cleanedUploads := 0
if err := s.blob.ForEachChunk(func(chunkID string) error { if err := s.blob.ForEachChunk(func(chunkID string) error {
totalChunks++ totalChunks++
@@ -368,6 +472,7 @@ func (s *ObjectService) GarbageCollect() error {
"delete_errors", deleteErrors, "delete_errors", deleteErrors,
"cleaned_uploads", cleanedUploads, "cleaned_uploads", cleanedUploads,
) )
success = true
return nil return nil
} }

View File

@@ -6,10 +6,12 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"fs/metrics"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
) )
const blobRoot = "blobs" const blobRoot = "blobs"
@@ -37,11 +39,16 @@ func NewBlobStore(root string, chunkSize int) (*BlobStore, error) {
} }
func (bs *BlobStore) IngestStream(stream io.Reader) ([]string, int64, string, error) { func (bs *BlobStore) IngestStream(stream io.Reader) ([]string, int64, string, error) {
start := time.Now()
fullFileHasher := md5.New() fullFileHasher := md5.New()
buffer := make([]byte, bs.chunkSize) buffer := make([]byte, bs.chunkSize)
var totalSize int64 var totalSize int64
var chunkIDs []string var chunkIDs []string
success := false
defer func() {
metrics.Default.ObserveBlob("ingest_stream", time.Since(start), 0, success)
}()
for { for {
bytesRead, err := io.ReadFull(stream, buffer) bytesRead, err := io.ReadFull(stream, buffer)
@@ -74,10 +81,18 @@ func (bs *BlobStore) IngestStream(stream io.Reader) ([]string, int64, string, er
} }
etag := hex.EncodeToString(fullFileHasher.Sum(nil)) etag := hex.EncodeToString(fullFileHasher.Sum(nil))
success = true
return chunkIDs, totalSize, etag, nil return chunkIDs, totalSize, etag, nil
} }
func (bs *BlobStore) saveBlob(chunkID string, data []byte) error { func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
start := time.Now()
success := false
writtenBytes := int64(0)
defer func() {
metrics.Default.ObserveBlob("write_chunk", time.Since(start), writtenBytes, success)
}()
if !isValidChunkID(chunkID) { if !isValidChunkID(chunkID) {
return fmt.Errorf("invalid chunk id: %q", chunkID) return fmt.Errorf("invalid chunk id: %q", chunkID)
} }
@@ -88,6 +103,7 @@ func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
fullPath := filepath.Join(dir, chunkID) fullPath := filepath.Join(dir, chunkID)
if _, err := os.Stat(fullPath); err == nil { if _, err := os.Stat(fullPath); err == nil {
success = true
return nil return nil
} else if !os.IsNotExist(err) { } else if !os.IsNotExist(err) {
return err return err
@@ -119,6 +135,7 @@ func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
if err := os.Rename(tmpPath, fullPath); err != nil { if err := os.Rename(tmpPath, fullPath); err != nil {
if _, statErr := os.Stat(fullPath); statErr == nil { if _, statErr := os.Stat(fullPath); statErr == nil {
success = true
return nil return nil
} }
return err return err
@@ -128,10 +145,18 @@ func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
if err := syncDir(dir); err != nil { if err := syncDir(dir); err != nil {
return err return err
} }
writtenBytes = int64(len(data))
success = true
return nil return nil
} }
func (bs *BlobStore) AssembleStream(chunkIDs []string, w *io.PipeWriter) error { func (bs *BlobStore) AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
start := time.Now()
success := false
defer func() {
metrics.Default.ObserveBlob("assemble_stream", time.Since(start), 0, success)
}()
for _, chunkID := range chunkIDs { for _, chunkID := range chunkIDs {
chunkData, err := bs.GetBlob(chunkID) chunkData, err := bs.GetBlob(chunkID)
if err != nil { if err != nil {
@@ -141,14 +166,28 @@ func (bs *BlobStore) AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
return err return err
} }
} }
success = true
return nil return nil
} }
func (bs *BlobStore) GetBlob(chunkID string) ([]byte, error) { func (bs *BlobStore) GetBlob(chunkID string) ([]byte, error) {
start := time.Now()
success := false
var size int64
defer func() {
metrics.Default.ObserveBlob("read_chunk", time.Since(start), size, success)
}()
if !isValidChunkID(chunkID) { if !isValidChunkID(chunkID) {
return nil, fmt.Errorf("invalid chunk id: %q", chunkID) return nil, fmt.Errorf("invalid chunk id: %q", chunkID)
} }
return os.ReadFile(filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4], chunkID)) data, err := os.ReadFile(filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4], chunkID))
if err != nil {
return nil, err
}
size = int64(len(data))
success = true
return data, nil
} }
func (bs *BlobStore) DeleteBlob(chunkID string) error { func (bs *BlobStore) DeleteBlob(chunkID string) error {

View File

@@ -29,6 +29,7 @@ type Config struct {
AuthBootstrapAccessKey string AuthBootstrapAccessKey string
AuthBootstrapSecretKey string AuthBootstrapSecretKey string
AuthBootstrapPolicy string AuthBootstrapPolicy string
AdminAPIEnabled bool
} }
func NewConfig() *Config { func NewConfig() *Config {
@@ -37,7 +38,7 @@ func NewConfig() *Config {
config := &Config{ config := &Config{
DataPath: sanitizeDataPath(os.Getenv("DATA_PATH")), DataPath: sanitizeDataPath(os.Getenv("DATA_PATH")),
Address: firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"), Address: firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"),
Port: envIntRange("PORT", 3000, 1, 65535), Port: envIntRange("PORT", 2600, 1, 65535),
ChunkSize: envIntRange("CHUNK_SIZE", 8192000, 1, 64*1024*1024), ChunkSize: envIntRange("CHUNK_SIZE", 8192000, 1, 64*1024*1024),
LogLevel: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")), LogLevel: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")),
LogFormat: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_FORMAT")), strings.TrimSpace(os.Getenv("LOG_TYPE")), "text")), LogFormat: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_FORMAT")), strings.TrimSpace(os.Getenv("LOG_TYPE")), "text")),
@@ -47,14 +48,15 @@ func NewConfig() *Config {
MultipartCleanupRetention: time.Duration( MultipartCleanupRetention: time.Duration(
envIntRange("MULTIPART_RETENTION_HOURS", 24, 1, 24*30), envIntRange("MULTIPART_RETENTION_HOURS", 24, 1, 24*30),
) * time.Hour, ) * time.Hour,
AuthEnabled: envBool("AUTH_ENABLED", true), AuthEnabled: envBool("FS_AUTH_ENABLED", false),
AuthRegion: firstNonEmpty(strings.TrimSpace(os.Getenv("AUTH_REGION")), "us-east-1"), AuthRegion: firstNonEmpty(strings.TrimSpace(os.Getenv("FS_AUTH_REGION")), "us-east-1"),
AuthSkew: time.Duration(envIntRange("AUTH_SKEW_SECONDS", 300, 30, 3600)) * time.Second, AuthSkew: time.Duration(envIntRange("FS_AUTH_CLOCK_SKEW_SECONDS", 300, 30, 3600)) * time.Second,
AuthMaxPresign: time.Duration(envIntRange("AUTH_MAX_PRESIGN_SECONDS", 86400, 60, 86400)) * time.Second, AuthMaxPresign: time.Duration(envIntRange("FS_AUTH_MAX_PRESIGN_SECONDS", 86400, 60, 86400)) * time.Second,
AuthMasterKey: strings.TrimSpace(os.Getenv("AUTH_MASTER_KEY")), AuthMasterKey: strings.TrimSpace(os.Getenv("FS_MASTER_KEY")),
AuthBootstrapAccessKey: strings.TrimSpace(os.Getenv("AUTH_BOOTSTRAP_ACCESS_KEY")), AuthBootstrapAccessKey: strings.TrimSpace(os.Getenv("FS_ROOT_USER")),
AuthBootstrapSecretKey: strings.TrimSpace(os.Getenv("AUTH_BOOTSTRAP_SECRET_KEY")), AuthBootstrapSecretKey: strings.TrimSpace(os.Getenv("FS_ROOT_PASSWORD")),
AuthBootstrapPolicy: strings.TrimSpace(os.Getenv("AUTH_BOOTSTRAP_POLICY")), AuthBootstrapPolicy: strings.TrimSpace(os.Getenv("FS_ROOT_POLICY_JSON")),
AdminAPIEnabled: envBool("ADMIN_API_ENABLED", true),
} }
if config.LogFormat != "json" && config.LogFormat != "text" { if config.LogFormat != "json" && config.LogFormat != "text" {