14 Commits

Author SHA1 Message Date
eac20f7fda Merge pull request #10 from ferdzo/hotfix/copy-object-fix-url-encoding-fix
HOTFIX: Copy object and key extraction improvement
2026-03-13 01:32:10 +01:00
9bfdceca08 HOTFIX: Copy object and encoding fixed 2026-03-13 01:29:29 +01:00
6473726a45 Merge pull request #9 from ferdzo/hotfix/streaming-error-and-equalsign-in-sigv4
HOTFIX: Fixed chunked stream problems with size and equal sign encoding
2026-03-13 00:27:57 +01:00
115d825234 HOTFIX: Fixed streaming problems with size and equal sign encoding 2026-03-13 00:25:48 +01:00
237063d9fc Merge pull request #8 from ferdzo/fix/bucket-creation-status201-to-200ok
Changed 201 Created to 200 OK when creating bucket
2026-03-12 00:29:05 +01:00
c2215d8589 Changed 201 Created to 200 OK when creating bucket 2026-03-12 00:28:01 +01:00
82cb58dff1 README.md edit 2026-03-11 23:39:25 +01:00
b592d6a2f0 Merge pull request #7 from ferdzo/feat/cli
Fs CLI
2026-03-11 20:27:22 +01:00
ef12326975 Copilot suggestions fixed 2026-03-11 20:26:17 +01:00
a23577d531 Update cmd/admin_snapshot.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-11 20:19:50 +01:00
e8256d66e3 Added Github Action for build and release. 2026-03-11 20:18:24 +01:00
ad53a6d8ac Backup/restore option for cli 2026-03-11 20:12:00 +01:00
cfb9b591ac Policy example documentation 2026-03-11 00:50:09 +01:00
b27f1186cf Remove Role command. 2026-03-11 00:47:24 +01:00
16 changed files with 1488 additions and 31 deletions

24
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
# CI workflow: run the Go test suite on every push to main and on every
# pull request.
name: CI
on:
  push:
    branches: ["main"]
  pull_request:
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Go
        # Pin the toolchain to the version declared in go.mod.
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
      - name: Run tests
        run: |
          # Point the build cache at a writable tmp location.
          export GOCACHE=/tmp/go-build-cache
          go test ./...

66
.github/workflows/release-image.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
# Release workflow: build and push a multi-arch container image to GHCR
# whenever a semver tag (vX.Y.Z) is pushed, or on manual dispatch.
name: Release Image
on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
permissions:
  contents: read
  # packages: write is required to push to GitHub Container Registry.
  packages: write
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set build date
        # Exposed as steps.date.outputs.value; consumed by image labels
        # and the DATE build-arg below.
        id: date
        run: echo "value=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
        # QEMU enables cross-building the linux/arm64 variant.
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Docker metadata
        # Derives image tags (full semver, major.minor, major, commit sha)
        # and OCI labels from the pushed git tag.
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
          labels: |
            org.opencontainers.image.title=fs
            org.opencontainers.image.description=Lightweight S3-compatible object storage
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.created=${{ steps.date.outputs.value }}
      - name: Build and push image
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # Version metadata is baked into the binary via -ldflags in the
          # Dockerfile.
          build-args: |
            VERSION=${{ github.ref_name }}
            COMMIT=${{ github.sha }}
            DATE=${{ steps.date.outputs.value }}

View File

@@ -6,7 +6,13 @@ COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/fs .
ARG VERSION=dev
ARG COMMIT=none
ARG DATE=unknown
RUN CGO_ENABLED=0 GOOS=linux go build \
-trimpath \
-ldflags "-s -w -X main.version=${VERSION} -X main.commit=${COMMIT} -X main.date=${DATE}" \
-o /app/fs .
FROM alpine:3.23 AS runner

View File

@@ -49,6 +49,61 @@ Admin API (JSON):
- `PUT /_admin/v1/users/{accessKeyId}/status`
- `DELETE /_admin/v1/users/{accessKeyId}`
Admin API policy examples (SigV4):
```bash
ENDPOINT="http://localhost:2600"
REGION="us-east-1"
ADMIN_ACCESS_KEY="${FS_ROOT_USER}"
ADMIN_SECRET_KEY="${FS_ROOT_PASSWORD}"
SIGV4="aws:amz:${REGION}:s3"
```
Replace user policy with one scoped statement:
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "backup",
"prefix": "restic/*"
}
]
}
}'
```
Set multiple statements (for multiple buckets):
```bash
curl --aws-sigv4 "$SIGV4" \
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
-H "Content-Type: application/json" \
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
-d '{
"policy": {
"statements": [
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject"],
"bucket": "test-bucket",
"prefix": "*"
},
{
"effect": "allow",
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"bucket": "test-bucket-2",
"prefix": "*"
}
]
}
}'
```
Admin CLI:
- `fs admin user create --access-key backup-user --role readwrite`
- `fs admin user list`
@@ -56,8 +111,12 @@ Admin CLI:
- `fs admin user set-status backup-user --status disabled`
- `fs admin user set-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role readwrite --bucket backups-2` (appends another statement)
- `fs admin user remove-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
- `fs admin user set-role backup-user --role admin --replace` (replaces all statements)
- `fs admin user delete backup-user`
- `fs admin snapshot create --data-path /var/lib/fs --out /backup/fs-20260311.tar.gz`
- `fs admin snapshot inspect --file /backup/fs-20260311.tar.gz`
- `fs admin snapshot restore --file /backup/fs-20260311.tar.gz --data-path /var/lib/fs --force`
- `fs admin diag health`
- `fs admin diag version`
@@ -80,9 +139,12 @@ CLI credential/env resolution for `fs admin`:
- `FS_ROOT_USER` / `FS_ROOT_PASSWORD` (same defaults as server bootstrap)
- `FSCLI_ACCESS_KEY` / `FSCLI_SECRET_KEY`
- `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY`
- `FSCLI_ENDPOINT` (fallback to `ADDRESS` + `PORT`, then `http://localhost:3000`)
- `FSCLI_ENDPOINT` (fallback to `ADDRESS` + `PORT`, then `http://localhost:2600`)
- `FSCLI_REGION` (fallback `FS_AUTH_REGION`, default `us-east-1`)
Note:
- `fs admin snapshot ...` commands operate locally on filesystem paths and do not require endpoint or auth credentials.
Health:
- `GET /healthz`
- `HEAD /healthz`

View File

@@ -2,6 +2,7 @@ package api
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/xml"
@@ -138,11 +139,83 @@ func validateObjectKey(key string) *s3APIError {
return nil
}
// objectKeyFromRequest extracts, percent-decodes, and validates the object
// key addressed by r. It returns the decoded key, or an *s3APIError when
// decoding fails (InvalidArgument) or validateObjectKey rejects the key.
func objectKeyFromRequest(r *http.Request) (string, *s3APIError) {
	rawKey := rawObjectKeyFromRequest(r)
	key, err := normalizeObjectKey(rawKey)
	if err != nil {
		// Copy the sentinel so the returned pointer does not alias the
		// shared package-level error value.
		apiErr := s3ErrInvalidArgument
		return "", &apiErr
	}
	if apiErr := validateObjectKey(key); apiErr != nil {
		return "", apiErr
	}
	return key, nil
}
// rawObjectKeyFromRequest returns the still-percent-encoded object key from
// the request URL. It prefers slicing the escaped path after "/{bucket}/" so
// encoded separators (e.g. %2F) survive intact; the chi wildcard route param
// is only a fallback — presumably because chi pre-decodes it, which would
// mangle such keys (NOTE(review): confirm against the router version in use).
func rawObjectKeyFromRequest(r *http.Request) string {
	if r == nil || r.URL == nil {
		return ""
	}
	bucket := chi.URLParam(r, "bucket")
	if bucket == "" {
		// Route has no bucket segment: fall back to the wildcard param.
		return chi.URLParam(r, "*")
	}
	escapedPath := r.URL.EscapedPath()
	prefix := "/" + bucket + "/"
	if strings.HasPrefix(escapedPath, prefix) {
		return strings.TrimPrefix(escapedPath, prefix)
	}
	return chi.URLParam(r, "*")
}
// normalizeObjectKey percent-decodes a raw object key taken from the URL
// path. An empty key decodes to the empty string without error; malformed
// escape sequences surface url.PathUnescape's error.
func normalizeObjectKey(raw string) (string, error) {
	// Fast path: nothing to decode.
	if raw == "" {
		return "", nil
	}
	decoded, err := url.PathUnescape(raw)
	if err != nil {
		return "", err
	}
	return decoded, nil
}
// parseCopySource splits an x-amz-copy-source header value into its source
// bucket and decoded object key. A leading "/" and any query suffix
// (e.g. "?versionId=...") are dropped before splitting on the first "/".
// The key is percent-decoded and validated; failures are reported as plain
// errors (the validation error's S3 code becomes the message).
func parseCopySource(raw string) (string, string, error) {
	raw = strings.TrimSpace(raw)
	raw = strings.TrimPrefix(raw, "/")
	// Strip any query component before extracting bucket/key.
	if idx := strings.IndexByte(raw, '?'); idx >= 0 {
		raw = raw[:idx]
	}
	bucket, rawKey, found := strings.Cut(raw, "/")
	if !found || strings.TrimSpace(bucket) == "" || rawKey == "" {
		return "", "", errors.New("invalid copy source")
	}
	key, err := normalizeObjectKey(rawKey)
	if err != nil {
		return "", "", err
	}
	if apiErr := validateObjectKey(key); apiErr != nil {
		return "", "", errors.New(apiErr.Code)
	}
	return bucket, key, nil
}
// authorizeCopySource checks that the caller may read the copy source object
// (ActionGetObject on bucket/key). It is a no-op when auth is disabled;
// otherwise an unauthenticated request is rejected outright before the
// policy check.
func (h *Handler) authorizeCopySource(r *http.Request, bucket, key string) error {
	if h.authSvc == nil || !h.authSvc.Config().Enabled {
		return nil
	}
	authCtx, ok := auth.GetRequestContext(r.Context())
	if !ok || !authCtx.Authenticated {
		return auth.ErrAccessDenied
	}
	return h.authSvc.Authorize(authCtx.AccessKeyID, auth.RequestTarget{
		Action: auth.ActionGetObject,
		Bucket: bucket,
		Key:    key,
	})
}
func (h *Handler) handleGetObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*")
if apiErr := validateObjectKey(key); apiErr != nil {
key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path)
return
}
@@ -199,8 +272,8 @@ func (h *Handler) handleGetObject(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*")
if apiErr := validateObjectKey(key); apiErr != nil {
key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path)
return
}
@@ -275,8 +348,8 @@ func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*")
if apiErr := validateObjectKey(key); apiErr != nil {
key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path)
return
}
@@ -289,6 +362,10 @@ func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
writeS3Error(w, r, s3ErrInvalidPart, r.URL.Path)
return
}
if strings.TrimSpace(r.Header.Get("x-amz-copy-source")) != "" {
writeS3Error(w, r, s3ErrNotImplemented, r.URL.Path)
return
}
partNumber, err := strconv.Atoi(partNumberRaw)
if err != nil {
@@ -333,6 +410,42 @@ func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
}
}
if copySourceRaw := strings.TrimSpace(r.Header.Get("x-amz-copy-source")); copySourceRaw != "" {
srcBucket, srcKey, err := parseCopySource(copySourceRaw)
if err != nil {
writeS3Error(w, r, s3ErrInvalidArgument, r.URL.Path)
return
}
if err := h.authorizeCopySource(r, srcBucket, srcKey); err != nil {
writeMappedS3Error(w, r, err)
return
}
manifest, err := h.svc.CopyObject(srcBucket, srcKey, bucket, key)
if err != nil {
writeMappedS3Error(w, r, err)
return
}
response := models.CopyObjectResult{
Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
LastModified: time.Unix(manifest.CreatedAt, 0).UTC().Format("2006-01-02T15:04:05.000Z"),
ETag: `"` + manifest.ETag + `"`,
}
payload, err := xml.MarshalIndent(response, "", " ")
if err != nil {
writeMappedS3Error(w, r, err)
return
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.Header().Set("ETag", `"`+manifest.ETag+`"`)
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(xml.Header))
_, _ = w.Write(payload)
return
}
contentType := r.Header.Get("Content-Type")
if contentType == "" {
contentType = "application/octet-stream"
@@ -400,13 +513,21 @@ func shouldDecodeAWSChunkedPayload(r *http.Request) bool {
return true
}
signingMode := strings.ToLower(r.Header.Get("x-amz-content-sha256"))
return strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload")
if strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload") {
return true
}
return strings.HasPrefix(signingMode, "streaming-unsigned-payload")
}
func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
probedReader, isAWSChunked := probeAWSChunkedPayload(src)
if !isAWSChunked {
return io.NopCloser(probedReader)
}
pr, pw := io.Pipe()
go func() {
if err := decodeAWSChunkedPayload(src, pw); err != nil {
if err := decodeAWSChunkedPayload(probedReader, pw); err != nil {
_ = pw.CloseWithError(err)
return
}
@@ -415,6 +536,30 @@ func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
return pr
}
func probeAWSChunkedPayload(src io.Reader) (io.Reader, bool) {
reader := bufio.NewReaderSize(src, 512)
headerLine, err := reader.ReadSlice('\n')
replay := io.MultiReader(bytes.NewReader(headerLine), reader)
if err != nil {
return replay, false
}
line := strings.TrimRight(string(headerLine), "\r\n")
chunkSizeToken := line
if idx := strings.IndexByte(chunkSizeToken, ';'); idx >= 0 {
chunkSizeToken = chunkSizeToken[:idx]
}
chunkSizeToken = strings.TrimSpace(chunkSizeToken)
if chunkSizeToken == "" {
return replay, false
}
size, parseErr := strconv.ParseInt(chunkSizeToken, 16, 64)
if parseErr != nil || size < 0 {
return replay, false
}
return replay, true
}
func decodeAWSChunkedPayload(src io.Reader, dst io.Writer) error {
reader := bufio.NewReader(src)
for {
@@ -488,7 +633,7 @@ func (h *Handler) handlePutBucket(w http.ResponseWriter, r *http.Request) {
writeMappedS3Error(w, r, err)
return
}
w.WriteHeader(http.StatusCreated)
w.WriteHeader(http.StatusOK)
}
func (h *Handler) handleDeleteBucket(w http.ResponseWriter, r *http.Request) {
@@ -584,8 +729,8 @@ func (h *Handler) handlePostBucket(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handleDeleteObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*")
if apiErr := validateObjectKey(key); apiErr != nil {
key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path)
return
}
@@ -621,8 +766,8 @@ func (h *Handler) handleHeadBucket(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handleHeadObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*")
if apiErr := validateObjectKey(key); apiErr != nil {
key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path)
return
}

View File

@@ -2,7 +2,6 @@ package app
import (
"context"
"fmt"
"fs/api"
"fs/auth"
"fs/logging"
@@ -85,7 +84,7 @@ func RunServer(ctx context.Context) error {
if err := handler.Start(ctx, addr); err != nil {
logger.Error("server_stopped_with_error", "error", err)
return fmt.Errorf("server start failed: %w", err)
return err
}
return nil
}

View File

@@ -137,14 +137,6 @@ Each audit entry includes method, path, remote IP, and request ID (if present).
## Current Scope / Limitations
- No STS/session-token auth yet.
- No admin API for managing multiple users yet.
- Policy language is intentionally minimal, not full IAM.
- No automatic key rotation workflows.
## Practical Next Step
To support multiple users cleanly, add admin operations in auth service + API:
- create user
- rotate secret
- set policy
- disable/enable
- delete user
- No key rotation endpoint for existing users yet.

View File

@@ -205,6 +205,29 @@ func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
}, nil
}
// Authorize reports whether accessKeyID may perform target.Action against
// the target bucket/key. It returns nil unconditionally when auth is
// disabled. Every failure mode — blank key, blank action, policy lookup
// error, or policy mismatch — collapses into ErrAccessDenied so callers
// cannot distinguish (and therefore cannot leak) the reason.
func (s *Service) Authorize(accessKeyID string, target RequestTarget) error {
	if !s.cfg.Enabled {
		return nil
	}
	accessKeyID = strings.TrimSpace(accessKeyID)
	if accessKeyID == "" {
		return ErrAccessDenied
	}
	if target.Action == "" {
		return ErrAccessDenied
	}
	policy, err := s.store.GetAuthPolicy(accessKeyID)
	if err != nil {
		// Missing user and store failure are deliberately indistinguishable.
		return ErrAccessDenied
	}
	if !isAllowed(policy, target) {
		return ErrAccessDenied
	}
	return nil
}
func (s *Service) CreateUser(input CreateUserInput) (*CreateUserResult, error) {
if !s.cfg.Enabled {
return nil, ErrAuthNotEnabled

View File

@@ -259,7 +259,57 @@ func canonicalPath(u *url.URL) string {
if path == "" {
return "/"
}
return path
return awsEncodePath(path)
}
// awsEncodePath re-encodes a URL path into SigV4 canonical form: "/" and
// RFC 3986 unreserved characters pass through, existing %XX escapes are
// normalized to uppercase hex, and every other byte is percent-encoded with
// uppercase hex digits.
func awsEncodePath(path string) string {
	var b strings.Builder
	b.Grow(len(path))
	for i := 0; i < len(path); i++ {
		ch := path[i]
		if ch == '/' || isUnreserved(ch) {
			b.WriteByte(ch)
			continue
		}
		// A well-formed existing escape is kept, but its hex digits are
		// uppercased — SigV4 canonicalization is case-sensitive.
		if ch == '%' && i+2 < len(path) && isHex(path[i+1]) && isHex(path[i+2]) {
			b.WriteByte('%')
			b.WriteByte(toUpperHex(path[i+1]))
			b.WriteByte(toUpperHex(path[i+2]))
			i += 2
			continue
		}
		// Anything else — including a stray '%' — gets percent-encoded.
		b.WriteByte('%')
		b.WriteByte(hexUpper(ch >> 4))
		b.WriteByte(hexUpper(ch & 0x0F))
	}
	return b.String()
}
// isUnreserved reports whether ch is an RFC 3986 unreserved character
// (ALPHA / DIGIT / "-" / "_" / "." / "~").
func isUnreserved(ch byte) bool {
	switch {
	case 'A' <= ch && ch <= 'Z':
		return true
	case 'a' <= ch && ch <= 'z':
		return true
	case '0' <= ch && ch <= '9':
		return true
	}
	switch ch {
	case '-', '_', '.', '~':
		return true
	}
	return false
}
// isHex reports whether ch is an ASCII hexadecimal digit (either case).
func isHex(ch byte) bool {
	switch {
	case ch >= '0' && ch <= '9':
		return true
	case ch >= 'a' && ch <= 'f':
		return true
	case ch >= 'A' && ch <= 'F':
		return true
	default:
		return false
	}
}
// toUpperHex maps a lowercase hex digit ('a'..'f') to its uppercase form;
// every other byte is returned unchanged.
func toUpperHex(ch byte) byte {
	if 'a' <= ch && ch <= 'f' {
		return ch - 'a' + 'A'
	}
	return ch
}
// hexUpper renders a nibble value (0-15) as an uppercase hex digit.
func hexUpper(nibble byte) byte {
	if nibble >= 10 {
		return 'A' + (nibble - 10)
	}
	return '0' + nibble
}
type queryPair struct {

View File

@@ -15,7 +15,7 @@ import (
)
const (
defaultAdminEndpoint = "http://localhost:3000"
defaultAdminEndpoint = "http://localhost:2600"
defaultAdminRegion = "us-east-1"
)
@@ -48,6 +48,7 @@ func newAdminCommand(build BuildInfo) *cobra.Command {
cmd.AddCommand(newAdminUserCommand(opts))
cmd.AddCommand(newAdminDiagCommand(opts, build))
cmd.AddCommand(newAdminSnapshotCommand(opts))
return cmd
}
@@ -107,7 +108,7 @@ func endpointFromServerConfig(address string, port int) string {
host = "localhost"
}
if port <= 0 || port > 65535 {
port = 3000
port = 2600
}
return "http://" + net.JoinHostPort(host, strconv.Itoa(port))
}

728
cmd/admin_snapshot.go Normal file
View File

@@ -0,0 +1,728 @@
package cmd
import (
	"archive/tar"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"
	bolt "go.etcd.io/bbolt"
)
const (
snapshotManifestPath = ".fs-snapshot/manifest.json"
snapshotFormat = 1
)
type snapshotFileEntry struct {
Path string `json:"path"`
Size int64 `json:"size"`
SHA256 string `json:"sha256"`
}
type snapshotManifest struct {
FormatVersion int `json:"formatVersion"`
CreatedAt string `json:"createdAt"`
SourcePath string `json:"sourcePath"`
Files []snapshotFileEntry `json:"files"`
}
type snapshotSummary struct {
SnapshotFile string `json:"snapshotFile"`
CreatedAt string `json:"createdAt"`
SourcePath string `json:"sourcePath"`
FileCount int `json:"fileCount"`
TotalBytes int64 `json:"totalBytes"`
}
// newAdminSnapshotCommand builds the `fs admin snapshot` command group.
// The parent command only prints help; the actual work lives in the
// create/inspect/restore subcommands.
func newAdminSnapshotCommand(opts *adminOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "snapshot",
		Short: "Offline snapshot and restore utilities",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	cmd.AddCommand(newAdminSnapshotCreateCommand(opts))
	cmd.AddCommand(newAdminSnapshotInspectCommand(opts))
	cmd.AddCommand(newAdminSnapshotRestoreCommand(opts))
	return cmd
}
// newAdminSnapshotCreateCommand builds `fs admin snapshot create`, which
// packs an offline data directory into a .tar.gz archive. Both --data-path
// and --out are required; output is JSON when opts.JSON is set, otherwise a
// one-line human-readable summary.
func newAdminSnapshotCreateCommand(opts *adminOptions) *cobra.Command {
	var dataPath string
	var outFile string
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create offline snapshot tarball (.tar.gz)",
		RunE: func(cmd *cobra.Command, args []string) error {
			dataPath = strings.TrimSpace(dataPath)
			outFile = strings.TrimSpace(outFile)
			if dataPath == "" {
				return usageError("fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>", "--data-path is required")
			}
			if outFile == "" {
				return usageError("fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>", "--out is required")
			}
			result, err := createSnapshotArchive(context.Background(), dataPath, outFile)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "snapshot created: %s (files=%d bytes=%d)\n", result.SnapshotFile, result.FileCount, result.TotalBytes)
			return err
		},
	}
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Source data path (must contain metadata.db)")
	cmd.Flags().StringVar(&outFile, "out", "", "Output snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotInspectCommand builds `fs admin snapshot inspect`, which
// verifies archive integrity (manifest vs actual file checksums) without
// extracting anything. --file is required.
func newAdminSnapshotInspectCommand(opts *adminOptions) *cobra.Command {
	var filePath string
	cmd := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect and verify snapshot archive integrity",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			if filePath == "" {
				return usageError("fs admin snapshot inspect --file <snapshot.tar.gz>", "--file is required")
			}
			manifest, summary, err := inspectSnapshotArchive(filePath)
			if err != nil {
				return err
			}
			if opts.JSON {
				// JSON mode emits both the summary and the full manifest.
				return writeJSON(cmd.OutOrStdout(), map[string]any{
					"summary":  summary,
					"manifest": manifest,
				})
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot ok: %s\ncreated_at=%s source=%s files=%d bytes=%d\n",
				summary.SnapshotFile,
				summary.CreatedAt,
				summary.SourcePath,
				summary.FileCount,
				summary.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	return cmd
}
// newAdminSnapshotRestoreCommand builds `fs admin snapshot restore`, which
// verifies an archive and extracts it into --data-path. --file and
// --data-path are required; --force allows overwriting an existing,
// non-empty destination.
func newAdminSnapshotRestoreCommand(opts *adminOptions) *cobra.Command {
	var filePath string
	var dataPath string
	var force bool
	cmd := &cobra.Command{
		Use:   "restore",
		Short: "Restore snapshot into a data path (offline only)",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			dataPath = strings.TrimSpace(dataPath)
			if filePath == "" {
				return usageError("fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]", "--file is required")
			}
			if dataPath == "" {
				return usageError("fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]", "--data-path is required")
			}
			result, err := restoreSnapshotArchive(context.Background(), filePath, dataPath, force)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot restored to %s (files=%d bytes=%d)\n",
				result.SourcePath,
				result.FileCount,
				result.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Destination data path")
	cmd.Flags().BoolVar(&force, "force", false, "Overwrite destination data path if it exists")
	return cmd
}
// createSnapshotArchive packs the data directory at dataPath into a gzipped
// tarball at outFile and returns a summary of what was written.
//
// Safety properties:
//   - refuses to write the archive inside the directory being archived;
//   - requires metadata.db to exist and the data path to be offline
//     (bbolt lock probe) before reading anything;
//   - writes to a unique ".tmp-<nanos>" sibling first, then renames into
//     place and fsyncs the parent directory, so outFile is never observed
//     half-written.
//
// ctx is currently unused (kept for interface stability).
func createSnapshotArchive(ctx context.Context, dataPath, outFile string) (*snapshotSummary, error) {
	_ = ctx
	sourceAbs, err := filepath.Abs(filepath.Clean(dataPath))
	if err != nil {
		return nil, err
	}
	outAbs, err := filepath.Abs(filepath.Clean(outFile))
	if err != nil {
		return nil, err
	}
	// Archiving into the source tree would make the walk see its own output.
	if isPathWithin(sourceAbs, outAbs) {
		return nil, errors.New("output file cannot be inside --data-path")
	}
	info, err := os.Stat(sourceAbs)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("data path %q is not a directory", sourceAbs)
	}
	if err := ensureMetadataExists(sourceAbs); err != nil {
		return nil, err
	}
	if err := ensureDataPathOffline(sourceAbs); err != nil {
		return nil, err
	}
	// Hash every file up front; the manifest doubles as the archive index.
	manifest, totalBytes, err := buildSnapshotManifest(sourceAbs)
	if err != nil {
		return nil, err
	}
	if err := os.MkdirAll(filepath.Dir(outAbs), 0o755); err != nil {
		return nil, err
	}
	tmpPath := outAbs + ".tmp-" + strconvNowNano()
	// O_EXCL: fail rather than clobber a concurrent writer's temp file.
	file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Safety net; double-close after the explicit Close below is benign.
		_ = file.Close()
	}()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	// Manifest goes first so readers can stream-validate.
	if err := writeManifestToTar(tw, manifest); err != nil {
		_ = tw.Close()
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}
	for _, entry := range manifest.Files {
		absPath := filepath.Join(sourceAbs, filepath.FromSlash(entry.Path))
		if err := writeFileToTar(tw, absPath, entry.Path); err != nil {
			_ = tw.Close()
			_ = gzw.Close()
			_ = os.Remove(tmpPath)
			return nil, err
		}
	}
	// Close order matters: tar trailer, then gzip footer, then fsync+close.
	if err := tw.Close(); err != nil {
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := gzw.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Sync(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := os.Rename(tmpPath, outAbs); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	// Make the rename itself durable.
	if err := syncDir(filepath.Dir(outAbs)); err != nil {
		return nil, err
	}
	return &snapshotSummary{
		SnapshotFile: outAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   sourceAbs,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}, nil
}
// inspectSnapshotArchive reads the archive at filePath and cross-checks the
// embedded manifest against the actual entries: same file set, same sizes,
// same SHA-256 digests. It returns the manifest plus an aggregate summary;
// any discrepancy fails the whole inspection.
func inspectSnapshotArchive(filePath string) (*snapshotManifest, *snapshotSummary, error) {
	fileAbs, err := filepath.Abs(filepath.Clean(filePath))
	if err != nil {
		return nil, nil, err
	}
	manifest, actual, err := readSnapshotArchive(fileAbs)
	if err != nil {
		return nil, nil, err
	}
	expected := map[string]snapshotFileEntry{}
	var totalBytes int64
	for _, entry := range manifest.Files {
		expected[entry.Path] = entry
		totalBytes += entry.Size
	}
	// Count check catches extra archive entries the manifest never listed.
	if len(expected) != len(actual) {
		return nil, nil, fmt.Errorf("snapshot validation failed: expected %d files, got %d", len(expected), len(actual))
	}
	for path, exp := range expected {
		got, ok := actual[path]
		if !ok {
			return nil, nil, fmt.Errorf("snapshot validation failed: missing file %s", path)
		}
		if got.Size != exp.Size || got.SHA256 != exp.SHA256 {
			return nil, nil, fmt.Errorf("snapshot validation failed: checksum mismatch for %s", path)
		}
	}
	return manifest, &snapshotSummary{
		SnapshotFile: fileAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   manifest.SourcePath,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}, nil
}
// restoreSnapshotArchive verifies the archive at filePath and extracts it
// into destinationPath. Extraction happens into a hidden staging directory
// next to the destination, which is atomically renamed into place only after
// every file's checksum has been re-verified — so a failed restore never
// leaves a partially-written data path. An existing destination requires
// force and must not be a live (locked) data path.
//
// ctx is currently unused (kept for interface stability).
func restoreSnapshotArchive(ctx context.Context, filePath, destinationPath string, force bool) (*snapshotSummary, error) {
	_ = ctx
	// Full integrity pass before touching the destination at all.
	manifest, summary, err := inspectSnapshotArchive(filePath)
	if err != nil {
		return nil, err
	}
	destAbs, err := filepath.Abs(filepath.Clean(destinationPath))
	if err != nil {
		return nil, err
	}
	if fi, statErr := os.Stat(destAbs); statErr == nil && fi.IsDir() {
		// Refuse to restore over a data path a running server holds open.
		if err := ensureDataPathOffline(destAbs); err != nil {
			return nil, err
		}
		entries, err := os.ReadDir(destAbs)
		if err == nil && len(entries) > 0 && !force {
			return nil, errors.New("destination data path is not empty; use --force to overwrite")
		}
	}
	parent := filepath.Dir(destAbs)
	if err := os.MkdirAll(parent, 0o755); err != nil {
		return nil, err
	}
	// Unique hidden sibling, e.g. ".<dest>.restore-<nanos>".
	stage := filepath.Join(parent, "."+filepath.Base(destAbs)+".restore-"+strconvNowNano())
	if err := os.MkdirAll(stage, 0o755); err != nil {
		return nil, err
	}
	cleanupStage := true
	defer func() {
		// Remove the staging dir on any failure path; disabled on success.
		if cleanupStage {
			_ = os.RemoveAll(stage)
		}
	}()
	if err := extractSnapshotArchive(filePath, stage, manifest); err != nil {
		return nil, err
	}
	if err := syncDir(stage); err != nil {
		return nil, err
	}
	if _, err := os.Stat(destAbs); err == nil {
		if !force {
			return nil, errors.New("destination data path exists; use --force to overwrite")
		}
		if err := os.RemoveAll(destAbs); err != nil {
			return nil, err
		}
	}
	// Atomic swap of the fully-verified staging tree into place.
	if err := os.Rename(stage, destAbs); err != nil {
		return nil, err
	}
	if err := syncDir(parent); err != nil {
		return nil, err
	}
	cleanupStage = false
	summary.SourcePath = destAbs
	return summary, nil
}
func ensureMetadataExists(dataPath string) error {
dbPath := filepath.Join(dataPath, "metadata.db")
info, err := os.Stat(dbPath)
if err != nil {
return fmt.Errorf("metadata.db not found in %s", dataPath)
}
if !info.Mode().IsRegular() {
return fmt.Errorf("metadata.db in %s is not a regular file", dataPath)
}
return nil
}
// ensureDataPathOffline checks that no running server holds dataPath's
// metadata.db. It does this by briefly opening the bbolt database read-only;
// bbolt takes a file lock, so a live server makes the open time out. A
// missing metadata.db counts as offline (nothing to lock).
func ensureDataPathOffline(dataPath string) error {
	dbPath := filepath.Join(dataPath, "metadata.db")
	if _, err := os.Stat(dbPath); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// No database yet — trivially offline.
			return nil
		}
		return err
	}
	// Short timeout: if the lock isn't free almost immediately, assume a
	// server owns it rather than blocking the CLI.
	db, err := bolt.Open(dbPath, 0o600, &bolt.Options{
		Timeout:  100 * time.Millisecond,
		ReadOnly: true,
	})
	if err != nil {
		return fmt.Errorf("data path appears in use (metadata.db locked): %w", err)
	}
	return db.Close()
}
// buildSnapshotManifest walks dataPath and records every regular file's
// slash-separated relative path, size, and SHA-256 digest. It returns the
// manifest and the total byte count, or an error if the walk fails or the
// directory contains no regular files at all. Non-regular entries (symlinks,
// sockets, ...) are silently skipped.
func buildSnapshotManifest(dataPath string) (*snapshotManifest, int64, error) {
	entries := make([]snapshotFileEntry, 0, 128)
	var totalBytes int64
	err := filepath.WalkDir(dataPath, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			// Only regular files are archivable/hashable.
			return nil
		}
		rel, err := filepath.Rel(dataPath, path)
		if err != nil {
			return err
		}
		// Manifest paths are always forward-slash, regardless of OS.
		rel = filepath.ToSlash(filepath.Clean(rel))
		if rel == "." || rel == "" {
			return nil
		}
		sum, err := sha256File(path)
		if err != nil {
			return err
		}
		totalBytes += info.Size()
		entries = append(entries, snapshotFileEntry{
			Path:   rel,
			Size:   info.Size(),
			SHA256: sum,
		})
		return nil
	})
	if err != nil {
		return nil, 0, err
	}
	if len(entries) == 0 {
		return nil, 0, errors.New("data path contains no regular files to snapshot")
	}
	return &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     time.Now().UTC().Format(time.RFC3339Nano),
		SourcePath:    dataPath,
		Files:         entries,
	}, totalBytes, nil
}
// writeManifestToTar serializes the manifest as JSON and writes it into tw
// under the reserved snapshotManifestPath entry. Callers write it as the
// first archive entry so readers can locate it while streaming.
func writeManifestToTar(tw *tar.Writer, manifest *snapshotManifest) error {
	if tw == nil || manifest == nil {
		return errors.New("invalid manifest writer input")
	}
	payload, err := json.Marshal(manifest)
	if err != nil {
		return err
	}
	header := &tar.Header{
		Name:    snapshotManifestPath,
		Mode:    0o600,
		Size:    int64(len(payload)),
		ModTime: time.Now(),
	}
	if err := tw.WriteHeader(header); err != nil {
		return err
	}
	_, err = tw.Write(payload)
	return err
}
func writeFileToTar(tw *tar.Writer, absPath, relPath string) error {
file, err := os.Open(absPath)
if err != nil {
return err
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: relPath,
Mode: int64(info.Mode().Perm()),
Size: info.Size(),
ModTime: info.ModTime(),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err = io.Copy(tw, file)
return err
}
// readSnapshotArchive streams the gzipped tar at filePath once, returning the
// decoded manifest and a map of every other regular entry's actual size and
// SHA-256 digest (keyed by cleaned path). It rejects unsafe paths, non-file
// entry types, a missing manifest, and an unsupported format version.
func readSnapshotArchive(filePath string) (*snapshotManifest, map[string]snapshotFileEntry, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return nil, nil, err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	actual := make(map[string]snapshotFileEntry)
	var manifest *snapshotManifest
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		// Reject absolute/parent-escaping names before using them as keys.
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return nil, nil, err
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		// NOTE(review): tar.TypeRegA is deprecated in the stdlib; kept here
		// to accept archives from older tar producers.
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return nil, nil, fmt.Errorf("unsupported tar entry type for %s", name)
		}
		if name == snapshotManifestPath {
			raw, err := io.ReadAll(tr)
			if err != nil {
				return nil, nil, err
			}
			current := &snapshotManifest{}
			if err := json.Unmarshal(raw, current); err != nil {
				return nil, nil, err
			}
			// A later manifest entry silently wins over an earlier one.
			manifest = current
			continue
		}
		size, hashHex, err := digestReader(tr)
		if err != nil {
			return nil, nil, err
		}
		actual[name] = snapshotFileEntry{
			Path:   name,
			Size:   size,
			SHA256: hashHex,
		}
	}
	if manifest == nil {
		return nil, nil, errors.New("snapshot manifest.json not found")
	}
	if manifest.FormatVersion != snapshotFormat {
		return nil, nil, fmt.Errorf("unsupported snapshot format version %d", manifest.FormatVersion)
	}
	return manifest, actual, nil
}
// extractSnapshotArchive unpacks the archive at filePath into destination,
// admitting only files listed in manifest. Every file is re-hashed while
// being written and must match the manifest's size and SHA-256; each file
// and its parent directory are fsynced. Unexpected entries, path-traversal
// names, unsupported entry types, and any missing manifest file fail the
// extraction. The caller is responsible for cleaning up destination on error.
func extractSnapshotArchive(filePath, destination string, manifest *snapshotManifest) error {
	expected := make(map[string]snapshotFileEntry, len(manifest.Files))
	for _, entry := range manifest.Files {
		expected[entry.Path] = entry
	}
	// Tracks which manifest entries were actually seen in the stream.
	seen := make(map[string]struct{}, len(expected))
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	for {
		header, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return err
		}
		name, err := cleanArchivePath(header.Name)
		if err != nil {
			return err
		}
		if name == snapshotManifestPath {
			// Manifest was validated upstream; just drain its bytes.
			if _, err := io.Copy(io.Discard, tr); err != nil {
				return err
			}
			continue
		}
		if header.Typeflag == tar.TypeDir {
			continue
		}
		if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
			return fmt.Errorf("unsupported tar entry type for %s", name)
		}
		exp, ok := expected[name]
		if !ok {
			return fmt.Errorf("snapshot contains unexpected file %s", name)
		}
		targetPath := filepath.Join(destination, filepath.FromSlash(name))
		// Defense in depth against zip-slip even after cleanArchivePath.
		if !isPathWithin(destination, targetPath) {
			return fmt.Errorf("invalid archive path %s", name)
		}
		if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
			return err
		}
		out, err := os.OpenFile(targetPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(header.Mode)&0o777)
		if err != nil {
			return err
		}
		// Hash while writing so no second read pass is needed.
		hasher := sha256.New()
		written, copyErr := io.Copy(io.MultiWriter(out, hasher), tr)
		// Always sync and close, then report the first failure in order.
		syncErr := out.Sync()
		closeErr := out.Close()
		if copyErr != nil {
			return copyErr
		}
		if syncErr != nil {
			return syncErr
		}
		if closeErr != nil {
			return closeErr
		}
		if err := syncDir(filepath.Dir(targetPath)); err != nil {
			return err
		}
		sum := hex.EncodeToString(hasher.Sum(nil))
		if written != exp.Size || sum != exp.SHA256 {
			return fmt.Errorf("checksum mismatch while extracting %s", name)
		}
		seen[name] = struct{}{}
	}
	if len(seen) != len(expected) {
		return fmt.Errorf("restore validation failed: extracted %d files, expected %d", len(seen), len(expected))
	}
	for path := range expected {
		if _, ok := seen[path]; !ok {
			return fmt.Errorf("restore validation failed: missing file %s", path)
		}
	}
	return nil
}
// cleanArchivePath normalizes a tar entry name to a clean, slash-separated,
// relative path. It rejects blank names, absolute paths, and any form of
// parent-directory traversal so archive entries cannot escape the
// extraction root.
func cleanArchivePath(name string) (string, error) {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" {
		return "", errors.New("empty archive path")
	}
	cleaned := filepath.ToSlash(filepath.Clean(trimmed))
	switch {
	case cleaned == "..",
		strings.HasPrefix(cleaned, "/"),
		strings.HasPrefix(cleaned, "../"),
		strings.Contains(cleaned, "/../"):
		return "", fmt.Errorf("unsafe archive path %q", cleaned)
	}
	return cleaned, nil
}
func digestReader(r io.Reader) (int64, string, error) {
hasher := sha256.New()
n, err := io.Copy(hasher, r)
if err != nil {
return 0, "", err
}
return n, hex.EncodeToString(hasher.Sum(nil)), nil
}
func sha256File(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
return sha256FromReader(file)
}
func sha256FromReader(reader io.Reader) (string, error) {
hasher := sha256.New()
if _, err := io.Copy(hasher, reader); err != nil {
return "", err
}
return hex.EncodeToString(hasher.Sum(nil)), nil
}
// isPathWithin reports whether candidate, after cleaning, is base itself or
// lies underneath base. It relies on filepath.Rel: any relative path whose
// first component is ".." escapes the base.
func isPathWithin(base, candidate string) bool {
	rel, err := filepath.Rel(filepath.Clean(base), filepath.Clean(candidate))
	if err != nil {
		return false
	}
	if rel == "." {
		return true
	}
	first := strings.SplitN(rel, string(filepath.Separator), 2)[0]
	return first != ".."
}
func syncDir(path string) error {
dir, err := os.Open(path)
if err != nil {
return err
}
defer dir.Close()
return dir.Sync()
}
// strconvNowNano returns the current wall-clock time as nanoseconds since
// the Unix epoch, formatted as a decimal string.
func strconvNowNano() string {
	return fmt.Sprint(time.Now().UnixNano())
}

240
cmd/admin_snapshot_test.go Normal file
View File

@@ -0,0 +1,240 @@
package cmd
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
bolt "go.etcd.io/bbolt"
)
// snapshotArchiveEntry describes one file to place inside a test snapshot
// archive: its slash-separated path within the tarball and its raw contents.
type snapshotArchiveEntry struct {
	Path string // archive-relative path, e.g. "chunks/c1"
	Data []byte // file contents written into the tar entry
}
// TestInspectSnapshotArchiveRejectsUnsafePath verifies that an archive entry
// attempting parent-directory traversal is rejected during inspection.
func TestInspectSnapshotArchiveRejectsUnsafePath(t *testing.T) {
	t.Parallel()
	archivePath := filepath.Join(t.TempDir(), "bad.tar.gz")
	// The manifest lists a legitimate file, but the archive smuggles in a
	// traversal entry.
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db")},
	})
	writeErr := writeSnapshotArchiveForTest(archivePath, manifest, []snapshotArchiveEntry{
		{Path: "../escape", Data: []byte("oops")},
	}, true)
	if writeErr != nil {
		t.Fatalf("write test archive: %v", writeErr)
	}
	if _, _, err := inspectSnapshotArchive(archivePath); err == nil || !strings.Contains(err.Error(), "unsafe archive path") {
		t.Fatalf("expected unsafe archive path error, got %v", err)
	}
}
// TestInspectSnapshotArchiveChecksumMismatch verifies that inspection fails
// when an entry's contents do not match the checksum the manifest recorded.
func TestInspectSnapshotArchiveChecksumMismatch(t *testing.T) {
	t.Parallel()
	archivePath := filepath.Join(t.TempDir(), "mismatch.tar.gz")
	// The manifest records a checksum for "good", but the archive stores "bad".
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("good")},
	})
	writeErr := writeSnapshotArchiveForTest(archivePath, manifest, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("bad")},
	}, true)
	if writeErr != nil {
		t.Fatalf("write test archive: %v", writeErr)
	}
	if _, _, err := inspectSnapshotArchive(archivePath); err == nil || !strings.Contains(err.Error(), "checksum mismatch") {
		t.Fatalf("expected checksum mismatch error, got %v", err)
	}
}
func TestInspectSnapshotArchiveMissingManifest(t *testing.T) {
t.Parallel()
archive := filepath.Join(t.TempDir(), "no-manifest.tar.gz")
err := writeSnapshotArchiveForTest(archive, nil, []snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
}, false)
if err != nil {
t.Fatalf("write test archive: %v", err)
}
_, _, err = inspectSnapshotArchive(archive)
if err == nil || !strings.Contains(err.Error(), "manifest.json not found") {
t.Fatalf("expected missing manifest error, got %v", err)
}
}
func TestInspectSnapshotArchiveUnsupportedFormat(t *testing.T) {
t.Parallel()
archive := filepath.Join(t.TempDir(), "unsupported-format.tar.gz")
manifest := manifestForEntries([]snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
})
manifest.FormatVersion = 99
err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
{Path: "chunks/c1", Data: []byte("x")},
}, true)
if err != nil {
t.Fatalf("write test archive: %v", err)
}
_, _, err = inspectSnapshotArchive(archive)
if err == nil || !strings.Contains(err.Error(), "unsupported snapshot format version") {
t.Fatalf("expected unsupported format error, got %v", err)
}
}
// TestRestoreSnapshotArchiveDestinationBehavior verifies that restore refuses
// a non-empty destination without force, and with force wipes and repopulates
// the destination from the archive.
func TestRestoreSnapshotArchiveDestinationBehavior(t *testing.T) {
	t.Parallel()
	base := t.TempDir()
	archivePath := filepath.Join(base, "ok.tar.gz")
	dest := filepath.Join(base, "dst")
	entries := []snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db-bytes")},
		{Path: "chunks/c1", Data: []byte("chunk-1")},
	}
	if err := writeSnapshotArchiveForTest(archivePath, manifestForEntries(entries), entries, true); err != nil {
		t.Fatalf("write test archive: %v", err)
	}
	// Seed the destination with a stray file so it is non-empty.
	if err := os.MkdirAll(dest, 0o755); err != nil {
		t.Fatalf("mkdir destination: %v", err)
	}
	if err := os.WriteFile(filepath.Join(dest, "old.txt"), []byte("old"), 0o600); err != nil {
		t.Fatalf("seed destination: %v", err)
	}
	// Without force, restoring into a non-empty directory must fail.
	_, restoreErr := restoreSnapshotArchive(context.Background(), archivePath, dest, false)
	if restoreErr == nil || !strings.Contains(restoreErr.Error(), "not empty") {
		t.Fatalf("expected non-empty destination error, got %v", restoreErr)
	}
	// With force, pre-existing files are removed and the archive is extracted.
	if _, err := restoreSnapshotArchive(context.Background(), archivePath, dest, true); err != nil {
		t.Fatalf("restore with force: %v", err)
	}
	if _, statErr := os.Stat(filepath.Join(dest, "old.txt")); !os.IsNotExist(statErr) {
		t.Fatalf("expected old file to be removed, stat err=%v", statErr)
	}
	restored, readErr := os.ReadFile(filepath.Join(dest, "chunks/c1"))
	if readErr != nil {
		t.Fatalf("read restored chunk: %v", readErr)
	}
	if string(restored) != "chunk-1" {
		t.Fatalf("restored chunk mismatch: got %q", string(restored))
	}
}
// TestCreateSnapshotArchiveRejectsOutputInsideDataPath verifies that the
// archive target may not live under the data path being snapshotted (the
// archive would otherwise try to include itself).
func TestCreateSnapshotArchiveRejectsOutputInsideDataPath(t *testing.T) {
	t.Parallel()
	dataPath := t.TempDir()
	if err := os.MkdirAll(filepath.Join(dataPath, "chunks"), 0o755); err != nil {
		t.Fatalf("mkdir chunks: %v", err)
	}
	if err := createBoltDBForTest(filepath.Join(dataPath, "metadata.db")); err != nil {
		t.Fatalf("create metadata db: %v", err)
	}
	if err := os.WriteFile(filepath.Join(dataPath, "chunks/c1"), []byte("x"), 0o600); err != nil {
		t.Fatalf("write chunk: %v", err)
	}
	target := filepath.Join(dataPath, "inside.tar.gz")
	_, err := createSnapshotArchive(context.Background(), dataPath, target)
	if err == nil || !strings.Contains(err.Error(), "cannot be inside") {
		t.Fatalf("expected output-inside-data-path error, got %v", err)
	}
}
// writeSnapshotArchiveForTest writes a gzipped tar archive at path containing
// the given entries, optionally preceded by a JSON-encoded manifest entry.
//
// The tar and gzip writers are closed explicitly on the success path so their
// flush errors are reported: the previous deferred Closes silently discarded
// them, which could leave a truncated archive undetected.
func writeSnapshotArchiveForTest(path string, manifest *snapshotManifest, entries []snapshotArchiveEntry, includeManifest bool) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	// Best-effort close on error paths; the success path closes explicitly
	// below and returns that error instead.
	defer file.Close()
	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	if includeManifest {
		raw, err := json.Marshal(manifest)
		if err != nil {
			return err
		}
		if err := writeTarEntry(tw, snapshotManifestPath, raw); err != nil {
			return err
		}
	}
	for _, entry := range entries {
		if err := writeTarEntry(tw, entry.Path, entry.Data); err != nil {
			return err
		}
	}
	// Close order matters: tar footer first, then gzip trailer, then the file.
	if err := tw.Close(); err != nil {
		return err
	}
	if err := gzw.Close(); err != nil {
		return err
	}
	return file.Close()
}
func writeTarEntry(tw *tar.Writer, name string, data []byte) error {
header := &tar.Header{
Name: name,
Mode: 0o600,
Size: int64(len(data)),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err := ioCopyBytes(tw, data)
return err
}
// manifestForEntries builds a snapshot manifest whose file list mirrors the
// given archive entries, computing each entry's size and SHA-256 and using
// fixed metadata suitable for tests.
func manifestForEntries(entries []snapshotArchiveEntry) *snapshotManifest {
	manifest := &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     "2026-03-11T00:00:00Z",
		SourcePath:    "/tmp/source",
		Files:         make([]snapshotFileEntry, 0, len(entries)),
	}
	for _, item := range entries {
		digest := sha256.Sum256(item.Data)
		manifest.Files = append(manifest.Files, snapshotFileEntry{
			Path:   filepath.ToSlash(filepath.Clean(item.Path)),
			Size:   int64(len(item.Data)),
			SHA256: hex.EncodeToString(digest[:]),
		})
	}
	return manifest
}
// createBoltDBForTest creates a minimal bbolt database at path containing a
// single bucket named "x", giving snapshot code a real metadata.db to read.
// The close error is intentionally ignored, as before.
func createBoltDBForTest(path string) error {
	database, err := bolt.Open(path, 0o600, nil)
	if err != nil {
		return err
	}
	updateErr := database.Update(func(tx *bolt.Tx) error {
		_, bucketErr := tx.CreateBucketIfNotExists([]byte("x"))
		return bucketErr
	})
	_ = database.Close()
	return updateErr
}
func ioCopyBytes(w *tar.Writer, data []byte) (int64, error) {
n, err := bytes.NewReader(data).WriteTo(w)
return n, err
}

View File

@@ -22,6 +22,7 @@ func newAdminUserCommand(opts *adminOptions) *cobra.Command {
cmd.AddCommand(newAdminUserDeleteCommand(opts))
cmd.AddCommand(newAdminUserSetStatusCommand(opts))
cmd.AddCommand(newAdminUserSetRoleCommand(opts))
cmd.AddCommand(newAdminUserRemoveRoleCommand(opts))
return cmd
}
@@ -253,6 +254,68 @@ func newAdminUserSetRoleCommand(opts *adminOptions) *cobra.Command {
return cmd
}
// newAdminUserRemoveRoleCommand builds the `fs admin user remove-role`
// subcommand, which deletes a single role-derived policy statement from a
// user while refusing to strip the user's last remaining statement.
//
// The usage string was previously duplicated verbatim in three places; it is
// hoisted into one constant so the copies cannot drift apart.
func newAdminUserRemoveRoleCommand(opts *adminOptions) *cobra.Command {
	const usage = "fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]"
	var (
		role   string
		bucket string
		prefix string
	)
	cmd := &cobra.Command{
		Use:   "remove-role <access-key-id>",
		Short: "Remove one role policy statement from user",
		Args:  requireAccessKeyArg(usage),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build the statement described by --role/--bucket/--prefix so it
			// can be matched against the user's existing policy.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError(usage, err.Error())
			}
			if len(policy.Statements) == 0 {
				return usageError(usage, "no statement to remove")
			}
			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}
			existing, err := client.GetUser(context.Background(), args[0])
			if err != nil {
				return err
			}
			if existing.Policy == nil || len(existing.Policy.Statements) == 0 {
				return fmt.Errorf("user %q has no policy statements", args[0])
			}
			target := policy.Statements[0]
			nextPolicy, removed := removePolicyStatements(existing.Policy, target)
			if removed == 0 {
				return fmt.Errorf("no matching statement found for role=%s bucket=%s prefix=%s", role, bucket, prefix)
			}
			// Never leave a user with an empty policy; set-role --replace is
			// the explicit path for replacing the whole policy.
			if len(nextPolicy.Statements) == 0 {
				return fmt.Errorf("cannot remove the last policy statement; add another role first or use set-role --replace")
			}
			out, err := client.SetUserPolicy(context.Background(), args[0], nextPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
func mergePolicyStatements(existing *adminPolicy, addition adminPolicy) adminPolicy {
merged := adminPolicy{}
if existing != nil {
@@ -289,6 +352,25 @@ func policyStatementsEqual(a, b adminPolicyStatement) bool {
return true
}
// removePolicyStatements returns a copy of existing with every statement equal
// to target dropped, along with the number of statements removed. A nil
// existing policy yields an empty policy and zero removals.
func removePolicyStatements(existing *adminPolicy, target adminPolicyStatement) (adminPolicy, int) {
	result := adminPolicy{}
	if existing == nil {
		return result, 0
	}
	result.Principal = existing.Principal
	result.Statements = make([]adminPolicyStatement, 0, len(existing.Statements))
	removed := 0
	for _, statement := range existing.Statements {
		if policyStatementsEqual(statement, target) {
			removed++
		} else {
			result.Statements = append(result.Statements, statement)
		}
	}
	return result, removed
}
func requireAccessKeyArg(usage string) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {

View File

@@ -158,6 +158,13 @@ type CompleteMultipartUploadResult struct {
Location string `xml:"Location,omitempty"`
}
// CopyObjectResult is the XML response body for the S3 CopyObject API,
// carrying the new object's last-modified timestamp and ETag.
type CopyObjectResult struct {
	XMLName      xml.Name `xml:"CopyObjectResult"`
	Xmlns        string   `xml:"xmlns,attr,omitempty"` // S3 XML namespace; omitted when unset
	LastModified string   `xml:"LastModified"`
	ETag         string   `xml:"ETag"`
}
type ListPartsResult struct {
XMLName xml.Name `xml:"ListPartsResult"`
Xmlns string `xml:"xmlns,attr"`

View File

@@ -104,6 +104,38 @@ func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Read
return manifest, nil
}
// CopyObject creates a manifest for dstBucket/dstKey that reuses the source
// object's chunk list, so no chunk data is rewritten. The copy carries the
// source's size, content type, and ETag, with a fresh CreatedAt stamp.
func (s *ObjectService) CopyObject(srcBucket, srcKey, dstBucket, dstKey string) (*models.ObjectManifest, error) {
	startedAt := time.Now()
	succeeded := false
	defer func() {
		metrics.Default.ObserveService("copy_object", time.Since(startedAt), succeeded)
	}()
	// NOTE(review): acquireGCRLock presumably prevents the garbage collector
	// from reclaiming the shared chunks while the destination manifest is
	// written — confirm against the GC implementation.
	unlock := s.acquireGCRLock()
	defer unlock()
	src, err := s.metadata.GetManifest(srcBucket, srcKey)
	if err != nil {
		return nil, err
	}
	dst := &models.ObjectManifest{
		Bucket:      dstBucket,
		Key:         dstKey,
		Size:        src.Size,
		ContentType: src.ContentType,
		ETag:        src.ETag,
		// Detached copy of the chunk list (nil stays nil).
		Chunks:    append([]string(nil), src.Chunks...),
		CreatedAt: time.Now().Unix(),
	}
	if err := s.metadata.PutManifest(dst); err != nil {
		return nil, err
	}
	succeeded = true
	return dst, nil
}
func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) {
start := time.Now()

View File

@@ -38,7 +38,7 @@ func NewConfig() *Config {
config := &Config{
DataPath: sanitizeDataPath(os.Getenv("DATA_PATH")),
Address: firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"),
Port: envIntRange("PORT", 3000, 1, 65535),
Port: envIntRange("PORT", 2600, 1, 65535),
ChunkSize: envIntRange("CHUNK_SIZE", 8192000, 1, 64*1024*1024),
LogLevel: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")),
LogFormat: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_FORMAT")), strings.TrimSpace(os.Getenv("LOG_TYPE")), "text")),