mirror of
https://github.com/ferdzo/fs.git
synced 2026-04-04 20:36:25 +00:00
24
.github/workflows/ci.yml
vendored
Normal file
24
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
export GOCACHE=/tmp/go-build-cache
|
||||
go test ./...
|
||||
|
||||
66
.github/workflows/release-image.yml
vendored
Normal file
66
.github/workflows/release-image.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
name: Release Image
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set build date
|
||||
id: date
|
||||
run: echo "value=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}}
|
||||
type=sha
|
||||
labels: |
|
||||
org.opencontainers.image.title=fs
|
||||
org.opencontainers.image.description=Lightweight S3-compatible object storage
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.created=${{ steps.date.outputs.value }}
|
||||
|
||||
- name: Build and push image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
VERSION=${{ github.ref_name }}
|
||||
COMMIT=${{ github.sha }}
|
||||
DATE=${{ steps.date.outputs.value }}
|
||||
|
||||
@@ -6,7 +6,13 @@ COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/fs .
|
||||
ARG VERSION=dev
|
||||
ARG COMMIT=none
|
||||
ARG DATE=unknown
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build \
|
||||
-trimpath \
|
||||
-ldflags "-s -w -X main.version=${VERSION} -X main.commit=${COMMIT} -X main.date=${DATE}" \
|
||||
-o /app/fs .
|
||||
|
||||
FROM alpine:3.23 AS runner
|
||||
|
||||
|
||||
103
README.md
103
README.md
@@ -2,6 +2,26 @@
|
||||
|
||||
An experimental Object Storage written in Go that should be partially compatible with S3
|
||||
|
||||
## Running
|
||||
|
||||
Single binary, two modes:
|
||||
- `fs` (no subcommand) starts the server (backward compatible)
|
||||
- `fs server` starts the server explicitly
|
||||
- `fs admin ...` runs admin CLI commands
|
||||
|
||||
## Versioning and Releases
|
||||
|
||||
- Versioning follows SemVer tags: `vMAJOR.MINOR.PATCH` (example: `v0.4.2`).
|
||||
- `fs version` shows build metadata (`version`, `commit`, `date`).
|
||||
- Pushing a tag like `v0.4.2` triggers Docker image build/push via GitHub Actions.
|
||||
- Published images: `ghcr.io/<owner>/<repo>:v0.4.2` and related semver tags.
|
||||
|
||||
Tag release example:
|
||||
```bash
|
||||
git tag v0.1.0
|
||||
git push origin v0.1.0
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
Bucket operations:
|
||||
@@ -42,6 +62,77 @@ Admin API (JSON):
|
||||
- `PUT /_admin/v1/users/{accessKeyId}/status`
|
||||
- `DELETE /_admin/v1/users/{accessKeyId}`
|
||||
|
||||
Admin API policy examples (SigV4):
|
||||
```bash
|
||||
ENDPOINT="http://localhost:2600"
|
||||
REGION="us-east-1"
|
||||
ADMIN_ACCESS_KEY="${FS_ROOT_USER}"
|
||||
ADMIN_SECRET_KEY="${FS_ROOT_PASSWORD}"
|
||||
SIGV4="aws:amz:${REGION}:s3"
|
||||
```
|
||||
|
||||
Replace user policy with one scoped statement:
|
||||
```bash
|
||||
curl --aws-sigv4 "$SIGV4" \
|
||||
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
|
||||
-d '{
|
||||
"policy": {
|
||||
"statements": [
|
||||
{
|
||||
"effect": "allow",
|
||||
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
|
||||
"bucket": "backup",
|
||||
"prefix": "restic/*"
|
||||
}
|
||||
]
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
Set multiple statements (for multiple buckets):
|
||||
```bash
|
||||
curl --aws-sigv4 "$SIGV4" \
|
||||
--user "${ADMIN_ACCESS_KEY}:${ADMIN_SECRET_KEY}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X PUT "${ENDPOINT}/_admin/v1/users/test-user/policy" \
|
||||
-d '{
|
||||
"policy": {
|
||||
"statements": [
|
||||
{
|
||||
"effect": "allow",
|
||||
"actions": ["s3:ListBucket", "s3:GetObject"],
|
||||
"bucket": "test-bucket",
|
||||
"prefix": "*"
|
||||
},
|
||||
{
|
||||
"effect": "allow",
|
||||
"actions": ["s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
|
||||
"bucket": "test-bucket-2",
|
||||
"prefix": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
Admin CLI:
|
||||
- `fs admin user create --access-key backup-user --role readwrite`
|
||||
- `fs admin user list`
|
||||
- `fs admin user get backup-user`
|
||||
- `fs admin user set-status backup-user --status disabled`
|
||||
- `fs admin user set-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
|
||||
- `fs admin user set-role backup-user --role readwrite --bucket backups-2` (appends another statement)
|
||||
- `fs admin user remove-role backup-user --role readonly --bucket backup-bucket --prefix restic/`
|
||||
- `fs admin user set-role backup-user --role admin --replace` (replaces all statements)
|
||||
- `fs admin user delete backup-user`
|
||||
- `fs admin snapshot create --data-path /var/lib/fs --out /backup/fs-20260311.tar.gz`
|
||||
- `fs admin snapshot inspect --file /backup/fs-20260311.tar.gz`
|
||||
- `fs admin snapshot restore --file /backup/fs-20260311.tar.gz --data-path /var/lib/fs --force`
|
||||
- `fs admin diag health`
|
||||
- `fs admin diag version`
|
||||
|
||||
## Auth Setup
|
||||
|
||||
Required when `FS_AUTH_ENABLED=true`:
|
||||
@@ -55,6 +146,18 @@ Additional docs:
|
||||
- Admin OpenAPI spec: `docs/admin-api-openapi.yaml`
|
||||
- S3 compatibility matrix: `docs/s3-compatibility.md`
|
||||
|
||||
CLI credential/env resolution for `fs admin`:
|
||||
- Flags: `--access-key`, `--secret-key`, `--endpoint`, `--region`
|
||||
- Env fallback:
|
||||
- `FS_ROOT_USER` / `FS_ROOT_PASSWORD` (same defaults as server bootstrap)
|
||||
- `FSCLI_ACCESS_KEY` / `FSCLI_SECRET_KEY`
|
||||
- `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY`
|
||||
- `FSCLI_ENDPOINT` (fallback to `ADDRESS` + `PORT`, then `http://localhost:2600`)
|
||||
- `FSCLI_REGION` (fallback `FS_AUTH_REGION`, default `us-east-1`)
|
||||
|
||||
Note:
|
||||
- `fs admin snapshot ...` commands operate locally on filesystem paths and do not require endpoint or auth credentials.
|
||||
|
||||
Health:
|
||||
- `GET /healthz`
|
||||
- `HEAD /healthz`
|
||||
|
||||
90
app/server.go
Normal file
90
app/server.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fs/api"
|
||||
"fs/auth"
|
||||
"fs/logging"
|
||||
"fs/metadata"
|
||||
"fs/service"
|
||||
"fs/storage"
|
||||
"fs/utils"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
func RunServer(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
config := utils.NewConfig()
|
||||
logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog)
|
||||
authConfig := auth.ConfigFromValues(
|
||||
config.AuthEnabled,
|
||||
config.AuthRegion,
|
||||
config.AuthSkew,
|
||||
config.AuthMaxPresign,
|
||||
config.AuthMasterKey,
|
||||
config.AuthBootstrapAccessKey,
|
||||
config.AuthBootstrapSecretKey,
|
||||
config.AuthBootstrapPolicy,
|
||||
)
|
||||
logger := logging.NewLogger(logConfig)
|
||||
logger.Info("boot",
|
||||
"log_level", logConfig.LevelName,
|
||||
"log_format", logConfig.Format,
|
||||
"audit_log", logConfig.Audit,
|
||||
"data_path", config.DataPath,
|
||||
"multipart_retention_hours", int(config.MultipartCleanupRetention/time.Hour),
|
||||
"auth_enabled", authConfig.Enabled,
|
||||
"auth_region", authConfig.Region,
|
||||
"admin_api_enabled", config.AdminAPIEnabled,
|
||||
)
|
||||
|
||||
if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
|
||||
logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(config.DataPath, "metadata.db")
|
||||
metadataHandler, err := metadata.NewMetadataHandler(dbPath)
|
||||
if err != nil {
|
||||
logger.Error("failed_to_initialize_metadata_handler", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
|
||||
if err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_initialize_blob_store", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
objectService := service.NewObjectService(metadataHandler, blobHandler, config.MultipartCleanupRetention)
|
||||
authService, err := auth.NewService(authConfig, metadataHandler)
|
||||
if err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_initialize_auth_service", "error", err)
|
||||
return err
|
||||
}
|
||||
if err := authService.EnsureBootstrap(); err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_ensure_bootstrap_auth_identity", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
handler := api.NewHandler(objectService, logger, logConfig, authService, config.AdminAPIEnabled)
|
||||
addr := config.Address + ":" + strconv.Itoa(config.Port)
|
||||
if config.GcEnabled {
|
||||
go objectService.RunGC(ctx, config.GcInterval)
|
||||
}
|
||||
|
||||
if err := handler.Start(ctx, addr); err != nil {
|
||||
logger.Error("server_stopped_with_error", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -137,14 +137,6 @@ Each audit entry includes method, path, remote IP, and request ID (if present).
|
||||
|
||||
## Current Scope / Limitations
|
||||
- No STS/session-token auth yet.
|
||||
- No admin API for managing multiple users yet.
|
||||
- Policy language is intentionally minimal, not full IAM.
|
||||
- No automatic key rotation workflows.
|
||||
|
||||
## Practical Next Step
|
||||
To support multiple users cleanly, add admin operations in auth service + API:
|
||||
- create user
|
||||
- rotate secret
|
||||
- set policy
|
||||
- disable/enable
|
||||
- delete user
|
||||
- No key rotation endpoint for existing users yet.
|
||||
|
||||
123
cmd/admin.go
Normal file
123
cmd/admin.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"fs/utils"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Defaults used when neither flags nor environment variables supply
// connection details.
const (
	defaultAdminEndpoint = "http://localhost:2600"
	defaultAdminRegion   = "us-east-1"
)

// adminOptions carries the connection and output settings shared by every
// `fs admin` subcommand. Values are filled in from flags, environment
// variables, and server-config fallbacks by (*adminOptions).resolve.
type adminOptions struct {
	Endpoint  string        // admin API base URL
	Region    string        // SigV4 signing region
	AccessKey string        // admin access key ID
	SecretKey string        // admin secret key
	JSON      bool          // emit JSON instead of human-readable tables
	Timeout   time.Duration // per-request HTTP timeout
}
|
||||
|
||||
func newAdminCommand(build BuildInfo) *cobra.Command {
|
||||
opts := &adminOptions{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "admin",
|
||||
Short: "Admin operations over the fs admin API",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVar(&opts.Endpoint, "endpoint", "", "Admin API endpoint (env: FSCLI_ENDPOINT, fallback ADDRESS+PORT)")
|
||||
cmd.PersistentFlags().StringVar(&opts.Region, "region", "", "SigV4 region (env: FSCLI_REGION or FS_AUTH_REGION)")
|
||||
cmd.PersistentFlags().StringVar(&opts.AccessKey, "access-key", "", "Admin access key (env: FS_ROOT_USER, FSCLI_ACCESS_KEY)")
|
||||
cmd.PersistentFlags().StringVar(&opts.SecretKey, "secret-key", "", "Admin secret key (env: FS_ROOT_PASSWORD, FSCLI_SECRET_KEY)")
|
||||
cmd.PersistentFlags().BoolVar(&opts.JSON, "json", false, "Emit JSON output")
|
||||
cmd.PersistentFlags().DurationVar(&opts.Timeout, "timeout", 15*time.Second, "HTTP timeout")
|
||||
|
||||
cmd.AddCommand(newAdminUserCommand(opts))
|
||||
cmd.AddCommand(newAdminDiagCommand(opts, build))
|
||||
cmd.AddCommand(newAdminSnapshotCommand(opts))
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (o *adminOptions) resolve(requireCredentials bool) error {
|
||||
serverCfg := utils.NewConfig()
|
||||
o.Endpoint = strings.TrimSpace(firstNonEmpty(
|
||||
o.Endpoint,
|
||||
os.Getenv("FSCLI_ENDPOINT"),
|
||||
endpointFromServerConfig(serverCfg.Address, serverCfg.Port),
|
||||
defaultAdminEndpoint,
|
||||
))
|
||||
o.Region = strings.TrimSpace(firstNonEmpty(
|
||||
o.Region,
|
||||
os.Getenv("FSCLI_REGION"),
|
||||
os.Getenv("FS_AUTH_REGION"),
|
||||
serverCfg.AuthRegion,
|
||||
defaultAdminRegion,
|
||||
))
|
||||
o.AccessKey = strings.TrimSpace(firstNonEmpty(
|
||||
o.AccessKey,
|
||||
os.Getenv("FS_ROOT_USER"),
|
||||
os.Getenv("FSCLI_ACCESS_KEY"),
|
||||
os.Getenv("AWS_ACCESS_KEY_ID"),
|
||||
serverCfg.AuthBootstrapAccessKey,
|
||||
))
|
||||
o.SecretKey = strings.TrimSpace(firstNonEmpty(
|
||||
o.SecretKey,
|
||||
os.Getenv("FS_ROOT_PASSWORD"),
|
||||
os.Getenv("FSCLI_SECRET_KEY"),
|
||||
os.Getenv("AWS_SECRET_ACCESS_KEY"),
|
||||
serverCfg.AuthBootstrapSecretKey,
|
||||
))
|
||||
|
||||
if o.Timeout <= 0 {
|
||||
o.Timeout = 15 * time.Second
|
||||
}
|
||||
|
||||
if o.Endpoint == "" {
|
||||
return errors.New("admin endpoint is required")
|
||||
}
|
||||
parsed, err := url.Parse(o.Endpoint)
|
||||
if err != nil || parsed.Scheme == "" || parsed.Host == "" {
|
||||
return fmt.Errorf("invalid endpoint %q", o.Endpoint)
|
||||
}
|
||||
if o.Region == "" {
|
||||
return errors.New("region is required")
|
||||
}
|
||||
if requireCredentials && (o.AccessKey == "" || o.SecretKey == "") {
|
||||
return errors.New("credentials required: set --access-key/--secret-key or FSCLI_ACCESS_KEY/FSCLI_SECRET_KEY")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// endpointFromServerConfig derives an admin endpoint URL from the server's
// configured bind address and port. Wildcard/unspecified bind addresses are
// rewritten to localhost, and out-of-range ports fall back to 2600.
func endpointFromServerConfig(address string, port int) string {
	host := strings.TrimSpace(address)
	switch host {
	case "", "0.0.0.0", "::", "[::]":
		host = "localhost"
	}
	if port <= 0 || port > 65535 {
		port = 2600
	}
	return "http://" + net.JoinHostPort(host, strconv.Itoa(port))
}
|
||||
|
||||
// firstNonEmpty returns the first candidate that is non-empty after trimming
// surrounding whitespace; the trimmed form is what gets returned. It returns
// "" when every candidate is empty or whitespace-only.
//
// The original called strings.TrimSpace twice per candidate (once in the
// test, once in the return); trimming once removes the redundant work.
func firstNonEmpty(values ...string) string {
	for _, v := range values {
		if trimmed := strings.TrimSpace(v); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
|
||||
281
cmd/admin_client.go
Normal file
281
cmd/admin_client.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// adminUserListItem is one row of a paginated user listing.
type adminUserListItem struct {
	AccessKeyID string `json:"accessKeyId"`
	Status      string `json:"status"`
	CreatedAt   int64  `json:"createdAt"` // unix seconds (rendered by formatUnix)
	UpdatedAt   int64  `json:"updatedAt"` // unix seconds
}

// adminUserListResponse is the payload of GET /_admin/v1/users.
type adminUserListResponse struct {
	Items      []adminUserListItem `json:"items"`
	NextCursor string              `json:"nextCursor,omitempty"` // opaque pagination cursor
}

// adminPolicyStatement applies an effect to a set of S3 actions on one
// bucket/prefix pair.
type adminPolicyStatement struct {
	Effect  string   `json:"effect"`
	Actions []string `json:"actions"`
	Bucket  string   `json:"bucket"`
	Prefix  string   `json:"prefix"`
}

// adminPolicy is an ordered list of statements, optionally bound to a
// principal.
type adminPolicy struct {
	Principal  string                 `json:"principal,omitempty"`
	Statements []adminPolicyStatement `json:"statements"`
}

// adminUserResponse describes a single user. SecretKey is set only when the
// server includes it in the response.
type adminUserResponse struct {
	AccessKeyID string       `json:"accessKeyId"`
	Status      string       `json:"status"`
	CreatedAt   int64        `json:"createdAt"`
	UpdatedAt   int64        `json:"updatedAt"`
	Policy      *adminPolicy `json:"policy,omitempty"`
	SecretKey   string       `json:"secretKey,omitempty"`
}

// createUserRequest is the body of POST /_admin/v1/users.
type createUserRequest struct {
	AccessKeyID string      `json:"accessKeyId"`
	SecretKey   string      `json:"secretKey,omitempty"`
	Status      string      `json:"status,omitempty"`
	Policy      adminPolicy `json:"policy"`
}

// setStatusRequest is the body of PUT /_admin/v1/users/{id}/status.
type setStatusRequest struct {
	Status string `json:"status"`
}

// setPolicyRequest is the body of PUT /_admin/v1/users/{id}/policy.
type setPolicyRequest struct {
	Policy adminPolicy `json:"policy"`
}

// adminErrorResponse is the JSON error envelope returned by the admin API.
type adminErrorResponse struct {
	Code      string `json:"code"`
	Message   string `json:"message"`
	RequestID string `json:"requestId,omitempty"`
}
|
||||
|
||||
// adminAPIError is a non-2xx admin API response surfaced as a Go error.
type adminAPIError struct {
	StatusCode int    // HTTP status of the failed request
	Code       string // machine-readable error code, when the body parsed
	Message    string // human-readable message, when present
	RequestID  string // server-assigned request ID, when present
}

// Error renders the most specific description available: code and message
// (plus requestId when present), falling back to the bare HTTP status when
// no structured code was parsed. A nil receiver yields the empty string.
func (e *adminAPIError) Error() string {
	switch {
	case e == nil:
		return ""
	case e.Code == "":
		return fmt.Sprintf("admin API request failed: status=%d", e.StatusCode)
	case e.RequestID == "":
		return fmt.Sprintf("%s: %s", e.Code, e.Message)
	default:
		return fmt.Sprintf("%s: %s (requestId=%s)", e.Code, e.Message, e.RequestID)
	}
}
|
||||
|
||||
type adminAPIClient struct {
|
||||
baseURL *url.URL
|
||||
region string
|
||||
accessKey string
|
||||
secretKey string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func newAdminAPIClient(opts *adminOptions, requireCredentials bool) (*adminAPIClient, error) {
|
||||
if opts == nil {
|
||||
return nil, errors.New("admin options are required")
|
||||
}
|
||||
if err := opts.resolve(requireCredentials); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseURL, err := url.Parse(opts.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &adminAPIClient{
|
||||
baseURL: baseURL,
|
||||
region: opts.Region,
|
||||
accessKey: opts.AccessKey,
|
||||
secretKey: opts.SecretKey,
|
||||
client: &http.Client{
|
||||
Timeout: opts.Timeout,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) CreateUser(ctx context.Context, request createUserRequest) (*adminUserResponse, error) {
|
||||
var out adminUserResponse
|
||||
if err := c.doJSON(ctx, http.MethodPost, "/_admin/v1/users", nil, request, &out, http.StatusCreated); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) ListUsers(ctx context.Context, limit int, cursor string) (*adminUserListResponse, error) {
|
||||
query := make(url.Values)
|
||||
if limit > 0 {
|
||||
query.Set("limit", strconv.Itoa(limit))
|
||||
}
|
||||
if strings.TrimSpace(cursor) != "" {
|
||||
query.Set("cursor", strings.TrimSpace(cursor))
|
||||
}
|
||||
|
||||
var out adminUserListResponse
|
||||
if err := c.doJSON(ctx, http.MethodGet, "/_admin/v1/users", query, nil, &out, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) GetUser(ctx context.Context, accessKeyID string) (*adminUserResponse, error) {
|
||||
var out adminUserResponse
|
||||
path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
|
||||
if err := c.doJSON(ctx, http.MethodGet, path, nil, nil, &out, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) DeleteUser(ctx context.Context, accessKeyID string) error {
|
||||
path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID))
|
||||
return c.doJSON(ctx, http.MethodDelete, path, nil, nil, nil, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) SetUserStatus(ctx context.Context, accessKeyID, status string) (*adminUserResponse, error) {
|
||||
var out adminUserResponse
|
||||
path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/status"
|
||||
if err := c.doJSON(ctx, http.MethodPut, path, nil, setStatusRequest{Status: status}, &out, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) SetUserPolicy(ctx context.Context, accessKeyID string, policy adminPolicy) (*adminUserResponse, error) {
|
||||
var out adminUserResponse
|
||||
path := "/_admin/v1/users/" + url.PathEscape(strings.TrimSpace(accessKeyID)) + "/policy"
|
||||
if err := c.doJSON(ctx, http.MethodPut, path, nil, setPolicyRequest{Policy: policy}, &out, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) Health(ctx context.Context) (string, error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, "/healthz", nil, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<10))
|
||||
text := strings.TrimSpace(string(body))
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if text == "" {
|
||||
text = http.StatusText(resp.StatusCode)
|
||||
}
|
||||
return text, fmt.Errorf("health check failed: status=%d", resp.StatusCode)
|
||||
}
|
||||
if text == "" {
|
||||
text = "ok"
|
||||
}
|
||||
return text, nil
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) doJSON(
|
||||
ctx context.Context,
|
||||
method string,
|
||||
path string,
|
||||
query url.Values,
|
||||
body any,
|
||||
out any,
|
||||
expectedStatus int,
|
||||
) error {
|
||||
var payload []byte
|
||||
var err error
|
||||
if body != nil {
|
||||
payload, err = json.Marshal(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
req, err := c.newRequest(ctx, method, path, query, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(payload) > 0 {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
if err := signSigV4Request(req, payload, c.accessKey, c.secretKey, c.region, "s3"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
raw, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
|
||||
if readErr != nil {
|
||||
return readErr
|
||||
}
|
||||
if resp.StatusCode != expectedStatus {
|
||||
apiErr := &adminAPIError{StatusCode: resp.StatusCode}
|
||||
parsed := adminErrorResponse{}
|
||||
if len(raw) > 0 && json.Unmarshal(raw, &parsed) == nil {
|
||||
apiErr.Code = parsed.Code
|
||||
apiErr.Message = parsed.Message
|
||||
apiErr.RequestID = parsed.RequestID
|
||||
}
|
||||
if apiErr.Message == "" {
|
||||
apiErr.Message = strings.TrimSpace(string(raw))
|
||||
}
|
||||
return apiErr
|
||||
}
|
||||
|
||||
if out == nil || len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
return json.NewDecoder(bytes.NewReader(raw)).Decode(out)
|
||||
}
|
||||
|
||||
func (c *adminAPIClient) newRequest(
|
||||
ctx context.Context,
|
||||
method string,
|
||||
path string,
|
||||
query url.Values,
|
||||
payload []byte,
|
||||
) (*http.Request, error) {
|
||||
u := *c.baseURL
|
||||
u.Path = strings.TrimRight(c.baseURL.Path, "/") + path
|
||||
u.RawQuery = ""
|
||||
if len(query) > 0 {
|
||||
u.RawQuery = query.Encode()
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, method, u.String(), bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
68
cmd/admin_diag.go
Normal file
68
cmd/admin_diag.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newAdminDiagCommand(opts *adminOptions, build BuildInfo) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "diag",
|
||||
Short: "Diagnostics and connectivity checks",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
cmd.AddCommand(newAdminDiagHealthCommand(opts))
|
||||
cmd.AddCommand(newAdminDiagVersionCommand(build, opts))
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newAdminDiagHealthCommand(opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "health",
|
||||
Short: "Check server health endpoint",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := newAdminAPIClient(opts, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
status, err := client.Health(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), map[string]string{
|
||||
"status": status,
|
||||
})
|
||||
}
|
||||
_, err = fmt.Fprintf(cmd.OutOrStdout(), "health: %s\n", status)
|
||||
return err
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newAdminDiagVersionCommand(build BuildInfo, opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print CLI version metadata",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
out := map[string]string{
|
||||
"version": build.Version,
|
||||
"commit": build.Commit,
|
||||
"date": build.Date,
|
||||
"go": runtime.Version(),
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), out)
|
||||
}
|
||||
_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n", out["version"], out["commit"], out["date"], out["go"])
|
||||
return err
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
80
cmd/admin_output.go
Normal file
80
cmd/admin_output.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
)
|
||||
|
||||
// writeJSON pretty-prints value to out as two-space-indented JSON; the
// encoder appends a trailing newline.
func writeJSON(out io.Writer, value any) error {
	enc := json.NewEncoder(out)
	enc.SetIndent("", "  ")
	return enc.Encode(value)
}
|
||||
|
||||
// formatUnix renders a unix-seconds timestamp as RFC 3339 in UTC, or "-" for
// zero/negative (unset) values.
func formatUnix(ts int64) string {
	if ts <= 0 {
		return "-"
	}
	return time.Unix(ts, 0).UTC().Format(time.RFC3339)
}
|
||||
|
||||
func writeUserListTable(out io.Writer, value *adminUserListResponse) error {
|
||||
w := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
|
||||
if _, err := fmt.Fprintln(w, "ACCESS_KEY_ID\tSTATUS\tCREATED_AT\tUPDATED_AT"); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range value.Items {
|
||||
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", item.AccessKeyID, item.Status, formatUnix(item.CreatedAt), formatUnix(item.UpdatedAt)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if strings.TrimSpace(value.NextCursor) != "" {
|
||||
if _, err := fmt.Fprintf(w, "\nNEXT_CURSOR\t%s\t\t\n", value.NextCursor); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.Flush()
|
||||
}
|
||||
|
||||
func writeUserTable(out io.Writer, value *adminUserResponse, includeSecret bool) error {
|
||||
w := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0)
|
||||
if _, err := fmt.Fprintf(w, "accessKeyId\t%s\n", value.AccessKeyID); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "status\t%s\n", value.Status); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "createdAt\t%s\n", formatUnix(value.CreatedAt)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "updatedAt\t%s\n", formatUnix(value.UpdatedAt)); err != nil {
|
||||
return err
|
||||
}
|
||||
if includeSecret && strings.TrimSpace(value.SecretKey) != "" {
|
||||
if _, err := fmt.Fprintf(w, "secretKey\t%s\n", value.SecretKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if value.Policy != nil {
|
||||
for i, stmt := range value.Policy.Statements {
|
||||
idx := i + 1
|
||||
if _, err := fmt.Fprintf(w, "policy[%d].effect\t%s\n", idx, stmt.Effect); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "policy[%d].actions\t%s\n", idx, strings.Join(stmt.Actions, ",")); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "policy[%d].bucket\t%s\n", idx, stmt.Bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "policy[%d].prefix\t%s\n", idx, stmt.Prefix); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.Flush()
|
||||
}
|
||||
47
cmd/admin_policy.go
Normal file
47
cmd/admin_policy.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type rolePolicyOptions struct {
|
||||
Role string
|
||||
Bucket string
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func buildPolicyFromRole(opts rolePolicyOptions) (adminPolicy, error) {
|
||||
role := strings.ToLower(strings.TrimSpace(opts.Role))
|
||||
bucket := strings.TrimSpace(opts.Bucket)
|
||||
prefix := strings.TrimSpace(opts.Prefix)
|
||||
if bucket == "" {
|
||||
bucket = "*"
|
||||
}
|
||||
if prefix == "" {
|
||||
prefix = "*"
|
||||
}
|
||||
|
||||
var actions []string
|
||||
switch role {
|
||||
case "admin":
|
||||
actions = []string{"s3:*"}
|
||||
case "readwrite":
|
||||
actions = []string{"s3:ListBucket", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"}
|
||||
case "readonly":
|
||||
actions = []string{"s3:ListBucket", "s3:GetObject"}
|
||||
default:
|
||||
return adminPolicy{}, fmt.Errorf("invalid role %q (allowed: admin, readwrite, readonly)", opts.Role)
|
||||
}
|
||||
|
||||
return adminPolicy{
|
||||
Statements: []adminPolicyStatement{
|
||||
{
|
||||
Effect: "allow",
|
||||
Actions: actions,
|
||||
Bucket: bucket,
|
||||
Prefix: prefix,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
166
cmd/admin_sigv4.go
Normal file
166
cmd/admin_sigv4.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const sigV4Algorithm = "AWS4-HMAC-SHA256"
|
||||
|
||||
func signSigV4Request(req *http.Request, body []byte, accessKey, secretKey, region, service string) error {
|
||||
if req == nil {
|
||||
return fmt.Errorf("nil request")
|
||||
}
|
||||
if strings.TrimSpace(accessKey) == "" || strings.TrimSpace(secretKey) == "" {
|
||||
return fmt.Errorf("missing signing credentials")
|
||||
}
|
||||
if strings.TrimSpace(region) == "" || strings.TrimSpace(service) == "" {
|
||||
return fmt.Errorf("missing signing scope")
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
amzDate := now.Format("20060102T150405Z")
|
||||
shortDate := now.Format("20060102")
|
||||
scope := shortDate + "/" + region + "/" + service + "/aws4_request"
|
||||
|
||||
payloadHash := sha256Hex(body)
|
||||
req.Header.Set("x-amz-date", amzDate)
|
||||
req.Header.Set("x-amz-content-sha256", payloadHash)
|
||||
|
||||
host := req.URL.Host
|
||||
signedHeaders := []string{"host", "x-amz-content-sha256", "x-amz-date"}
|
||||
canonicalRequest, signedHeadersRaw := buildCanonicalRequest(req, signedHeaders, payloadHash)
|
||||
stringToSign := buildStringToSign(amzDate, scope, canonicalRequest)
|
||||
signature := hex.EncodeToString(hmacSHA256(deriveSigningKey(secretKey, shortDate, region, service), stringToSign))
|
||||
|
||||
authHeader := fmt.Sprintf(
|
||||
"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
|
||||
sigV4Algorithm,
|
||||
accessKey,
|
||||
scope,
|
||||
signedHeadersRaw,
|
||||
signature,
|
||||
)
|
||||
req.Header.Set("Authorization", authHeader)
|
||||
req.Host = host
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildCanonicalRequest(req *http.Request, signedHeaders []string, payloadHash string) (string, string) {
|
||||
canonicalHeaders, signedHeadersRaw := canonicalHeaders(req, signedHeaders)
|
||||
return strings.Join([]string{
|
||||
req.Method,
|
||||
canonicalPath(req.URL),
|
||||
canonicalQuery(req.URL),
|
||||
canonicalHeaders,
|
||||
signedHeadersRaw,
|
||||
payloadHash,
|
||||
}, "\n"), signedHeadersRaw
|
||||
}
|
||||
|
||||
// canonicalPath returns the URL's escaped path for the canonical request,
// falling back to "/" for a nil URL or an empty path.
func canonicalPath(u *url.URL) string {
	if u == nil {
		return "/"
	}
	if p := u.EscapedPath(); p != "" {
		return p
	}
	return "/"
}
|
||||
|
||||
func canonicalQuery(u *url.URL) string {
|
||||
if u == nil {
|
||||
return ""
|
||||
}
|
||||
values := u.Query()
|
||||
type pair struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
pairs := make([]pair, 0, len(values))
|
||||
for key, vals := range values {
|
||||
if len(vals) == 0 {
|
||||
pairs = append(pairs, pair{key: key, value: ""})
|
||||
continue
|
||||
}
|
||||
for _, v := range vals {
|
||||
pairs = append(pairs, pair{key: key, value: v})
|
||||
}
|
||||
}
|
||||
sort.Slice(pairs, func(i, j int) bool {
|
||||
if pairs[i].key == pairs[j].key {
|
||||
return pairs[i].value < pairs[j].value
|
||||
}
|
||||
return pairs[i].key < pairs[j].key
|
||||
})
|
||||
out := make([]string, 0, len(pairs))
|
||||
for _, p := range pairs {
|
||||
out = append(out, awsEncodeQuery(p.key)+"="+awsEncodeQuery(p.value))
|
||||
}
|
||||
return strings.Join(out, "&")
|
||||
}
|
||||
|
||||
// awsEncodeQuery percent-encodes a query component per AWS SigV4 rules:
// standard query escaping, but space as %20 (never "+"), "*" escaped,
// and "~" left literal.
func awsEncodeQuery(value string) string {
	return strings.NewReplacer(
		"+", "%20",
		"*", "%2A",
		"%7E", "~",
	).Replace(url.QueryEscape(value))
}
|
||||
|
||||
// canonicalHeaders builds the SigV4 canonical header block (one
// "name:value\n" line per header, whitespace-collapsed values) and the
// semicolon-joined list of signed header names, in input order.
func canonicalHeaders(req *http.Request, headers []string) (string, string) {
	order := make([]string, 0, len(headers))
	lines := make([]string, 0, len(headers))
	for _, raw := range headers {
		name := strings.ToLower(strings.TrimSpace(raw))
		if name == "" {
			continue
		}
		var value string
		switch name {
		case "host":
			// The host pseudo-header comes from the URL, not req.Header.
			value = req.URL.Host
		default:
			value = strings.Join(req.Header.Values(http.CanonicalHeaderKey(name)), ",")
		}
		// Collapse internal runs of whitespace to single spaces.
		value = strings.Join(strings.Fields(strings.TrimSpace(value)), " ")
		order = append(order, name)
		lines = append(lines, name+":"+value)
	}
	return strings.Join(lines, "\n") + "\n", strings.Join(order, ";")
}
|
||||
|
||||
func buildStringToSign(amzDate, scope, canonicalRequest string) string {
|
||||
hash := sha256.Sum256([]byte(canonicalRequest))
|
||||
return strings.Join([]string{
|
||||
sigV4Algorithm,
|
||||
amzDate,
|
||||
scope,
|
||||
hex.EncodeToString(hash[:]),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func deriveSigningKey(secret, date, region, service string) []byte {
|
||||
kDate := hmacSHA256([]byte("AWS4"+secret), date)
|
||||
kRegion := hmacSHA256(kDate, region)
|
||||
kService := hmacSHA256(kRegion, service)
|
||||
return hmacSHA256(kService, "aws4_request")
|
||||
}
|
||||
|
||||
// hmacSHA256 returns the raw 32-byte HMAC-SHA256 of message under key.
func hmacSHA256(key []byte, message string) []byte {
	h := hmac.New(sha256.New, key)
	// hash.Hash.Write never returns an error by contract.
	_, _ = h.Write([]byte(message))
	return h.Sum(nil)
}
|
||||
|
||||
// sha256Hex returns the hex-encoded SHA-256 digest of payload.
func sha256Hex(payload []byte) string {
	digest := sha256.Sum256(payload)
	return hex.EncodeToString(digest[:])
}
|
||||
728
cmd/admin_snapshot.go
Normal file
728
cmd/admin_snapshot.go
Normal file
@@ -0,0 +1,728 @@
|
||||
package cmd
|
||||
|
||||
import (
	"archive/tar"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"
	bolt "go.etcd.io/bbolt"
)
|
||||
|
||||
const (
	// snapshotManifestPath is the in-archive location of the manifest file.
	snapshotManifestPath = ".fs-snapshot/manifest.json"
	// snapshotFormat is the manifest format version this build reads and writes.
	snapshotFormat = 1
)
|
||||
|
||||
// snapshotFileEntry describes one regular file captured in a snapshot:
// its slash-separated path relative to the data root, its size in bytes,
// and the hex-encoded SHA-256 of its contents.
type snapshotFileEntry struct {
	Path   string `json:"path"`
	Size   int64  `json:"size"`
	SHA256 string `json:"sha256"`
}
|
||||
|
||||
// snapshotManifest is the archive's self-description, stored inside the
// tarball at snapshotManifestPath and used to verify integrity during
// inspect and restore.
type snapshotManifest struct {
	FormatVersion int                 `json:"formatVersion"`
	CreatedAt     string              `json:"createdAt"`
	SourcePath    string              `json:"sourcePath"`
	Files         []snapshotFileEntry `json:"files"`
}
|
||||
|
||||
// snapshotSummary is the user-facing result of a snapshot operation
// (create/inspect/restore), suitable for JSON or text output.
type snapshotSummary struct {
	SnapshotFile string `json:"snapshotFile"`
	CreatedAt    string `json:"createdAt"`
	SourcePath   string `json:"sourcePath"`
	FileCount    int    `json:"fileCount"`
	TotalBytes   int64  `json:"totalBytes"`
}
|
||||
|
||||
func newAdminSnapshotCommand(opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "snapshot",
|
||||
Short: "Offline snapshot and restore utilities",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
cmd.AddCommand(newAdminSnapshotCreateCommand(opts))
|
||||
cmd.AddCommand(newAdminSnapshotInspectCommand(opts))
|
||||
cmd.AddCommand(newAdminSnapshotRestoreCommand(opts))
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newAdminSnapshotCreateCommand builds "fs admin snapshot create", which
// packs an offline data path into a verified .tar.gz snapshot.
func newAdminSnapshotCreateCommand(opts *adminOptions) *cobra.Command {
	var dataPath string
	var outFile string
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create offline snapshot tarball (.tar.gz)",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Both flags are mandatory; trim so whitespace-only values count as missing.
			dataPath = strings.TrimSpace(dataPath)
			outFile = strings.TrimSpace(outFile)
			if dataPath == "" {
				return usageError("fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>", "--data-path is required")
			}
			if outFile == "" {
				return usageError("fs admin snapshot create --data-path <path> --out <snapshot.tar.gz>", "--out is required")
			}

			result, err := createSnapshotArchive(context.Background(), dataPath, outFile)
			if err != nil {
				return err
			}
			// --json emits the machine-readable summary; otherwise a one-line report.
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(cmd.OutOrStdout(), "snapshot created: %s (files=%d bytes=%d)\n", result.SnapshotFile, result.FileCount, result.TotalBytes)
			return err
		},
	}
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Source data path (must contain metadata.db)")
	cmd.Flags().StringVar(&outFile, "out", "", "Output snapshot file path (.tar.gz)")
	return cmd
}
|
||||
|
||||
// newAdminSnapshotInspectCommand builds "fs admin snapshot inspect",
// which verifies a snapshot archive's checksums against its manifest.
func newAdminSnapshotInspectCommand(opts *adminOptions) *cobra.Command {
	var filePath string
	cmd := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect and verify snapshot archive integrity",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			if filePath == "" {
				return usageError("fs admin snapshot inspect --file <snapshot.tar.gz>", "--file is required")
			}

			// inspectSnapshotArchive fails if any file is missing or corrupt.
			manifest, summary, err := inspectSnapshotArchive(filePath)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), map[string]any{
					"summary":  summary,
					"manifest": manifest,
				})
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot ok: %s\ncreated_at=%s source=%s files=%d bytes=%d\n",
				summary.SnapshotFile,
				summary.CreatedAt,
				summary.SourcePath,
				summary.FileCount,
				summary.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	return cmd
}
|
||||
|
||||
// newAdminSnapshotRestoreCommand builds "fs admin snapshot restore",
// which extracts a verified snapshot into a data path. The data path
// must be offline; --force allows overwriting an existing destination.
func newAdminSnapshotRestoreCommand(opts *adminOptions) *cobra.Command {
	var filePath string
	var dataPath string
	var force bool
	cmd := &cobra.Command{
		Use:   "restore",
		Short: "Restore snapshot into a data path (offline only)",
		RunE: func(cmd *cobra.Command, args []string) error {
			filePath = strings.TrimSpace(filePath)
			dataPath = strings.TrimSpace(dataPath)
			if filePath == "" {
				return usageError("fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]", "--file is required")
			}
			if dataPath == "" {
				return usageError("fs admin snapshot restore --file <snapshot.tar.gz> --data-path <path> [--force]", "--data-path is required")
			}

			result, err := restoreSnapshotArchive(context.Background(), filePath, dataPath, force)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), result)
			}
			_, err = fmt.Fprintf(
				cmd.OutOrStdout(),
				"snapshot restored to %s (files=%d bytes=%d)\n",
				result.SourcePath,
				result.FileCount,
				result.TotalBytes,
			)
			return err
		},
	}
	cmd.Flags().StringVar(&filePath, "file", "", "Snapshot file path (.tar.gz)")
	cmd.Flags().StringVar(&dataPath, "data-path", "", "Destination data path")
	cmd.Flags().BoolVar(&force, "force", false, "Overwrite destination data path if it exists")
	return cmd
}
|
||||
|
||||
// createSnapshotArchive packs the offline data directory at dataPath
// into a gzip-compressed tarball at outFile. The archive starts with a
// manifest (paths, sizes, SHA-256 sums) followed by every regular file.
// The tarball is written to a temp file, fsynced, then atomically
// renamed into place, so a crash never leaves a partial outFile.
func createSnapshotArchive(ctx context.Context, dataPath, outFile string) (*snapshotSummary, error) {
	_ = ctx // reserved for future cancellation support
	sourceAbs, err := filepath.Abs(filepath.Clean(dataPath))
	if err != nil {
		return nil, err
	}
	outAbs, err := filepath.Abs(filepath.Clean(outFile))
	if err != nil {
		return nil, err
	}
	// Writing the archive into its own source would make it snapshot itself.
	if isPathWithin(sourceAbs, outAbs) {
		return nil, errors.New("output file cannot be inside --data-path")
	}

	info, err := os.Stat(sourceAbs)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("data path %q is not a directory", sourceAbs)
	}
	// The path must look like an fs data dir and not be held by a running server.
	if err := ensureMetadataExists(sourceAbs); err != nil {
		return nil, err
	}
	if err := ensureDataPathOffline(sourceAbs); err != nil {
		return nil, err
	}

	// Hash every file up front; the manifest drives what gets archived.
	// NOTE(review): files are re-read when written to the tar below, so a
	// concurrent modification between these steps would be caught only by
	// tar size checks — the data path is assumed quiescent.
	manifest, totalBytes, err := buildSnapshotManifest(sourceAbs)
	if err != nil {
		return nil, err
	}

	if err := os.MkdirAll(filepath.Dir(outAbs), 0o755); err != nil {
		return nil, err
	}
	// O_EXCL on a unique temp name guards against concurrent creates.
	tmpPath := outAbs + ".tmp-" + strconvNowNano()
	file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Safety net; the happy path closes explicitly before rename.
		_ = file.Close()
	}()

	gzw := gzip.NewWriter(file)
	tw := tar.NewWriter(gzw)
	if err := writeManifestToTar(tw, manifest); err != nil {
		_ = tw.Close()
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}

	for _, entry := range manifest.Files {
		absPath := filepath.Join(sourceAbs, filepath.FromSlash(entry.Path))
		if err := writeFileToTar(tw, absPath, entry.Path); err != nil {
			_ = tw.Close()
			_ = gzw.Close()
			_ = os.Remove(tmpPath)
			return nil, err
		}
	}

	// Close order matters: tar flushes into gzip, gzip flushes into the file.
	if err := tw.Close(); err != nil {
		_ = gzw.Close()
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := gzw.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Sync(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := file.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	// Atomic publish, then sync the directory so the rename is durable.
	if err := os.Rename(tmpPath, outAbs); err != nil {
		_ = os.Remove(tmpPath)
		return nil, err
	}
	if err := syncDir(filepath.Dir(outAbs)); err != nil {
		return nil, err
	}

	return &snapshotSummary{
		SnapshotFile: outAbs,
		CreatedAt:    manifest.CreatedAt,
		SourcePath:   sourceAbs,
		FileCount:    len(manifest.Files),
		TotalBytes:   totalBytes,
	}, nil
}
|
||||
|
||||
func inspectSnapshotArchive(filePath string) (*snapshotManifest, *snapshotSummary, error) {
|
||||
fileAbs, err := filepath.Abs(filepath.Clean(filePath))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
manifest, actual, err := readSnapshotArchive(fileAbs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
expected := map[string]snapshotFileEntry{}
|
||||
var totalBytes int64
|
||||
for _, entry := range manifest.Files {
|
||||
expected[entry.Path] = entry
|
||||
totalBytes += entry.Size
|
||||
}
|
||||
if len(expected) != len(actual) {
|
||||
return nil, nil, fmt.Errorf("snapshot validation failed: expected %d files, got %d", len(expected), len(actual))
|
||||
}
|
||||
for path, exp := range expected {
|
||||
got, ok := actual[path]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("snapshot validation failed: missing file %s", path)
|
||||
}
|
||||
if got.Size != exp.Size || got.SHA256 != exp.SHA256 {
|
||||
return nil, nil, fmt.Errorf("snapshot validation failed: checksum mismatch for %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
return manifest, &snapshotSummary{
|
||||
SnapshotFile: fileAbs,
|
||||
CreatedAt: manifest.CreatedAt,
|
||||
SourcePath: manifest.SourcePath,
|
||||
FileCount: len(manifest.Files),
|
||||
TotalBytes: totalBytes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func restoreSnapshotArchive(ctx context.Context, filePath, destinationPath string, force bool) (*snapshotSummary, error) {
|
||||
_ = ctx
|
||||
manifest, summary, err := inspectSnapshotArchive(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
destAbs, err := filepath.Abs(filepath.Clean(destinationPath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if fi, statErr := os.Stat(destAbs); statErr == nil && fi.IsDir() {
|
||||
if err := ensureDataPathOffline(destAbs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries, err := os.ReadDir(destAbs)
|
||||
if err == nil && len(entries) > 0 && !force {
|
||||
return nil, errors.New("destination data path is not empty; use --force to overwrite")
|
||||
}
|
||||
}
|
||||
|
||||
parent := filepath.Dir(destAbs)
|
||||
if err := os.MkdirAll(parent, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stage := filepath.Join(parent, "."+filepath.Base(destAbs)+".restore-"+strconvNowNano())
|
||||
if err := os.MkdirAll(stage, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupStage := true
|
||||
defer func() {
|
||||
if cleanupStage {
|
||||
_ = os.RemoveAll(stage)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := extractSnapshotArchive(filePath, stage, manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := syncDir(stage); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(destAbs); err == nil {
|
||||
if !force {
|
||||
return nil, errors.New("destination data path exists; use --force to overwrite")
|
||||
}
|
||||
if err := os.RemoveAll(destAbs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Rename(stage, destAbs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := syncDir(parent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupStage = false
|
||||
|
||||
summary.SourcePath = destAbs
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
// ensureMetadataExists verifies that dataPath contains a regular
// metadata.db file, which marks the directory as an fs data path.
// A missing file and a stat failure (e.g. permission denied) are
// reported distinctly so the real cause is not masked as "not found".
func ensureMetadataExists(dataPath string) error {
	dbPath := filepath.Join(dataPath, "metadata.db")
	info, err := os.Stat(dbPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("metadata.db not found in %s", dataPath)
		}
		// Permission or I/O problems are not "not found"; surface the cause.
		return fmt.Errorf("stat metadata.db in %s: %w", dataPath, err)
	}
	if !info.Mode().IsRegular() {
		return fmt.Errorf("metadata.db in %s is not a regular file", dataPath)
	}
	return nil
}
|
||||
|
||||
// ensureDataPathOffline verifies no running process holds the bolt
// metadata.db at dataPath. It briefly opens the database read-only with
// a short lock timeout: success (then close) means the file is unlocked,
// a timeout means a server is using it. A missing metadata.db counts as
// offline.
func ensureDataPathOffline(dataPath string) error {
	dbPath := filepath.Join(dataPath, "metadata.db")
	if _, err := os.Stat(dbPath); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// No database at all — nothing can be holding it.
			return nil
		}
		return err
	}
	db, err := bolt.Open(dbPath, 0o600, &bolt.Options{
		// Fail fast: if the file lock isn't free within 100ms, assume in use.
		Timeout:  100 * time.Millisecond,
		ReadOnly: true,
	})
	if err != nil {
		return fmt.Errorf("data path appears in use (metadata.db locked): %w", err)
	}
	return db.Close()
}
|
||||
|
||||
// buildSnapshotManifest walks dataPath and records every regular file
// (path relative to the root in slash form, size, SHA-256). Directories,
// symlinks and other non-regular entries are skipped. Returns the
// manifest and the total byte count, or an error if the tree is empty.
func buildSnapshotManifest(dataPath string) (*snapshotManifest, int64, error) {
	entries := make([]snapshotFileEntry, 0, 128)
	var totalBytes int64
	err := filepath.WalkDir(dataPath, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// Only regular files are archived; sockets/symlinks/devices are skipped.
		if !info.Mode().IsRegular() {
			return nil
		}

		rel, err := filepath.Rel(dataPath, path)
		if err != nil {
			return err
		}
		// Manifest paths are always slash-separated for portability.
		rel = filepath.ToSlash(filepath.Clean(rel))
		if rel == "." || rel == "" {
			return nil
		}

		sum, err := sha256File(path)
		if err != nil {
			return err
		}
		totalBytes += info.Size()
		entries = append(entries, snapshotFileEntry{
			Path:   rel,
			Size:   info.Size(),
			SHA256: sum,
		})
		return nil
	})
	if err != nil {
		return nil, 0, err
	}
	if len(entries) == 0 {
		return nil, 0, errors.New("data path contains no regular files to snapshot")
	}

	return &snapshotManifest{
		FormatVersion: snapshotFormat,
		CreatedAt:     time.Now().UTC().Format(time.RFC3339Nano),
		SourcePath:    dataPath,
		Files:         entries,
	}, totalBytes, nil
}
|
||||
|
||||
func writeManifestToTar(tw *tar.Writer, manifest *snapshotManifest) error {
|
||||
if tw == nil || manifest == nil {
|
||||
return errors.New("invalid manifest writer input")
|
||||
}
|
||||
payload, err := json.Marshal(manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header := &tar.Header{
|
||||
Name: snapshotManifestPath,
|
||||
Mode: 0o600,
|
||||
Size: int64(len(payload)),
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = tw.Write(payload)
|
||||
return err
|
||||
}
|
||||
|
||||
func writeFileToTar(tw *tar.Writer, absPath, relPath string) error {
|
||||
file, err := os.Open(absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
info, err := file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header := &tar.Header{
|
||||
Name: relPath,
|
||||
Mode: int64(info.Mode().Perm()),
|
||||
Size: info.Size(),
|
||||
ModTime: info.ModTime(),
|
||||
}
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(tw, file)
|
||||
return err
|
||||
}
|
||||
|
||||
func readSnapshotArchive(filePath string) (*snapshotManifest, map[string]snapshotFileEntry, error) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
actual := make(map[string]snapshotFileEntry)
|
||||
var manifest *snapshotManifest
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
name, err := cleanArchivePath(header.Name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if header.Typeflag == tar.TypeDir {
|
||||
continue
|
||||
}
|
||||
if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
|
||||
return nil, nil, fmt.Errorf("unsupported tar entry type for %s", name)
|
||||
}
|
||||
|
||||
if name == snapshotManifestPath {
|
||||
raw, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
current := &snapshotManifest{}
|
||||
if err := json.Unmarshal(raw, current); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
manifest = current
|
||||
continue
|
||||
}
|
||||
|
||||
size, hashHex, err := digestReader(tr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
actual[name] = snapshotFileEntry{
|
||||
Path: name,
|
||||
Size: size,
|
||||
SHA256: hashHex,
|
||||
}
|
||||
}
|
||||
|
||||
if manifest == nil {
|
||||
return nil, nil, errors.New("snapshot manifest.json not found")
|
||||
}
|
||||
if manifest.FormatVersion != snapshotFormat {
|
||||
return nil, nil, fmt.Errorf("unsupported snapshot format version %d", manifest.FormatVersion)
|
||||
}
|
||||
return manifest, actual, nil
|
||||
}
|
||||
|
||||
func extractSnapshotArchive(filePath, destination string, manifest *snapshotManifest) error {
|
||||
expected := make(map[string]snapshotFileEntry, len(manifest.Files))
|
||||
for _, entry := range manifest.Files {
|
||||
expected[entry.Path] = entry
|
||||
}
|
||||
seen := make(map[string]struct{}, len(expected))
|
||||
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name, err := cleanArchivePath(header.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == snapshotManifestPath {
|
||||
if _, err := io.Copy(io.Discard, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if header.Typeflag == tar.TypeDir {
|
||||
continue
|
||||
}
|
||||
if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA {
|
||||
return fmt.Errorf("unsupported tar entry type for %s", name)
|
||||
}
|
||||
|
||||
exp, ok := expected[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("snapshot contains unexpected file %s", name)
|
||||
}
|
||||
targetPath := filepath.Join(destination, filepath.FromSlash(name))
|
||||
if !isPathWithin(destination, targetPath) {
|
||||
return fmt.Errorf("invalid archive path %s", name)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := os.OpenFile(targetPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(header.Mode)&0o777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasher := sha256.New()
|
||||
written, copyErr := io.Copy(io.MultiWriter(out, hasher), tr)
|
||||
syncErr := out.Sync()
|
||||
closeErr := out.Close()
|
||||
if copyErr != nil {
|
||||
return copyErr
|
||||
}
|
||||
if syncErr != nil {
|
||||
return syncErr
|
||||
}
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
if err := syncDir(filepath.Dir(targetPath)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sum := hex.EncodeToString(hasher.Sum(nil))
|
||||
if written != exp.Size || sum != exp.SHA256 {
|
||||
return fmt.Errorf("checksum mismatch while extracting %s", name)
|
||||
}
|
||||
seen[name] = struct{}{}
|
||||
}
|
||||
|
||||
if len(seen) != len(expected) {
|
||||
return fmt.Errorf("restore validation failed: extracted %d files, expected %d", len(seen), len(expected))
|
||||
}
|
||||
for path := range expected {
|
||||
if _, ok := seen[path]; !ok {
|
||||
return fmt.Errorf("restore validation failed: missing file %s", path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanArchivePath(name string) (string, error) {
|
||||
name = strings.TrimSpace(name)
|
||||
if name == "" {
|
||||
return "", errors.New("empty archive path")
|
||||
}
|
||||
name = filepath.ToSlash(filepath.Clean(name))
|
||||
if strings.HasPrefix(name, "/") || strings.HasPrefix(name, "../") || strings.Contains(name, "/../") || name == ".." {
|
||||
return "", fmt.Errorf("unsafe archive path %q", name)
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// digestReader consumes r to EOF, returning the byte count and the
// hex-encoded SHA-256 of everything read.
func digestReader(r io.Reader) (int64, string, error) {
	h := sha256.New()
	size, err := io.Copy(h, r)
	if err != nil {
		return 0, "", err
	}
	return size, hex.EncodeToString(h.Sum(nil)), nil
}
|
||||
|
||||
func sha256File(path string) (string, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
return sha256FromReader(file)
|
||||
}
|
||||
|
||||
// sha256FromReader consumes reader to EOF and returns the hex-encoded
// SHA-256 of its contents.
func sha256FromReader(reader io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, reader); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
|
||||
|
||||
// isPathWithin reports whether candidate is base itself or lies inside
// base, judged lexically via filepath.Rel (no symlink resolution).
func isPathWithin(base, candidate string) bool {
	rel, err := filepath.Rel(filepath.Clean(base), filepath.Clean(candidate))
	if err != nil {
		return false
	}
	if rel == "." {
		return true
	}
	escape := ".." + string(filepath.Separator)
	return rel != ".." && !strings.HasPrefix(rel, escape)
}
|
||||
|
||||
func syncDir(path string) error {
|
||||
dir, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dir.Close()
|
||||
return dir.Sync()
|
||||
}
|
||||
|
||||
// strconvNowNano returns the current UnixNano timestamp as a decimal
// string, used to build unique temp/staging file names. strconv avoids
// fmt's reflection-based boxing for a plain integer conversion.
func strconvNowNano() string {
	return strconv.FormatInt(time.Now().UnixNano(), 10)
}
|
||||
240
cmd/admin_snapshot_test.go
Normal file
240
cmd/admin_snapshot_test.go
Normal file
@@ -0,0 +1,240 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// snapshotArchiveEntry is a test fixture: one in-memory file (archive
// path plus raw contents) to place into a synthetic snapshot tarball.
type snapshotArchiveEntry struct {
	Path string
	Data []byte
}
|
||||
|
||||
// TestInspectSnapshotArchiveRejectsUnsafePath verifies that an archive
// entry whose name escapes the root ("../escape") fails inspection.
func TestInspectSnapshotArchiveRejectsUnsafePath(t *testing.T) {
	t.Parallel()

	archive := filepath.Join(t.TempDir(), "bad.tar.gz")
	// The manifest itself is valid; only the archived entry's path is hostile.
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db")},
	})
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "../escape", Data: []byte("oops")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}

	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "unsafe archive path") {
		t.Fatalf("expected unsafe archive path error, got %v", err)
	}
}
|
||||
|
||||
// TestInspectSnapshotArchiveChecksumMismatch verifies that inspection
// fails when a file's content does not match its manifest SHA-256.
func TestInspectSnapshotArchiveChecksumMismatch(t *testing.T) {
	t.Parallel()

	archive := filepath.Join(t.TempDir(), "mismatch.tar.gz")
	// Manifest is computed for "good" but the archive stores "bad".
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("good")},
	})
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("bad")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}

	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "checksum mismatch") {
		t.Fatalf("expected checksum mismatch error, got %v", err)
	}
}
|
||||
|
||||
// TestInspectSnapshotArchiveMissingManifest verifies that an archive
// without an embedded manifest.json is rejected.
func TestInspectSnapshotArchiveMissingManifest(t *testing.T) {
	t.Parallel()

	archive := filepath.Join(t.TempDir(), "no-manifest.tar.gz")
	// includeManifest=false writes only the data entry.
	err := writeSnapshotArchiveForTest(archive, nil, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	}, false)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}

	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "manifest.json not found") {
		t.Fatalf("expected missing manifest error, got %v", err)
	}
}
|
||||
|
||||
// TestInspectSnapshotArchiveUnsupportedFormat verifies that a manifest
// with an unknown format version is rejected.
func TestInspectSnapshotArchiveUnsupportedFormat(t *testing.T) {
	t.Parallel()

	archive := filepath.Join(t.TempDir(), "unsupported-format.tar.gz")
	manifest := manifestForEntries([]snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	})
	// 99 is deliberately beyond the supported snapshotFormat.
	manifest.FormatVersion = 99
	err := writeSnapshotArchiveForTest(archive, manifest, []snapshotArchiveEntry{
		{Path: "chunks/c1", Data: []byte("x")},
	}, true)
	if err != nil {
		t.Fatalf("write test archive: %v", err)
	}

	_, _, err = inspectSnapshotArchive(archive)
	if err == nil || !strings.Contains(err.Error(), "unsupported snapshot format version") {
		t.Fatalf("expected unsupported format error, got %v", err)
	}
}
|
||||
|
||||
// TestRestoreSnapshotArchiveDestinationBehavior verifies restore's
// destination handling: a non-empty destination is refused without
// --force, and with --force the old contents are replaced by the
// snapshot's files.
func TestRestoreSnapshotArchiveDestinationBehavior(t *testing.T) {
	t.Parallel()

	root := t.TempDir()
	archive := filepath.Join(root, "ok.tar.gz")
	destination := filepath.Join(root, "dst")

	entries := []snapshotArchiveEntry{
		{Path: "metadata.db", Data: []byte("db-bytes")},
		{Path: "chunks/c1", Data: []byte("chunk-1")},
	}
	manifest := manifestForEntries(entries)
	if err := writeSnapshotArchiveForTest(archive, manifest, entries, true); err != nil {
		t.Fatalf("write test archive: %v", err)
	}

	// Seed the destination so it is non-empty before restoring.
	if err := os.MkdirAll(destination, 0o755); err != nil {
		t.Fatalf("mkdir destination: %v", err)
	}
	if err := os.WriteFile(filepath.Join(destination, "old.txt"), []byte("old"), 0o600); err != nil {
		t.Fatalf("seed destination: %v", err)
	}

	// Without force, a non-empty destination must be refused.
	if _, err := restoreSnapshotArchive(context.Background(), archive, destination, false); err == nil || !strings.Contains(err.Error(), "not empty") {
		t.Fatalf("expected non-empty destination error, got %v", err)
	}

	if _, err := restoreSnapshotArchive(context.Background(), archive, destination, true); err != nil {
		t.Fatalf("restore with force: %v", err)
	}

	// Force restore replaces the directory: old content gone, snapshot content present.
	if _, err := os.Stat(filepath.Join(destination, "old.txt")); !os.IsNotExist(err) {
		t.Fatalf("expected old file to be removed, stat err=%v", err)
	}
	got, err := os.ReadFile(filepath.Join(destination, "chunks/c1"))
	if err != nil {
		t.Fatalf("read restored chunk: %v", err)
	}
	if string(got) != "chunk-1" {
		t.Fatalf("restored chunk mismatch: got %q", string(got))
	}
}
|
||||
|
||||
// TestCreateSnapshotArchiveRejectsOutputInsideDataPath verifies that
// snapshot creation refuses an output file located inside the data path
// being snapshotted (which would archive itself).
func TestCreateSnapshotArchiveRejectsOutputInsideDataPath(t *testing.T) {
	t.Parallel()

	root := t.TempDir()
	if err := os.MkdirAll(filepath.Join(root, "chunks"), 0o755); err != nil {
		t.Fatalf("mkdir chunks: %v", err)
	}
	// A real (unlocked) bolt DB makes root pass the data-path checks.
	if err := createBoltDBForTest(filepath.Join(root, "metadata.db")); err != nil {
		t.Fatalf("create metadata db: %v", err)
	}
	if err := os.WriteFile(filepath.Join(root, "chunks/c1"), []byte("x"), 0o600); err != nil {
		t.Fatalf("write chunk: %v", err)
	}

	out := filepath.Join(root, "inside.tar.gz")
	if _, err := createSnapshotArchive(context.Background(), root, out); err == nil || !strings.Contains(err.Error(), "cannot be inside") {
		t.Fatalf("expected output-inside-data-path error, got %v", err)
	}
}
|
||||
|
||||
func writeSnapshotArchiveForTest(path string, manifest *snapshotManifest, entries []snapshotArchiveEntry, includeManifest bool) error {
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzw := gzip.NewWriter(file)
|
||||
defer gzw.Close()
|
||||
tw := tar.NewWriter(gzw)
|
||||
defer tw.Close()
|
||||
|
||||
if includeManifest {
|
||||
raw, err := json.Marshal(manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeTarEntry(tw, snapshotManifestPath, raw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if err := writeTarEntry(tw, entry.Path, entry.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeTarEntry(tw *tar.Writer, name string, data []byte) error {
|
||||
header := &tar.Header{
|
||||
Name: name,
|
||||
Mode: 0o600,
|
||||
Size: int64(len(data)),
|
||||
}
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := ioCopyBytes(tw, data)
|
||||
return err
|
||||
}
|
||||
|
||||
func manifestForEntries(entries []snapshotArchiveEntry) *snapshotManifest {
|
||||
files := make([]snapshotFileEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
sum := sha256.Sum256(entry.Data)
|
||||
files = append(files, snapshotFileEntry{
|
||||
Path: filepath.ToSlash(filepath.Clean(entry.Path)),
|
||||
Size: int64(len(entry.Data)),
|
||||
SHA256: hex.EncodeToString(sum[:]),
|
||||
})
|
||||
}
|
||||
return &snapshotManifest{
|
||||
FormatVersion: snapshotFormat,
|
||||
CreatedAt: "2026-03-11T00:00:00Z",
|
||||
SourcePath: "/tmp/source",
|
||||
Files: files,
|
||||
}
|
||||
}
|
||||
|
||||
func createBoltDBForTest(path string) error {
|
||||
db, err := bolt.Open(path, 0o600, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
return db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists([]byte("x"))
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func ioCopyBytes(w *tar.Writer, data []byte) (int64, error) {
|
||||
n, err := bytes.NewReader(data).WriteTo(w)
|
||||
return n, err
|
||||
}
|
||||
388
cmd/admin_user.go
Normal file
388
cmd/admin_user.go
Normal file
@@ -0,0 +1,388 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newAdminUserCommand(opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "user",
|
||||
Short: "Manage auth users",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
cmd.AddCommand(newAdminUserCreateCommand(opts))
|
||||
cmd.AddCommand(newAdminUserListCommand(opts))
|
||||
cmd.AddCommand(newAdminUserGetCommand(opts))
|
||||
cmd.AddCommand(newAdminUserDeleteCommand(opts))
|
||||
cmd.AddCommand(newAdminUserSetStatusCommand(opts))
|
||||
cmd.AddCommand(newAdminUserSetRoleCommand(opts))
|
||||
cmd.AddCommand(newAdminUserRemoveRoleCommand(opts))
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newAdminUserCreateCommand returns "fs admin user create", which creates an
// auth user via the admin API with a policy derived from the role flags.
// The secret key is only present in the create response, never on later
// reads, so the table output warns the operator to store it.
func newAdminUserCreateCommand(opts *adminOptions) *cobra.Command {
	var (
		accessKey string
		secretKey string
		status    string
		role      string
		bucket    string
		prefix    string
	)

	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a user",
		RunE: func(cmd *cobra.Command, args []string) error {
			accessKey = strings.TrimSpace(accessKey)
			if accessKey == "" {
				return usageError("fs admin user create --access-key <id> --role admin|readwrite|readonly [--status active|disabled] [--bucket <name>] [--prefix <path>]", "--access-key is required")
			}
			// Translate the role/bucket/prefix flags into a concrete policy.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user create --access-key <id> --role admin|readwrite|readonly [--status active|disabled] [--bucket <name>] [--prefix <path>]", err.Error())
			}

			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}

			out, err := client.CreateUser(context.Background(), createUserRequest{
				AccessKeyID: accessKey,
				SecretKey:   strings.TrimSpace(secretKey),
				Status:      strings.TrimSpace(status),
				Policy:      policy,
			})
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			if err := writeUserTable(cmd.OutOrStdout(), out, true); err != nil {
				return err
			}
			// Remind the operator that the secret is not retrievable later.
			if strings.TrimSpace(out.SecretKey) != "" {
				_, _ = fmt.Fprintln(cmd.OutOrStdout(), "\nsecretKey is only returned once during create; store it securely.")
			}
			return nil
		},
	}

	cmd.Flags().StringVar(&accessKey, "access-key", "", "User access key ID")
	cmd.Flags().StringVar(&secretKey, "secret-key", "", "User secret key (optional; auto-generated when omitted)")
	cmd.Flags().StringVar(&status, "status", "active", "User status: active|disabled")
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
|
||||
|
||||
func newAdminUserListCommand(opts *adminOptions) *cobra.Command {
|
||||
var (
|
||||
limit int
|
||||
cursor string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List users",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if limit < 1 || limit > 1000 {
|
||||
return usageError("fs admin user list [--limit 1-1000] [--cursor <token>]", "--limit must be between 1 and 1000")
|
||||
}
|
||||
client, err := newAdminAPIClient(opts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := client.ListUsers(context.Background(), limit, cursor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), out)
|
||||
}
|
||||
return writeUserListTable(cmd.OutOrStdout(), out)
|
||||
},
|
||||
}
|
||||
cmd.Flags().IntVar(&limit, "limit", 100, "List page size (1-1000)")
|
||||
cmd.Flags().StringVar(&cursor, "cursor", "", "Pagination cursor from previous list call")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newAdminUserGetCommand(opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "get <access-key-id>",
|
||||
Short: "Get one user",
|
||||
Args: requireAccessKeyArg("fs admin user get <access-key-id>"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := newAdminAPIClient(opts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := client.GetUser(context.Background(), args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), out)
|
||||
}
|
||||
return writeUserTable(cmd.OutOrStdout(), out, false)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newAdminUserDeleteCommand(opts *adminOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete <access-key-id>",
|
||||
Short: "Delete one user",
|
||||
Args: requireAccessKeyArg("fs admin user delete <access-key-id>"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := newAdminAPIClient(opts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := client.DeleteUser(context.Background(), args[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), map[string]string{
|
||||
"status": "deleted",
|
||||
"accessKeyId": args[0],
|
||||
})
|
||||
}
|
||||
_, err = fmt.Fprintf(cmd.OutOrStdout(), "deleted user %s\n", args[0])
|
||||
return err
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newAdminUserSetStatusCommand(opts *adminOptions) *cobra.Command {
|
||||
var status string
|
||||
cmd := &cobra.Command{
|
||||
Use: "set-status <access-key-id>",
|
||||
Short: "Set user status",
|
||||
Args: requireAccessKeyArg("fs admin user set-status <access-key-id> --status active|disabled"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
status = strings.TrimSpace(status)
|
||||
if status == "" {
|
||||
return usageError("fs admin user set-status <access-key-id> --status active|disabled", "--status is required")
|
||||
}
|
||||
normalized := strings.ToLower(status)
|
||||
if normalized != "active" && normalized != "disabled" {
|
||||
return usageError("fs admin user set-status <access-key-id> --status active|disabled", "--status must be active or disabled")
|
||||
}
|
||||
client, err := newAdminAPIClient(opts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, err := client.SetUserStatus(context.Background(), args[0], normalized)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.JSON {
|
||||
return writeJSON(cmd.OutOrStdout(), out)
|
||||
}
|
||||
return writeUserTable(cmd.OutOrStdout(), out, false)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&status, "status", "", "User status: active|disabled")
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newAdminUserSetRoleCommand returns "fs admin user set-role", which adds a
// role-derived policy statement to a user's policy, or replaces the whole
// policy when --replace is given.
func newAdminUserSetRoleCommand(opts *adminOptions) *cobra.Command {
	var (
		role    string
		bucket  string
		prefix  string
		replace bool
	)
	cmd := &cobra.Command{
		Use:   "set-role <access-key-id>",
		Short: "Add or replace user role policy statement",
		Args:  requireAccessKeyArg("fs admin user set-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>] [--replace]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build the policy statement described by the role flags.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user set-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>] [--replace]", err.Error())
			}

			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}

			// Without --replace, append the new statement to the user's
			// existing policy (deduplicated by mergePolicyStatements). With
			// --replace, the new policy overwrites whatever exists.
			finalPolicy := policy
			if !replace {
				existing, err := client.GetUser(context.Background(), args[0])
				if err != nil {
					return err
				}
				finalPolicy = mergePolicyStatements(existing.Policy, policy)
			}

			out, err := client.SetUserPolicy(context.Background(), args[0], finalPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}
	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	cmd.Flags().BoolVar(&replace, "replace", false, "Replace all existing policy statements instead of appending")
	return cmd
}
|
||||
|
||||
// newAdminUserRemoveRoleCommand returns "fs admin user remove-role", which
// deletes one role-derived policy statement from a user. Removing the last
// remaining statement is refused, so a user is never silently left without
// any policy.
func newAdminUserRemoveRoleCommand(opts *adminOptions) *cobra.Command {
	var (
		role   string
		bucket string
		prefix string
	)
	cmd := &cobra.Command{
		Use:   "remove-role <access-key-id>",
		Short: "Remove one role policy statement from user",
		Args:  requireAccessKeyArg("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build the statement described by the flags; it is used only as
			// a match target against the user's existing statements.
			policy, err := buildPolicyFromRole(rolePolicyOptions{
				Role:   role,
				Bucket: bucket,
				Prefix: prefix,
			})
			if err != nil {
				return usageError("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]", err.Error())
			}
			if len(policy.Statements) == 0 {
				return usageError("fs admin user remove-role <access-key-id> --role admin|readwrite|readonly [--bucket <name>] [--prefix <path>]", "no statement to remove")
			}

			client, err := newAdminAPIClient(opts, true)
			if err != nil {
				return err
			}

			existing, err := client.GetUser(context.Background(), args[0])
			if err != nil {
				return err
			}
			if existing.Policy == nil || len(existing.Policy.Statements) == 0 {
				return fmt.Errorf("user %q has no policy statements", args[0])
			}

			// Drop every statement equal to the target, then refuse to
			// persist an empty policy.
			target := policy.Statements[0]
			nextPolicy, removed := removePolicyStatements(existing.Policy, target)
			if removed == 0 {
				return fmt.Errorf("no matching statement found for role=%s bucket=%s prefix=%s", role, bucket, prefix)
			}
			if len(nextPolicy.Statements) == 0 {
				return fmt.Errorf("cannot remove the last policy statement; add another role first or use set-role --replace")
			}

			out, err := client.SetUserPolicy(context.Background(), args[0], nextPolicy)
			if err != nil {
				return err
			}
			if opts.JSON {
				return writeJSON(cmd.OutOrStdout(), out)
			}
			return writeUserTable(cmd.OutOrStdout(), out, false)
		},
	}

	cmd.Flags().StringVar(&role, "role", "readwrite", "Role: admin|readwrite|readonly")
	cmd.Flags().StringVar(&bucket, "bucket", "*", "Bucket scope, defaults to *")
	cmd.Flags().StringVar(&prefix, "prefix", "*", "Prefix scope, defaults to *")
	return cmd
}
|
||||
|
||||
func mergePolicyStatements(existing *adminPolicy, addition adminPolicy) adminPolicy {
|
||||
merged := adminPolicy{}
|
||||
if existing != nil {
|
||||
merged.Principal = existing.Principal
|
||||
merged.Statements = append(merged.Statements, existing.Statements...)
|
||||
}
|
||||
|
||||
if len(addition.Statements) == 0 {
|
||||
return merged
|
||||
}
|
||||
|
||||
stmt := addition.Statements[0]
|
||||
for _, current := range merged.Statements {
|
||||
if policyStatementsEqual(current, stmt) {
|
||||
return merged
|
||||
}
|
||||
}
|
||||
merged.Statements = append(merged.Statements, stmt)
|
||||
return merged
|
||||
}
|
||||
|
||||
func policyStatementsEqual(a, b adminPolicyStatement) bool {
|
||||
if a.Effect != b.Effect || a.Bucket != b.Bucket || a.Prefix != b.Prefix {
|
||||
return false
|
||||
}
|
||||
if len(a.Actions) != len(b.Actions) {
|
||||
return false
|
||||
}
|
||||
for i := range a.Actions {
|
||||
if a.Actions[i] != b.Actions[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func removePolicyStatements(existing *adminPolicy, target adminPolicyStatement) (adminPolicy, int) {
|
||||
out := adminPolicy{}
|
||||
if existing == nil {
|
||||
return out, 0
|
||||
}
|
||||
out.Principal = existing.Principal
|
||||
out.Statements = make([]adminPolicyStatement, 0, len(existing.Statements))
|
||||
|
||||
removed := 0
|
||||
for _, stmt := range existing.Statements {
|
||||
if policyStatementsEqual(stmt, target) {
|
||||
removed++
|
||||
continue
|
||||
}
|
||||
out.Statements = append(out.Statements, stmt)
|
||||
}
|
||||
return out, removed
|
||||
}
|
||||
|
||||
func requireAccessKeyArg(usage string) cobra.PositionalArgs {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return usageError(usage, "missing or invalid <access-key-id> argument")
|
||||
}
|
||||
if strings.TrimSpace(args[0]) == "" {
|
||||
return usageError(usage, "access key id cannot be empty")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func usageError(usage, message string) error {
|
||||
return fmt.Errorf("%s\nusage: %s", message, usage)
|
||||
}
|
||||
20
cmd/buildinfo.go
Normal file
20
cmd/buildinfo.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package cmd
|
||||
|
||||
type BuildInfo struct {
|
||||
Version string
|
||||
Commit string
|
||||
Date string
|
||||
}
|
||||
|
||||
func (b BuildInfo) normalized() BuildInfo {
|
||||
if b.Version == "" {
|
||||
b.Version = "dev"
|
||||
}
|
||||
if b.Commit == "" {
|
||||
b.Commit = "none"
|
||||
}
|
||||
if b.Date == "" {
|
||||
b.Date = "unknown"
|
||||
}
|
||||
return b
|
||||
}
|
||||
26
cmd/execute.go
Normal file
26
cmd/execute.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fs/app"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func Execute(build BuildInfo) error {
|
||||
build = build.normalized()
|
||||
|
||||
if len(os.Args) == 1 {
|
||||
return runServerWithSignals()
|
||||
}
|
||||
|
||||
root := newRootCommand(build)
|
||||
return root.Execute()
|
||||
}
|
||||
|
||||
// runServerWithSignals runs the storage server until it exits or the process
// receives SIGINT/SIGTERM, which cancels the server context so it can shut
// down gracefully.
func runServerWithSignals() error {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	return app.RunServer(ctx)
}
|
||||
36
cmd/root.go
Normal file
36
cmd/root.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newRootCommand(build BuildInfo) *cobra.Command {
|
||||
root := &cobra.Command{
|
||||
Use: "fs",
|
||||
Short: "fs object storage server and admin CLI",
|
||||
SilenceUsage: true,
|
||||
SilenceErrors: true,
|
||||
Version: build.Version,
|
||||
}
|
||||
|
||||
root.SetVersionTemplate(versionTemplate(build))
|
||||
root.AddCommand(newServerCommand())
|
||||
root.AddCommand(newAdminCommand(build))
|
||||
root.AddCommand(&cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print build and runtime version information",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
_, err := fmt.Fprintf(cmd.OutOrStdout(), "version=%s commit=%s date=%s go=%s\n", build.Version, build.Commit, build.Date, runtime.Version())
|
||||
return err
|
||||
},
|
||||
})
|
||||
|
||||
return root
|
||||
}
|
||||
|
||||
// versionTemplate renders the --version output for the root command in the
// same key=value format as the "version" subcommand (without the Go runtime
// version).
func versionTemplate(build BuildInfo) string {
	return fmt.Sprintf("version=%s commit=%s date=%s\n", build.Version, build.Commit, build.Date)
}
|
||||
15
cmd/server.go
Normal file
15
cmd/server.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newServerCommand() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "server",
|
||||
Short: "Run fs object storage server",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runServerWithSignals()
|
||||
},
|
||||
}
|
||||
}
|
||||
5
go.mod
5
go.mod
@@ -5,10 +5,13 @@ go 1.25.7
|
||||
require (
|
||||
github.com/go-chi/chi/v5 v5.2.5
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/spf13/cobra v1.10.1
|
||||
go.etcd.io/bbolt v1.4.3
|
||||
)
|
||||
|
||||
require (
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/spf13/pflag v1.0.9 // indirect
	golang.org/x/sys v0.41.0 // indirect
)
|
||||
|
||||
9
go.sum
9
go.sum
@@ -1,13 +1,21 @@
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
|
||||
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
|
||||
@@ -16,5 +24,6 @@ golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
94
main.go
94
main.go
@@ -1,90 +1,24 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fs/api"
|
||||
"fs/auth"
|
||||
"fs/logging"
|
||||
"fs/metadata"
|
||||
"fs/service"
|
||||
"fs/storage"
|
||||
"fs/utils"
|
||||
"fs/cmd"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "dev"
|
||||
commit = "none"
|
||||
date = "unknown"
|
||||
)
|
||||
|
||||
func main() {
|
||||
config := utils.NewConfig()
|
||||
logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog)
|
||||
authConfig := auth.ConfigFromValues(
|
||||
config.AuthEnabled,
|
||||
config.AuthRegion,
|
||||
config.AuthSkew,
|
||||
config.AuthMaxPresign,
|
||||
config.AuthMasterKey,
|
||||
config.AuthBootstrapAccessKey,
|
||||
config.AuthBootstrapSecretKey,
|
||||
config.AuthBootstrapPolicy,
|
||||
)
|
||||
logger := logging.NewLogger(logConfig)
|
||||
logger.Info("boot",
|
||||
"log_level", logConfig.LevelName,
|
||||
"log_format", logConfig.Format,
|
||||
"audit_log", logConfig.Audit,
|
||||
"data_path", config.DataPath,
|
||||
"multipart_retention_hours", int(config.MultipartCleanupRetention/time.Hour),
|
||||
"auth_enabled", authConfig.Enabled,
|
||||
"auth_region", authConfig.Region,
|
||||
"admin_api_enabled", config.AdminAPIEnabled,
|
||||
)
|
||||
|
||||
if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
|
||||
logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
|
||||
return
|
||||
build := cmd.BuildInfo{
|
||||
Version: version,
|
||||
Commit: commit,
|
||||
Date: date,
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(config.DataPath, "metadata.db")
|
||||
metadataHandler, err := metadata.NewMetadataHandler(dbPath)
|
||||
if err != nil {
|
||||
logger.Error("failed_to_initialize_metadata_handler", "error", err)
|
||||
return
|
||||
if err := cmd.Execute(build); err != nil {
|
||||
_, _ = os.Stderr.WriteString(err.Error() + "\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
|
||||
if err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_initialize_blob_store", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
objectService := service.NewObjectService(metadataHandler, blobHandler, config.MultipartCleanupRetention)
|
||||
authService, err := auth.NewService(authConfig, metadataHandler)
|
||||
if err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_initialize_auth_service", "error", err)
|
||||
return
|
||||
}
|
||||
if err := authService.EnsureBootstrap(); err != nil {
|
||||
_ = metadataHandler.Close()
|
||||
logger.Error("failed_to_ensure_bootstrap_auth_identity", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
handler := api.NewHandler(objectService, logger, logConfig, authService, config.AdminAPIEnabled)
|
||||
addr := config.Address + ":" + strconv.Itoa(config.Port)
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
if config.GcEnabled {
|
||||
go objectService.RunGC(ctx, config.GcInterval)
|
||||
}
|
||||
|
||||
if err = handler.Start(ctx, addr); err != nil {
|
||||
logger.Error("server_stopped_with_error", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -38,7 +38,7 @@ func NewConfig() *Config {
|
||||
config := &Config{
|
||||
DataPath: sanitizeDataPath(os.Getenv("DATA_PATH")),
|
||||
Address: firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"),
|
||||
Port: envIntRange("PORT", 3000, 1, 65535),
|
||||
Port: envIntRange("PORT", 2600, 1, 65535),
|
||||
ChunkSize: envIntRange("CHUNK_SIZE", 8192000, 1, 64*1024*1024),
|
||||
LogLevel: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")),
|
||||
LogFormat: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_FORMAT")), strings.TrimSpace(os.Getenv("LOG_TYPE")), "text")),
|
||||
|
||||
Reference in New Issue
Block a user