Fixed logging, added config and .env example

This commit is contained in:
2026-02-23 21:52:45 +01:00
parent d7bdb3177b
commit a8204de914
11 changed files with 187 additions and 63 deletions

View File

@@ -1,3 +1,4 @@
*.md
.gocache/
blobs/
data/

6
.env.example Normal file
View File

@@ -0,0 +1,6 @@
LOG_LEVEL=debug
LOG_FORMAT=text
DATA_PATH=data/
PORT=2600
AUDIT_LOG=true
ADDRESS=0.0.0.0

4
.gitignore vendored
View File

@@ -1,6 +1,8 @@
.env
*.db
.vscode/
blobs/
*.db
data/
.idea/
.gocache/
.gomodcache/

View File

@@ -49,13 +49,7 @@ func NewHandler(svc *service.ObjectService, logger *slog.Logger, logConfig loggi
}
func (h *Handler) setupRoutes() {
if h.logConfig.Format == "text" {
if h.logConfig.Audit || h.logConfig.DebugMode {
h.router.Use(middleware.Logger)
}
} else {
h.router.Use(logging.HTTPMiddleware(h.logger, h.logConfig))
}
h.router.Use(logging.HTTPMiddleware(h.logger, h.logConfig))
h.router.Get("/", h.handleGetBuckets)

5
go.mod
View File

@@ -8,4 +8,7 @@ require (
go.etcd.io/bbolt v1.4.3
)
require golang.org/x/sys v0.41.0 // indirect
require (
github.com/joho/godotenv v1.5.1 // indirect
golang.org/x/sys v0.41.0 // indirect
)

2
go.sum
View File

@@ -4,6 +4,8 @@ github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=

View File

@@ -21,13 +21,19 @@ type Config struct {
func ConfigFromEnv() Config {
levelName := strings.ToLower(strings.TrimSpace(os.Getenv("LOG_LEVEL")))
format := strings.ToLower(strings.TrimSpace(os.Getenv("LOG_FORMAT")))
return ConfigFromValues(levelName, format, envBool("AUDIT_LOG", true))
}
func ConfigFromValues(levelName, format string, audit bool) Config {
levelName = strings.ToLower(strings.TrimSpace(levelName))
if levelName == "" {
levelName = "info"
}
level := parseLevel(levelName)
levelName = level.String()
levelName = strings.ToUpper(level.String())
format := strings.ToLower(strings.TrimSpace(os.Getenv("LOG_FORMAT")))
format = strings.ToLower(strings.TrimSpace(format))
if format == "" {
format = "text"
}
@@ -40,7 +46,7 @@ func ConfigFromEnv() Config {
Level: level,
LevelName: levelName,
Format: format,
Audit: envBool("AUDIT_LOG", true),
Audit: audit,
AddSource: debugMode,
DebugMode: debugMode,
}
@@ -91,16 +97,9 @@ func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Han
"path", r.URL.Path,
"status", ww.status,
"bytes", ww.bytes,
"duration_ms", float64(elapsed.Nanoseconds()) / 1_000_000.0,
"remote_addr", r.RemoteAddr,
}
switch {
case elapsed < time.Microsecond:
attrs = append(attrs, "duration_ns", elapsed.Nanoseconds())
case elapsed < time.Millisecond:
attrs = append(attrs, "duration_us", elapsed.Microseconds())
default:
attrs = append(attrs, "duration_ms", elapsed.Milliseconds())
}
if cfg.DebugMode {
attrs = append(attrs,

27
main.go
View File

@@ -5,26 +5,45 @@ import (
"fs/logging"
"fs/metadata"
"fs/service"
"fs/storage"
"fs/utils"
"os"
"path/filepath"
"strconv"
)
func main() {
logConfig := logging.ConfigFromEnv()
config := utils.NewConfig()
logConfig := logging.ConfigFromValues(config.LogLevel, config.LogFormat, config.AuditLog)
logger := logging.NewLogger(logConfig)
logger.Info("boot",
"log_level", logConfig.LevelName,
"log_format", logConfig.Format,
"audit_log", logConfig.Audit,
"data_path", config.DataPath,
)
metadataHandler, err := metadata.NewMetadataHandler("metadata.db")
if err := os.MkdirAll(config.DataPath, 0o755); err != nil {
logger.Error("failed_to_prepare_data_path", "path", config.DataPath, "error", err)
return
}
dbPath := filepath.Join(config.DataPath, "metadata.db")
metadataHandler, err := metadata.NewMetadataHandler(dbPath)
if err != nil {
logger.Error("failed_to_initialize_metadata_handler", "error", err)
return
}
blobHandler, err := storage.NewBlobStore(config.DataPath, config.ChunkSize)
if err != nil {
logger.Error("failed_to_initialize_blob_store", "error", err)
return
}
objectService := service.NewObjectService(metadataHandler)
objectService := service.NewObjectService(metadataHandler, blobHandler)
handler := api.NewHandler(objectService, logger, logConfig)
if err = handler.Start("0.0.0.0:3000"); err != nil {
addr := config.Address + ":" + strconv.Itoa(config.Port)
if err = handler.Start(addr); err != nil {
logger.Error("server_stopped_with_error", "error", err)
return
}

View File

@@ -15,7 +15,8 @@ import (
)
type ObjectService struct {
metadataHandler *metadata.MetadataHandler
metadata *metadata.MetadataHandler
blob *storage.BlobStore
}
var (
@@ -25,13 +26,13 @@ var (
ErrEntityTooSmall = errors.New("multipart entity too small")
)
func NewObjectService(metadataHandler *metadata.MetadataHandler) *ObjectService {
return &ObjectService{metadataHandler: metadataHandler}
func NewObjectService(metadataHandler *metadata.MetadataHandler, blobHandler *storage.BlobStore) *ObjectService {
return &ObjectService{metadata: metadataHandler, blob: blobHandler}
}
func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Reader) (*models.ObjectManifest, error) {
chunks, size, etag, err := storage.IngestStream(input)
chunks, size, etag, err := s.blob.IngestStream(input)
if err != nil {
return nil, err
}
@@ -53,7 +54,7 @@ func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Read
"chunk_count", len(manifest.Chunks),
"etag", manifest.ETag,
)
if err = s.metadataHandler.PutManifest(manifest); err != nil {
if err = s.metadata.PutManifest(manifest); err != nil {
return nil, err
}
@@ -61,7 +62,7 @@ func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Read
}
func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) {
manifest, err := s.metadataHandler.GetManifest(bucket, key)
manifest, err := s.metadata.GetManifest(bucket, key)
if err != nil {
return nil, nil, err
}
@@ -75,7 +76,7 @@ func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.Ob
}
}(pw)
err := storage.AssembleStream(manifest.Chunks, pw)
err := s.blob.AssembleStream(manifest.Chunks, pw)
if err != nil {
return
}
@@ -84,7 +85,7 @@ func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.Ob
}
func (s *ObjectService) HeadObject(bucket, key string) (models.ObjectManifest, error) {
manifest, err := s.metadataHandler.GetManifest(bucket, key)
manifest, err := s.metadata.GetManifest(bucket, key)
if err != nil {
return models.ObjectManifest{}, err
}
@@ -92,36 +93,36 @@ func (s *ObjectService) HeadObject(bucket, key string) (models.ObjectManifest, e
}
func (s *ObjectService) DeleteObject(bucket, key string) error {
return s.metadataHandler.DeleteManifest(bucket, key)
return s.metadata.DeleteManifest(bucket, key)
}
func (s *ObjectService) ListObjects(bucket, prefix string) ([]*models.ObjectManifest, error) {
return s.metadataHandler.ListObjects(bucket, prefix)
return s.metadata.ListObjects(bucket, prefix)
}
func (s *ObjectService) CreateBucket(bucket string) error {
return s.metadataHandler.CreateBucket(bucket)
return s.metadata.CreateBucket(bucket)
}
func (s *ObjectService) HeadBucket(bucket string) error {
_, err := s.metadataHandler.GetBucketManifest(bucket)
_, err := s.metadata.GetBucketManifest(bucket)
return err
}
func (s *ObjectService) DeleteBucket(bucket string) error {
return s.metadataHandler.DeleteBucket(bucket)
return s.metadata.DeleteBucket(bucket)
}
func (s *ObjectService) ListBuckets() ([]string, error) {
return s.metadataHandler.ListBuckets()
return s.metadata.ListBuckets()
}
func (s *ObjectService) DeleteObjects(bucket string, keys []string) ([]string, error) {
return s.metadataHandler.DeleteManifests(bucket, keys)
return s.metadata.DeleteManifests(bucket, keys)
}
func (s *ObjectService) CreateMultipartUpload(bucket, key string) (*models.MultipartUpload, error) {
return s.metadataHandler.CreateMultipartUpload(bucket, key)
return s.metadata.CreateMultipartUpload(bucket, key)
}
func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int, input io.Reader) (string, error) {
@@ -129,7 +130,7 @@ func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int,
return "", ErrInvalidPart
}
upload, err := s.metadataHandler.GetMultipartUpload(uploadId)
upload, err := s.metadata.GetMultipartUpload(uploadId)
if err != nil {
return "", err
}
@@ -138,7 +139,7 @@ func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int,
}
var uploadedPart models.UploadedPart
chunkIds, totalSize, etag, err := storage.IngestStream(input)
chunkIds, totalSize, etag, err := s.blob.IngestStream(input)
if err != nil {
return "", err
}
@@ -149,7 +150,7 @@ func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int,
Chunks: chunkIds,
CreatedAt: time.Now().Unix(),
}
err = s.metadataHandler.PutMultipartPart(uploadId, uploadedPart)
err = s.metadata.PutMultipartPart(uploadId, uploadedPart)
if err != nil {
return "", err
}
@@ -157,14 +158,14 @@ func (s *ObjectService) UploadPart(bucket, key, uploadId string, partNumber int,
}
func (s *ObjectService) ListMultipartParts(bucket, key, uploadID string) ([]models.UploadedPart, error) {
upload, err := s.metadataHandler.GetMultipartUpload(uploadID)
upload, err := s.metadata.GetMultipartUpload(uploadID)
if err != nil {
return nil, err
}
if upload.Bucket != bucket || upload.Key != key {
return nil, metadata.ErrMultipartNotFound
}
return s.metadataHandler.ListMultipartParts(uploadID)
return s.metadata.ListMultipartParts(uploadID)
}
func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, completed []models.CompletedPart) (*models.ObjectManifest, error) {
@@ -172,7 +173,7 @@ func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, co
return nil, ErrInvalidCompleteRequest
}
upload, err := s.metadataHandler.GetMultipartUpload(uploadID)
upload, err := s.metadata.GetMultipartUpload(uploadID)
if err != nil {
return nil, err
}
@@ -180,7 +181,7 @@ func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, co
return nil, metadata.ErrMultipartNotFound
}
storedParts, err := s.metadataHandler.ListMultipartParts(uploadID)
storedParts, err := s.metadata.ListMultipartParts(uploadID)
if err != nil {
return nil, err
}
@@ -227,7 +228,7 @@ func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, co
CreatedAt: time.Now().Unix(),
}
if err := s.metadataHandler.CompleteMultipartUpload(uploadID, manifest); err != nil {
if err := s.metadata.CompleteMultipartUpload(uploadID, manifest); err != nil {
return nil, err
}
@@ -235,14 +236,14 @@ func (s *ObjectService) CompleteMultipartUpload(bucket, key, uploadID string, co
}
func (s *ObjectService) AbortMultipartUpload(bucket, key, uploadID string) error {
upload, err := s.metadataHandler.GetMultipartUpload(uploadID)
upload, err := s.metadata.GetMultipartUpload(uploadID)
if err != nil {
return err
}
if upload.Bucket != bucket || upload.Key != key {
return metadata.ErrMultipartNotFound
}
return s.metadataHandler.AbortMultipartUpload(uploadID)
return s.metadata.AbortMultipartUpload(uploadID)
}
func normalizeETag(etag string) string {
@@ -263,5 +264,5 @@ func buildMultipartETag(parts []models.UploadedPart) string {
}
func (s *ObjectService) Close() error {
return s.metadataHandler.Close()
return s.metadata.Close()
}

View File

@@ -13,10 +13,22 @@ import (
const chunkSize = 64 * 1024
const blobRoot = "blobs/"
func IngestStream(stream io.Reader) ([]string, int64, string, error) {
type BlobStore struct {
dataRoot string
chunkSize int
}
func NewBlobStore(root string, chunkSize int) (*BlobStore, error) {
if err := os.MkdirAll(filepath.Join(root, blobRoot), 0o755); err != nil {
return nil, err
}
return &BlobStore{chunkSize: chunkSize, dataRoot: root}, nil
}
func (bs *BlobStore) IngestStream(stream io.Reader) ([]string, int64, string, error) {
fullFileHasher := md5.New()
buffer := make([]byte, chunkSize)
buffer := make([]byte, bs.chunkSize)
var totalSize int64
var chunkIDs []string
@@ -35,7 +47,7 @@ func IngestStream(stream io.Reader) ([]string, int64, string, error) {
chunkHash := sha256.Sum256(chunkData)
chunkID := hex.EncodeToString(chunkHash[:])
err := saveBlob(chunkID, chunkData)
err := bs.saveBlob(chunkID, chunkData)
if err != nil {
return nil, 0, "", err
}
@@ -54,8 +66,8 @@ func IngestStream(stream io.Reader) ([]string, int64, string, error) {
return chunkIDs, totalSize, etag, nil
}
func saveBlob(chunkID string, data []byte) error {
dir := filepath.Join(blobRoot, chunkID[:2], chunkID[2:4])
func (bs *BlobStore) saveBlob(chunkID string, data []byte) error {
dir := filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4])
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
@@ -69,9 +81,9 @@ func saveBlob(chunkID string, data []byte) error {
return nil
}
func AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
func (bs *BlobStore) AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
for _, chunkID := range chunkIDs {
chunkData, err := GetBlob(chunkID)
chunkData, err := bs.GetBlob(chunkID)
if err != nil {
return err
}
@@ -82,7 +94,6 @@ func AssembleStream(chunkIDs []string, w *io.PipeWriter) error {
return nil
}
func GetBlob(chunkID string) ([]byte, error) {
return os.ReadFile(filepath.Join(blobRoot, chunkID[:2], chunkID[2:4], chunkID))
func (bs *BlobStore) GetBlob(chunkID string) ([]byte, error) {
return os.ReadFile(filepath.Join(bs.dataRoot, blobRoot, chunkID[:2], chunkID[2:4], chunkID))
}

86
utils/config.go Normal file
View File

@@ -0,0 +1,86 @@
package utils
import (
"os"
"path/filepath"
"strconv"
"strings"
"github.com/joho/godotenv"
)
// Config holds all runtime settings for the service, resolved once at
// startup from the environment (optionally seeded from a .env file).
type Config struct {
DataPath string // root directory for the metadata DB and blob storage; absolute after sanitizeDataPath
Address string // listen address, defaults to "0.0.0.0"
Port int // TCP listen port, defaults to 3000
ChunkSize int // blob chunk size in bytes, defaults to 8192000 (~8 MB)
LogLevel string // lowercased log level name, defaults to "info"
LogFormat string // "text" or "json"; anything else is coerced to "text"
AuditLog bool // enables HTTP audit logging, defaults to true
}
// NewConfig loads configuration from the environment, optionally seeded
// from a .env file in the working directory, applying defaults for every
// value that is missing or invalid.
func NewConfig() *Config {
	// Best-effort: a missing .env file is not an error — real environment
	// variables take precedence over the file anyway.
	_ = godotenv.Load()
	config := &Config{
		DataPath: sanitizeDataPath(os.Getenv("DATA_PATH")),
		Address:  firstNonEmpty(strings.TrimSpace(os.Getenv("ADDRESS")), "0.0.0.0"),
		Port:     envInt("PORT", 3000),
		// 8192000 bytes (~8 MB) per chunk by default.
		ChunkSize: envInt("CHUNK_SIZE", 8192000),
		LogLevel:  strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_LEVEL")), "info")),
		// LOG_TYPE is accepted as a fallback alias for LOG_FORMAT.
		LogFormat: strings.ToLower(firstNonEmpty(strings.TrimSpace(os.Getenv("LOG_FORMAT")), strings.TrimSpace(os.Getenv("LOG_TYPE")), "text")),
		AuditLog:  envBool("AUDIT_LOG", true),
	}
	if config.LogFormat != "json" && config.LogFormat != "text" {
		config.LogFormat = "text"
	}
	// Reject out-of-range ports (e.g. PORT=-1 or PORT=70000) here rather
	// than letting the listener fail later with a confusing error.
	if config.Port < 1 || config.Port > 65535 {
		config.Port = 3000
	}
	// A non-positive chunk size would produce a zero-length read buffer in
	// the blob ingest path; fall back to the default instead.
	if config.ChunkSize <= 0 {
		config.ChunkSize = 8192000
	}
	return config
}
// envInt reads the environment variable key as an integer. It returns
// defaultValue when the variable is unset, blank, or not a valid number.
func envInt(key string, defaultValue int) int {
	trimmed := strings.TrimSpace(os.Getenv(key))
	if trimmed == "" {
		return defaultValue
	}
	if parsed, err := strconv.Atoi(trimmed); err == nil {
		return parsed
	}
	return defaultValue
}
// envBool reads the environment variable key as a boolean (anything
// strconv.ParseBool accepts: 1/0, t/f, true/false, …). It returns
// defaultValue when the variable is unset, blank, or unparseable.
func envBool(key string, defaultValue bool) bool {
	trimmed := strings.TrimSpace(os.Getenv(key))
	if trimmed == "" {
		return defaultValue
	}
	if parsed, err := strconv.ParseBool(trimmed); err == nil {
		return parsed
	}
	return defaultValue
}
// firstNonEmpty returns the first candidate that is not the empty
// string, or "" when every candidate (or none) is empty.
func firstNonEmpty(values ...string) string {
	result := ""
	for _, candidate := range values {
		if candidate == "" {
			continue
		}
		result = candidate
		break
	}
	return result
}
func sanitizeDataPath(raw string) string {
cleaned := strings.TrimSpace(raw)
if cleaned == "" {
cleaned = "."
}
cleaned = filepath.Clean(cleaned)
if abs, err := filepath.Abs(cleaned); err == nil {
return abs
}
return cleaned
}