4 Commits

Author SHA1 Message Date
eac20f7fda Merge pull request #10 from ferdzo/hotfix/copy-object-fix-url-encoding-fix
HOTFIX: Copy object and key extraction improvement
2026-03-13 01:32:10 +01:00
9bfdceca08 HOTFIX: Copy object and encoding fixed 2026-03-13 01:29:29 +01:00
6473726a45 Merge pull request #9 from ferdzo/hotfix/streaming-error-and-equalsign-in-sigv4
HOTFIX: Fixed chunked stream problems with size and equal sign encoding
2026-03-13 00:27:57 +01:00
115d825234 HOTFIX: Fixed streaming problems with size and equal sign encoding 2026-03-13 00:25:48 +01:00
5 changed files with 271 additions and 14 deletions

View File

@@ -2,6 +2,7 @@ package api
import ( import (
"bufio" "bufio"
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/xml" "encoding/xml"
@@ -138,11 +139,83 @@ func validateObjectKey(key string) *s3APIError {
return nil return nil
} }
// objectKeyFromRequest extracts the raw object key from the request URL,
// percent-decodes it, and validates it. On a decoding failure it reports
// InvalidArgument; on a validation failure it reports the validator's error.
func objectKeyFromRequest(r *http.Request) (string, *s3APIError) {
	decoded, err := normalizeObjectKey(rawObjectKeyFromRequest(r))
	if err != nil {
		// Copy the sentinel so callers never share a mutable pointer.
		invalidArg := s3ErrInvalidArgument
		return "", &invalidArg
	}
	if validationErr := validateObjectKey(decoded); validationErr != nil {
		return "", validationErr
	}
	return decoded, nil
}
// rawObjectKeyFromRequest returns the still-escaped object key portion of the
// request path. It prefers slicing the escaped URL path after "/<bucket>/" so
// percent-encoding survives intact, falling back to chi's wildcard parameter.
func rawObjectKeyFromRequest(r *http.Request) string {
	if r == nil || r.URL == nil {
		return ""
	}
	bucket := chi.URLParam(r, "bucket")
	if bucket == "" {
		return chi.URLParam(r, "*")
	}
	bucketPrefix := "/" + bucket + "/"
	escaped := r.URL.EscapedPath()
	if !strings.HasPrefix(escaped, bucketPrefix) {
		return chi.URLParam(r, "*")
	}
	// Prefix confirmed above, so plain slicing equals TrimPrefix.
	return escaped[len(bucketPrefix):]
}
// normalizeObjectKey percent-decodes a raw object key. The empty key is
// passed through unchanged; a malformed escape yields an error.
func normalizeObjectKey(raw string) (string, error) {
	if len(raw) == 0 {
		return "", nil
	}
	decoded, err := url.PathUnescape(raw)
	if err != nil {
		return "", err
	}
	return decoded, nil
}
// parseCopySource splits an x-amz-copy-source header value into bucket and
// decoded object key. Accepts both "bucket/key" and "/bucket/key" forms and
// ignores any query string (e.g. ?versionId=...). The key is percent-decoded
// and validated; the bucket is returned as-is.
func parseCopySource(raw string) (string, string, error) {
	source := strings.TrimPrefix(strings.TrimSpace(raw), "/")
	if q := strings.IndexByte(source, '?'); q >= 0 {
		source = source[:q]
	}
	bucket, encodedKey, ok := strings.Cut(source, "/")
	if !ok || encodedKey == "" || strings.TrimSpace(bucket) == "" {
		return "", "", errors.New("invalid copy source")
	}
	key, err := normalizeObjectKey(encodedKey)
	if err != nil {
		return "", "", err
	}
	if apiErr := validateObjectKey(key); apiErr != nil {
		return "", "", errors.New(apiErr.Code)
	}
	return bucket, key, nil
}
// authorizeCopySource checks that the caller may read the copy source object
// (GetObject on bucket/key). When auth is disabled it always allows.
func (h *Handler) authorizeCopySource(r *http.Request, bucket, key string) error {
	if h.authSvc == nil || !h.authSvc.Config().Enabled {
		// Auth subsystem off: every copy source is readable.
		return nil
	}
	authCtx, ok := auth.GetRequestContext(r.Context())
	if !ok || !authCtx.Authenticated {
		return auth.ErrAccessDenied
	}
	target := auth.RequestTarget{
		Action: auth.ActionGetObject,
		Bucket: bucket,
		Key:    key,
	}
	return h.authSvc.Authorize(authCtx.AccessKeyID, target)
}
func (h *Handler) handleGetObject(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleGetObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket") bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*") key, apiErr := objectKeyFromRequest(r)
if apiErr != nil {
if apiErr := validateObjectKey(key); apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path) writeS3Error(w, r, *apiErr, r.URL.Path)
return return
} }
@@ -199,8 +272,8 @@ func (h *Handler) handleGetObject(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) { func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket") bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*") key, apiErr := objectKeyFromRequest(r)
if apiErr := validateObjectKey(key); apiErr != nil { if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path) writeS3Error(w, r, *apiErr, r.URL.Path)
return return
} }
@@ -275,8 +348,8 @@ func (h *Handler) handlePostObject(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) { func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket") bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*") key, apiErr := objectKeyFromRequest(r)
if apiErr := validateObjectKey(key); apiErr != nil { if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path) writeS3Error(w, r, *apiErr, r.URL.Path)
return return
} }
@@ -289,6 +362,10 @@ func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
writeS3Error(w, r, s3ErrInvalidPart, r.URL.Path) writeS3Error(w, r, s3ErrInvalidPart, r.URL.Path)
return return
} }
if strings.TrimSpace(r.Header.Get("x-amz-copy-source")) != "" {
writeS3Error(w, r, s3ErrNotImplemented, r.URL.Path)
return
}
partNumber, err := strconv.Atoi(partNumberRaw) partNumber, err := strconv.Atoi(partNumberRaw)
if err != nil { if err != nil {
@@ -333,6 +410,42 @@ func (h *Handler) handlePutObject(w http.ResponseWriter, r *http.Request) {
} }
} }
if copySourceRaw := strings.TrimSpace(r.Header.Get("x-amz-copy-source")); copySourceRaw != "" {
srcBucket, srcKey, err := parseCopySource(copySourceRaw)
if err != nil {
writeS3Error(w, r, s3ErrInvalidArgument, r.URL.Path)
return
}
if err := h.authorizeCopySource(r, srcBucket, srcKey); err != nil {
writeMappedS3Error(w, r, err)
return
}
manifest, err := h.svc.CopyObject(srcBucket, srcKey, bucket, key)
if err != nil {
writeMappedS3Error(w, r, err)
return
}
response := models.CopyObjectResult{
Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
LastModified: time.Unix(manifest.CreatedAt, 0).UTC().Format("2006-01-02T15:04:05.000Z"),
ETag: `"` + manifest.ETag + `"`,
}
payload, err := xml.MarshalIndent(response, "", " ")
if err != nil {
writeMappedS3Error(w, r, err)
return
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.Header().Set("ETag", `"`+manifest.ETag+`"`)
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(xml.Header))
_, _ = w.Write(payload)
return
}
contentType := r.Header.Get("Content-Type") contentType := r.Header.Get("Content-Type")
if contentType == "" { if contentType == "" {
contentType = "application/octet-stream" contentType = "application/octet-stream"
@@ -400,13 +513,21 @@ func shouldDecodeAWSChunkedPayload(r *http.Request) bool {
return true return true
} }
signingMode := strings.ToLower(r.Header.Get("x-amz-content-sha256")) signingMode := strings.ToLower(r.Header.Get("x-amz-content-sha256"))
return strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload") if strings.HasPrefix(signingMode, "streaming-aws4-hmac-sha256-payload") {
return true
}
return strings.HasPrefix(signingMode, "streaming-unsigned-payload")
} }
func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser { func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
probedReader, isAWSChunked := probeAWSChunkedPayload(src)
if !isAWSChunked {
return io.NopCloser(probedReader)
}
pr, pw := io.Pipe() pr, pw := io.Pipe()
go func() { go func() {
if err := decodeAWSChunkedPayload(src, pw); err != nil { if err := decodeAWSChunkedPayload(probedReader, pw); err != nil {
_ = pw.CloseWithError(err) _ = pw.CloseWithError(err)
return return
} }
@@ -415,6 +536,30 @@ func newAWSChunkedDecodingReader(src io.Reader) io.ReadCloser {
return pr return pr
} }
func probeAWSChunkedPayload(src io.Reader) (io.Reader, bool) {
reader := bufio.NewReaderSize(src, 512)
headerLine, err := reader.ReadSlice('\n')
replay := io.MultiReader(bytes.NewReader(headerLine), reader)
if err != nil {
return replay, false
}
line := strings.TrimRight(string(headerLine), "\r\n")
chunkSizeToken := line
if idx := strings.IndexByte(chunkSizeToken, ';'); idx >= 0 {
chunkSizeToken = chunkSizeToken[:idx]
}
chunkSizeToken = strings.TrimSpace(chunkSizeToken)
if chunkSizeToken == "" {
return replay, false
}
size, parseErr := strconv.ParseInt(chunkSizeToken, 16, 64)
if parseErr != nil || size < 0 {
return replay, false
}
return replay, true
}
func decodeAWSChunkedPayload(src io.Reader, dst io.Writer) error { func decodeAWSChunkedPayload(src io.Reader, dst io.Writer) error {
reader := bufio.NewReader(src) reader := bufio.NewReader(src)
for { for {
@@ -584,8 +729,8 @@ func (h *Handler) handlePostBucket(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handleDeleteObject(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleDeleteObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket") bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*") key, apiErr := objectKeyFromRequest(r)
if apiErr := validateObjectKey(key); apiErr != nil { if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path) writeS3Error(w, r, *apiErr, r.URL.Path)
return return
} }
@@ -621,8 +766,8 @@ func (h *Handler) handleHeadBucket(w http.ResponseWriter, r *http.Request) {
func (h *Handler) handleHeadObject(w http.ResponseWriter, r *http.Request) { func (h *Handler) handleHeadObject(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket") bucket := chi.URLParam(r, "bucket")
key := chi.URLParam(r, "*") key, apiErr := objectKeyFromRequest(r)
if apiErr := validateObjectKey(key); apiErr != nil { if apiErr != nil {
writeS3Error(w, r, *apiErr, r.URL.Path) writeS3Error(w, r, *apiErr, r.URL.Path)
return return
} }

View File

@@ -205,6 +205,29 @@ func (s *Service) AuthenticateRequest(r *http.Request) (RequestContext, error) {
}, nil }, nil
} }
// Authorize reports whether the access key may perform target.Action on the
// target. When auth is disabled every request is allowed; all failure modes
// (blank key, blank action, unknown key, policy denial) collapse to
// ErrAccessDenied.
func (s *Service) Authorize(accessKeyID string, target RequestTarget) error {
	if !s.cfg.Enabled {
		return nil
	}
	key := strings.TrimSpace(accessKeyID)
	if key == "" || target.Action == "" {
		return ErrAccessDenied
	}
	// Lookup errors deliberately look identical to denials.
	policy, err := s.store.GetAuthPolicy(key)
	if err != nil || !isAllowed(policy, target) {
		return ErrAccessDenied
	}
	return nil
}
func (s *Service) CreateUser(input CreateUserInput) (*CreateUserResult, error) { func (s *Service) CreateUser(input CreateUserInput) (*CreateUserResult, error) {
if !s.cfg.Enabled { if !s.cfg.Enabled {
return nil, ErrAuthNotEnabled return nil, ErrAuthNotEnabled

View File

@@ -259,7 +259,57 @@ func canonicalPath(u *url.URL) string {
if path == "" { if path == "" {
return "/" return "/"
} }
return path return awsEncodePath(path)
}
// awsEncodePath canonicalizes a URL path for SigV4 signing: '/' and
// unreserved characters pass through, well-formed %XX escapes are uppercased,
// and every other byte is percent-encoded with uppercase hex digits.
func awsEncodePath(path string) string {
	const upperHexDigits = "0123456789ABCDEF"
	isHexDigit := func(b byte) bool {
		return ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F')
	}
	upperHexByte := func(b byte) byte {
		if 'a' <= b && b <= 'f' {
			return b - 'a' + 'A'
		}
		return b
	}
	var out strings.Builder
	out.Grow(len(path))
	for i := 0; i < len(path); i++ {
		c := path[i]
		switch {
		case c == '/' || c == '-' || c == '_' || c == '.' || c == '~' ||
			('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') || ('0' <= c && c <= '9'):
			out.WriteByte(c)
		case c == '%' && i+2 < len(path) && isHexDigit(path[i+1]) && isHexDigit(path[i+2]):
			// Existing escape: normalize its hex digits to uppercase.
			out.WriteByte('%')
			out.WriteByte(upperHexByte(path[i+1]))
			out.WriteByte(upperHexByte(path[i+2]))
			i += 2
		default:
			out.WriteByte('%')
			out.WriteByte(upperHexDigits[c>>4])
			out.WriteByte(upperHexDigits[c&0x0F])
		}
	}
	return out.String()
}
// isUnreserved reports whether ch is an RFC 3986 unreserved byte
// (ALPHA / DIGIT / "-" / "_" / "." / "~").
func isUnreserved(ch byte) bool {
	switch {
	case 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z', '0' <= ch && ch <= '9':
		return true
	}
	switch ch {
	case '-', '_', '.', '~':
		return true
	}
	return false
}
// isHex reports whether ch is an ASCII hexadecimal digit (either case).
func isHex(ch byte) bool {
	switch {
	case '0' <= ch && ch <= '9':
		return true
	case 'a' <= ch && ch <= 'f':
		return true
	case 'A' <= ch && ch <= 'F':
		return true
	default:
		return false
	}
}
// toUpperHex uppercases a lowercase hex digit byte; other bytes pass through.
func toUpperHex(ch byte) byte {
	if 'a' <= ch && ch <= 'f' {
		return ch - 'a' + 'A'
	}
	return ch
}
// hexUpper maps a nibble value (0-15) to its uppercase hex digit.
func hexUpper(nibble byte) byte {
	if nibble <= 9 {
		return '0' + nibble
	}
	return 'A' + nibble - 10
}
type queryPair struct { type queryPair struct {

View File

@@ -158,6 +158,13 @@ type CompleteMultipartUploadResult struct {
Location string `xml:"Location,omitempty"` Location string `xml:"Location,omitempty"`
} }
// CopyObjectResult is the XML response body for the S3 CopyObject operation
// (PUT object with an x-amz-copy-source header), S3 API schema 2006-03-01.
type CopyObjectResult struct {
// XMLName pins the root element name to <CopyObjectResult>.
XMLName xml.Name `xml:"CopyObjectResult"`
// Xmlns is the S3 namespace attribute; omitted from output when empty.
Xmlns string `xml:"xmlns,attr,omitempty"`
// LastModified is the copy's creation timestamp, pre-formatted as a string
// by the caller (the handler formats it as 2006-01-02T15:04:05.000Z).
LastModified string `xml:"LastModified"`
// ETag is the entity tag of the copied object, including surrounding quotes.
ETag string `xml:"ETag"`
}
type ListPartsResult struct { type ListPartsResult struct {
XMLName xml.Name `xml:"ListPartsResult"` XMLName xml.Name `xml:"ListPartsResult"`
Xmlns string `xml:"xmlns,attr"` Xmlns string `xml:"xmlns,attr"`

View File

@@ -104,6 +104,38 @@ func (s *ObjectService) PutObject(bucket, key, contentType string, input io.Read
return manifest, nil return manifest, nil
} }
// CopyObject performs a metadata-only copy: it writes a new manifest at
// (dstBucket, dstKey) that reuses the source object's chunk list, size,
// content type, and ETag. No chunk data is read or rewritten.
func (s *ObjectService) CopyObject(srcBucket, srcKey, dstBucket, dstKey string) (*models.ObjectManifest, error) {
start := time.Now()
success := false
// Record latency and outcome for this service call on every exit path.
defer func() {
metrics.Default.ObserveService("copy_object", time.Since(start), success)
}()
// Hold the GC lock across read-manifest + write-manifest so the collector
// cannot reclaim the source's chunks in between.
// NOTE(review): assumes the garbage collector derives chunk liveness from
// persisted manifests — confirm against the GC implementation.
unlock := s.acquireGCRLock()
defer unlock()
source, err := s.metadata.GetManifest(srcBucket, srcKey)
if err != nil {
return nil, err
}
// The copy shares content identity with the source (same ETag) but gets
// its own creation time.
manifest := &models.ObjectManifest{
Bucket: dstBucket,
Key: dstKey,
Size: source.Size,
ContentType: source.ContentType,
ETag: source.ETag,
// Clone the chunk list so the two manifests never alias one slice.
Chunks: append([]string(nil), source.Chunks...),
CreatedAt: time.Now().Unix(),
}
if err := s.metadata.PutManifest(manifest); err != nil {
return nil, err
}
success = true
return manifest, nil
}
func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) { func (s *ObjectService) GetObject(bucket, key string) (io.ReadCloser, *models.ObjectManifest, error) {
start := time.Now() start := time.Now()