Add chunked uploads (Resumable currently broken)

This commit is contained in:
2026-04-14 16:39:43 +02:00
parent 8ae5dfc483
commit 6065b4d95f
8 changed files with 428 additions and 84 deletions

View File

@@ -5,8 +5,10 @@ import (
"ResendIt/internal/notify"
"ResendIt/internal/util"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
@@ -238,3 +240,161 @@ func (h *Handler) Export(c *gin.Context) {
c.JSON(http.StatusOK, records)
}
// Chunked upload handlers: init -> chunk (xN) -> complete, with status for resume.
// UploadInit starts a chunked upload session. It expects a JSON body with
// the target filename, the expected number of chunks, and the total size,
// creates a per-session temp directory under tmp/, and returns the generated
// fileId the client must send with every subsequent chunk request.
func (h *Handler) UploadInit(c *gin.Context) {
	var req struct {
		Filename    string `json:"filename"`
		TotalChunks int    `json:"totalChunks"`
		Size        int64  `json:"size"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}
	// Reject obviously broken sessions up front rather than letting them
	// fail later in UploadComplete.
	if req.Filename == "" || req.TotalChunks <= 0 || req.Size <= 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}
	fileID := util.RandomString(32)
	path := filepath.Join("tmp", fileID)
	// 0o700: chunk data is private to the server process. os.ModePerm
	// (0777) would create a world-writable directory.
	if err := os.MkdirAll(path, 0o700); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create temp dir"})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"fileId": fileID,
	})
}
// UploadChunk stores one chunk of an in-progress upload session. The session
// is identified by the "fileId" header (as returned by UploadInit) and the
// zero-based "chunkIndex" header; the chunk bytes arrive as the multipart
// form file "chunk".
func (h *Handler) UploadChunk(c *gin.Context) {
	fileID := c.GetHeader("fileId")
	chunkIndex := c.GetHeader("chunkIndex")
	if fileID == "" || chunkIndex == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "missing headers"})
		return
	}
	// fileID is attacker-controlled and is joined into a filesystem path
	// below; accept only a plain single path element so values such as
	// "../../etc" or "a/b" cannot escape the tmp root.
	if fileID != filepath.Base(fileID) || fileID == "." || fileID == ".." {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid fileId"})
		return
	}
	idx, err := strconv.Atoi(chunkIndex)
	if err != nil || idx < 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid chunkIndex"})
		return
	}
	// Only accept chunks for sessions UploadInit actually created.
	dir := filepath.Join("tmp", fileID)
	if _, err := os.Stat(dir); err != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "unknown fileId"})
		return
	}
	file, err := c.FormFile("chunk")
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "missing chunk"})
		return
	}
	src, err := file.Open()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "cannot open chunk"})
		return
	}
	defer src.Close()
	chunkPath := filepath.Join(dir, fmt.Sprintf("chunk_%d", idx))
	dst, err := os.Create(chunkPath)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "cannot save chunk"})
		return
	}
	defer dst.Close()
	if _, err := io.Copy(dst, src); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "write failed"})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "ok"})
}
// UploadComplete assembles the previously uploaded chunks for a session,
// streams them in order through the existing single-file upload path, and
// removes the chunk directory on success. The response carries the stored
// record's id and view key.
func (h *Handler) UploadComplete(c *gin.Context) {
	var req struct {
		FileID      string `json:"fileId"`
		Filename    string `json:"filename"`
		TotalChunks int    `json:"totalChunks"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}
	// req.FileID is attacker-controlled and becomes a path element; allow
	// only a plain single name so "../…" cannot escape the tmp root.
	if req.FileID == "" || req.FileID != filepath.Base(req.FileID) ||
		req.FileID == "." || req.FileID == ".." || req.TotalChunks <= 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}
	tmpDir := filepath.Join("tmp", req.FileID)

	// Stream the chunks, in order, through a pipe into the existing upload
	// service so the assembled file never has to sit in memory at once.
	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		for i := 0; i < req.TotalChunks; i++ {
			chunkPath := filepath.Join(tmpDir, fmt.Sprintf("chunk_%d", i))
			f, err := os.Open(chunkPath)
			if err != nil {
				// Missing chunk: surface the error to the reader side.
				pw.CloseWithError(err)
				return
			}
			_, err = io.Copy(pw, f)
			f.Close()
			if err != nil {
				pw.CloseWithError(err)
				return
			}
		}
	}()
	// Closing the read end on every exit path unblocks the goroutine above
	// if UploadFile stops reading early (e.g. returns an error); otherwise
	// the writer would block on the pipe forever (goroutine leak).
	defer pr.Close()

	// Reuse the existing single-file upload logic.
	record, err := h.service.UploadFile(
		req.Filename,
		pr,
		false,
		24*time.Hour,
	)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	// Success: the assembled file is stored, drop the chunk directory.
	// Best-effort — a leftover dir is harmless.
	_ = os.RemoveAll(tmpDir)
	c.JSON(http.StatusOK, gin.H{
		"id":       record.ID,
		"view_key": record.ViewID,
	})
}
// UploadStatus reports which chunk indices of an upload session have been
// stored so far, letting a client resume an interrupted upload.
func (h *Handler) UploadStatus(c *gin.Context) {
	fileID := c.Param("fileId")
	// fileID comes from the URL and is joined into a path below; accept
	// only a plain single path element (no separators, no "..").
	if fileID == "" || fileID != filepath.Base(fileID) || fileID == "." || fileID == ".." {
		c.JSON(http.StatusNotFound, gin.H{"error": "not found"})
		return
	}
	dir := filepath.Join("tmp", fileID)
	files, err := os.ReadDir(dir)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "not found"})
		return
	}
	// Non-nil slice so an empty session serializes as [] instead of null.
	uploaded := make([]int, 0, len(files))
	for _, f := range files {
		var idx int
		if _, err := fmt.Sscanf(f.Name(), "chunk_%d", &idx); err == nil {
			uploaded = append(uploaded, idx)
		}
	}
	c.JSON(http.StatusOK, gin.H{
		"uploadedChunks": uploaded,
	})
}

View File

@@ -16,6 +16,12 @@ func RegisterRoutes(r *gin.RouterGroup, h *Handler) {
files.GET("/view/:id", h.View)
files.GET("/delete/:del_id", h.Delete)
// Chunked upload endpoints
files.POST("/upload/init", h.UploadInit)
files.POST("/upload/chunk", h.UploadChunk)
files.POST("/upload/complete", h.UploadComplete)
files.GET("/upload/status/:fileId", h.UploadStatus)
adminRoutes := files.Group("/admin")
adminRoutes.Use(middleware.AuthMiddleware())
adminRoutes.Use(middleware.RequireRole("admin"))