mirror of
https://github.com/ZizzyDizzyMC/linx-server.git
synced 2026-01-22 18:05:09 +00:00
Merge pull request #19 from BBaoVanC/file-locking
Lock files while they're being written
This commit is contained in:
commit
57f867811a
10 changed files with 109 additions and 11 deletions
|
|
@ -18,11 +18,11 @@ ENV SSL_CERT_FILE /etc/ssl/cert.pem
|
|||
COPY static /go/src/github.com/andreimarcu/linx-server/static/
|
||||
COPY templates /go/src/github.com/andreimarcu/linx-server/templates/
|
||||
|
||||
RUN mkdir -p /data/files && mkdir -p /data/meta && chown -R 65534:65534 /data
|
||||
RUN mkdir -p /data/files && mkdir -p /data/meta && mkdir -p /data/locks && chown -R 65534:65534 /data
|
||||
|
||||
VOLUME ["/data/files", "/data/meta"]
|
||||
VOLUME ["/data/files", "/data/meta", "/data/locks"]
|
||||
|
||||
EXPOSE 8080
|
||||
USER nobody
|
||||
ENTRYPOINT ["/usr/local/bin/linx-server", "-bind=0.0.0.0:8080", "-filespath=/data/files/", "-metapath=/data/meta/"]
|
||||
ENTRYPOINT ["/usr/local/bin/linx-server", "-bind=0.0.0.0:8080", "-filespath=/data/files/", "-metapath=/data/meta/", "-lockspath=/data/locks/"]
|
||||
CMD ["-sitename=linx", "-allowhotlink"]
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package localfs
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
|
@ -16,6 +17,7 @@ import (
|
|||
type LocalfsBackend struct {
|
||||
metaPath string
|
||||
filesPath string
|
||||
locksPath string
|
||||
}
|
||||
|
||||
type MetadataJSON struct {
|
||||
|
|
@ -127,6 +129,41 @@ func (b LocalfsBackend) writeMetadata(key string, metadata backends.Metadata) er
|
|||
return nil
|
||||
}
|
||||
|
||||
func (b LocalfsBackend) Lock(filename string) (err error) {
|
||||
lockPath := path.Join(b.locksPath, filename)
|
||||
|
||||
lock, err := os.Create(lockPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lock.Close()
|
||||
return
|
||||
}
|
||||
|
||||
func (b LocalfsBackend) Unlock(filename string) (err error) {
|
||||
lockPath := path.Join(b.locksPath, filename)
|
||||
|
||||
err = os.Remove(lockPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (b LocalfsBackend) CheckLock(filename string) (locked bool, err error) {
|
||||
lockPath := path.Join(b.locksPath, filename)
|
||||
|
||||
if _, err := os.Stat(lockPath); errors.Is(err, os.ErrNotExist) {
|
||||
return false, nil
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b LocalfsBackend) Put(key string, r io.Reader, expiry time.Time, deleteKey, accessKey string, srcIp string) (m backends.Metadata, err error) {
|
||||
filePath := path.Join(b.filesPath, key)
|
||||
|
||||
|
|
@ -201,9 +238,10 @@ func (b LocalfsBackend) List() ([]string, error) {
|
|||
return output, nil
|
||||
}
|
||||
|
||||
func NewLocalfsBackend(metaPath string, filesPath string) LocalfsBackend {
|
||||
func NewLocalfsBackend(metaPath string, filesPath string, locksPath string) LocalfsBackend {
|
||||
return LocalfsBackend{
|
||||
metaPath: metaPath,
|
||||
filesPath: filesPath,
|
||||
locksPath: locksPath,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package s3
|
|||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
|
@ -156,6 +157,21 @@ func unmapMetadata(input map[string]*string) (m backends.Metadata, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func (b S3Backend) Lock(filename string) (err error) {
|
||||
log.Printf("Locking is not supported on S3")
|
||||
return
|
||||
}
|
||||
|
||||
func (b S3Backend) Unlock(filename string) (err error) {
|
||||
log.Printf("Locking is not supported on S3")
|
||||
return
|
||||
}
|
||||
|
||||
func (b S3Backend) CheckLock(filename string) (locked bool, err error) {
|
||||
log.Printf("Locking is not supported on S3")
|
||||
return
|
||||
}
|
||||
|
||||
func (b S3Backend) Put(key string, r io.Reader, expiry time.Time, deleteKey, accessKey string, srcIp string) (m backends.Metadata, err error) {
|
||||
tmpDst, err := ioutil.TempFile("", "linx-server-upload")
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -12,6 +12,9 @@ type StorageBackend interface {
|
|||
Exists(key string) (bool, error)
|
||||
Head(key string) (Metadata, error)
|
||||
Get(key string) (Metadata, io.ReadCloser, error)
|
||||
Lock(filename string) (error)
|
||||
Unlock(filename string) (error)
|
||||
CheckLock(filename string) (bool, error)
|
||||
Put(key string, r io.Reader, expiry time.Time, deleteKey, accessKey string, srcIp string) (Metadata, error)
|
||||
PutMetadata(key string, m Metadata) error
|
||||
ServeFile(key string, w http.ResponseWriter, r *http.Request) error
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ import (
|
|||
"github.com/andreimarcu/linx-server/expiry"
|
||||
)
|
||||
|
||||
func Cleanup(filesDir string, metaDir string, noLogs bool) {
|
||||
fileBackend := localfs.NewLocalfsBackend(metaDir, filesDir)
|
||||
func Cleanup(filesDir string, metaDir string, locksDir string, noLogs bool) {
|
||||
fileBackend := localfs.NewLocalfsBackend(metaDir, filesDir, locksDir)
|
||||
|
||||
files, err := fileBackend.List()
|
||||
if err != nil {
|
||||
|
|
@ -17,6 +17,15 @@ func Cleanup(filesDir string, metaDir string, noLogs bool) {
|
|||
}
|
||||
|
||||
for _, filename := range files {
|
||||
locked, err := fileBackend.CheckLock(filename)
|
||||
if err != nil {
|
||||
log.Printf("Error checking if %s is locked: %s", filename, err)
|
||||
}
|
||||
if locked {
|
||||
log.Printf("%s is locked, it will be ignored", filename)
|
||||
continue
|
||||
}
|
||||
|
||||
metadata, err := fileBackend.Head(filename)
|
||||
if err != nil {
|
||||
if !noLogs {
|
||||
|
|
@ -33,10 +42,12 @@ func Cleanup(filesDir string, metaDir string, noLogs bool) {
|
|||
}
|
||||
}
|
||||
|
||||
func PeriodicCleanup(minutes time.Duration, filesDir string, metaDir string, noLogs bool) {
|
||||
func PeriodicCleanup(minutes time.Duration, filesDir string, metaDir string, locksDir string, noLogs bool) {
|
||||
c := time.Tick(minutes)
|
||||
for range c {
|
||||
Cleanup(filesDir, metaDir, noLogs)
|
||||
log.Printf("Running periodic cleanup")
|
||||
Cleanup(filesDir, metaDir, locksDir, noLogs)
|
||||
log.Printf("Finished periodic cleanup")
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ func TestContentSecurityPolicy(t *testing.T) {
|
|||
Config.siteURL = "http://linx.example.org/"
|
||||
Config.filesDir = path.Join(os.TempDir(), generateBarename())
|
||||
Config.metaDir = Config.filesDir + "_meta"
|
||||
Config.locksDir = Config.filesDir + "_locks"
|
||||
Config.maxSize = 1024 * 1024 * 1024
|
||||
Config.noLogs = true
|
||||
Config.siteName = "linx"
|
||||
|
|
|
|||
|
|
@ -9,15 +9,18 @@ import (
|
|||
func main() {
|
||||
var filesDir string
|
||||
var metaDir string
|
||||
var locksDir string
|
||||
var noLogs bool
|
||||
|
||||
flag.StringVar(&filesDir, "filespath", "files/",
|
||||
"path to files directory")
|
||||
flag.StringVar(&metaDir, "metapath", "meta/",
|
||||
"path to metadata directory")
|
||||
flag.StringVar(&locksDir, "lockspath", "locks/",
|
||||
"path to locks directory")
|
||||
flag.BoolVar(&noLogs, "nologs", false,
|
||||
"don't log deleted files")
|
||||
flag.Parse()
|
||||
|
||||
cleanup.Cleanup(filesDir, metaDir, noLogs)
|
||||
cleanup.Cleanup(filesDir, metaDir, locksDir, noLogs)
|
||||
}
|
||||
|
|
|
|||
12
server.go
12
server.go
|
|
@ -43,6 +43,7 @@ var Config struct {
|
|||
bind string
|
||||
filesDir string
|
||||
metaDir string
|
||||
locksDir string
|
||||
siteName string
|
||||
siteURL string
|
||||
sitePath string
|
||||
|
|
@ -137,6 +138,11 @@ func setup() *web.Mux {
|
|||
log.Fatal("Could not create metadata directory:", err)
|
||||
}
|
||||
|
||||
err = os.MkdirAll(Config.locksDir, 0755)
|
||||
if err != nil {
|
||||
log.Fatal("Could not create locks directory:", err)
|
||||
}
|
||||
|
||||
if Config.siteURL != "" {
|
||||
// ensure siteURL ends with '/'
|
||||
if lastChar := Config.siteURL[len(Config.siteURL)-1:]; lastChar != "/" {
|
||||
|
|
@ -161,9 +167,9 @@ func setup() *web.Mux {
|
|||
if Config.s3Bucket != "" {
|
||||
storageBackend = s3.NewS3Backend(Config.s3Bucket, Config.s3Region, Config.s3Endpoint, Config.s3ForcePathStyle)
|
||||
} else {
|
||||
storageBackend = localfs.NewLocalfsBackend(Config.metaDir, Config.filesDir)
|
||||
storageBackend = localfs.NewLocalfsBackend(Config.metaDir, Config.filesDir, Config.locksDir)
|
||||
if Config.cleanupEveryMinutes > 0 {
|
||||
go cleanup.PeriodicCleanup(time.Duration(Config.cleanupEveryMinutes)*time.Minute, Config.filesDir, Config.metaDir, Config.noLogs)
|
||||
go cleanup.PeriodicCleanup(time.Duration(Config.cleanupEveryMinutes)*time.Minute, Config.filesDir, Config.metaDir, Config.locksDir, Config.noLogs)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -249,6 +255,8 @@ func main() {
|
|||
"path to files directory")
|
||||
flag.StringVar(&Config.metaDir, "metapath", "meta/",
|
||||
"path to metadata directory")
|
||||
flag.StringVar(&Config.locksDir, "lockspath", "locks/",
|
||||
"path to locks directory")
|
||||
flag.BoolVar(&Config.basicAuth, "basicauth", false,
|
||||
"allow logging by basic auth password")
|
||||
flag.BoolVar(&Config.noLogs, "nologs", false,
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ func TestSetup(t *testing.T) {
|
|||
Config.siteURL = "http://linx.example.org/"
|
||||
Config.filesDir = path.Join(os.TempDir(), generateBarename())
|
||||
Config.metaDir = Config.filesDir + "_meta"
|
||||
Config.locksDir = Config.filesDir + "_locks"
|
||||
Config.maxSize = 1024 * 1024 * 1024
|
||||
Config.noLogs = true
|
||||
Config.siteName = "linx"
|
||||
|
|
@ -1198,6 +1199,7 @@ func TestInferSiteURLHTTPSFastCGI(t *testing.T) {
|
|||
func TestShutdown(t *testing.T) {
|
||||
os.RemoveAll(Config.filesDir)
|
||||
os.RemoveAll(Config.metaDir)
|
||||
os.RemoveAll(Config.locksDir)
|
||||
}
|
||||
|
||||
func TestPutAndGetCLI(t *testing.T) {
|
||||
|
|
|
|||
16
upload.go
16
upload.go
|
|
@ -6,6 +6,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
|
@ -320,6 +321,13 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
|
|||
return upload, errors.New("Prohibited filename")
|
||||
}
|
||||
|
||||
// Lock the upload
|
||||
log.Printf("Lock %s", upload.Filename)
|
||||
err = storageBackend.Lock(upload.Filename)
|
||||
if err != nil {
|
||||
return upload, err
|
||||
}
|
||||
|
||||
// Get the rest of the metadata needed for storage
|
||||
var fileExpiry time.Time
|
||||
maxDurationTime := time.Duration(Config.maxDurationTime) * time.Second
|
||||
|
|
@ -343,11 +351,19 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
|
|||
if Config.disableAccessKey == true {
|
||||
upReq.accessKey = ""
|
||||
}
|
||||
log.Printf("Write %s", upload.Filename)
|
||||
upload.Metadata, err = storageBackend.Put(upload.Filename, io.MultiReader(bytes.NewReader(header), upReq.src), fileExpiry, upReq.deleteKey, upReq.accessKey, upReq.srcIp)
|
||||
if err != nil {
|
||||
return upload, err
|
||||
}
|
||||
|
||||
// Unlock the upload
|
||||
log.Printf("Unlock %s", upload.Filename)
|
||||
err = storageBackend.Unlock(upload.Filename)
|
||||
if err != nil {
|
||||
return upload, err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue