diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml
index 95aad096..d0b86012 100644
--- a/.github/workflows/go-tests.yml
+++ b/.github/workflows/go-tests.yml
@@ -3,20 +3,20 @@ name: tests
 on: [push, pull_request]
 
 jobs:
-    build:
-        runs-on: ubuntu-latest
-        steps:
-            - uses: actions/checkout@v2
-              with:
-                  fetch-depth: 2
-            - uses: actions/setup-go@v2
-              with:
-                  go-version: '1.18'
-            - name: Run coverage
-              run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
-            - name: Upload coverage to Codecov
-              uses: codecov/codecov-action@v2
-              with:
-                  token: ${{ secrets.CODECOV_TOKEN }}
-                  files: coverage.out
-                  flags: unit-linux
+    build:
+        runs-on: ubuntu-latest
+        steps:
+            - uses: actions/checkout@v2
+              with:
+                  fetch-depth: 2
+            - uses: actions/setup-go@v2
+              with:
+                  go-version: "1.19"
+            - name: Run coverage
+              run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
+            - name: Upload coverage to Codecov
+              uses: codecov/codecov-action@v2
+              with:
+                  token: ${{ secrets.CODECOV_TOKEN }}
+                  files: coverage.out
+                  flags: unit-linux
diff --git a/.github_build/Build.alpine.env b/.github_build/Build.alpine.env
index d367892b..ced95107 100644
--- a/.github_build/Build.alpine.env
+++ b/.github_build/Build.alpine.env
@@ -1,5 +1,5 @@
 # CORE ALPINE BASE IMAGE
 OS_NAME=alpine
 OS_VERSION=3.16
-GOLANG_IMAGE=golang:1.19.3-alpine3.16
-CORE_VERSION=16.11.0
+GOLANG_IMAGE=golang:1.20-alpine3.16
+CORE_VERSION=16.12.0
diff --git a/.github_build/Build.ubuntu.env b/.github_build/Build.ubuntu.env
index 388d580b..4e02a698 100644
--- a/.github_build/Build.ubuntu.env
+++ b/.github_build/Build.ubuntu.env
@@ -1,5 +1,5 @@
 # CORE UBUNTU BASE IMAGE
 OS_NAME=ubuntu
 OS_VERSION=20.04
-GOLANG_IMAGE=golang:1.19.3-alpine3.16
-CORE_VERSION=16.11.0
+GOLANG_IMAGE=golang:1.20-alpine3.16
+CORE_VERSION=16.12.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4cdea92..21042f0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Core
 
+### Core v16.11.0 > v16.12.0
+
+- Add S3 storage support
+- Add support for variables in placeholder parameter
+- Add support for RTMP token as stream key as last element in path
+- Add support for soft memory limit with debug.memory_limit_mbytes in config
+- Add support for partial process config updates
+- Add support for alternative syntax for auth0 tenants as environment variable
+- Fix config timestamps created_at and loaded_at
+- Fix /config/reload return type
+- Fix modifying DTS in RTMP packets ([restreamer/#487](https://github.com/datarhei/restreamer/issues/487), [restreamer/#367](https://github.com/datarhei/restreamer/issues/367))
+- Fix default internal SRT latency to 20ms
+
 ### Core v16.10.1 > v16.11.0
 
 - Add FFmpeg 4.4 to FFmpeg 5.1 migration tool
diff --git a/Dockerfile b/Dockerfile
index 2cd0a8a2..da7039c7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-ARG GOLANG_IMAGE=golang:1.19.3-alpine3.16
+ARG GOLANG_IMAGE=golang:1.20-alpine3.16
 
 ARG BUILD_IMAGE=alpine:3.16
 
diff --git a/Dockerfile.test b/Dockerfile.test
index bc308903..521784b5 100644
--- a/Dockerfile.test
+++ b/Dockerfile.test
@@ -1,4 +1,4 @@
-FROM golang:1.19.3-alpine3.16
+FROM golang:1.20-alpine3.16
 
 RUN apk add alpine-sdk
 
diff --git a/README.md b/README.md
index 27d81a47..bd92149e 100644
--- a/README.md
+++ b/README.md
@@ -16,47 +16,47 @@ The datarhei Core is a process management solution for FFmpeg that offers a rang
 
 The objectives of development are:
 
-* Unhindered use of FFmpeg processes
-* Portability of FFmpeg, including management across development and production environments
-* Scalability of 
FFmpeg-based applications through the ability to offload processes to additional instances -* Streamlining of media product development by focusing on features and design. +- Unhindered use of FFmpeg processes +- Portability of FFmpeg, including management across development and production environments +- Scalability of FFmpeg-based applications through the ability to offload processes to additional instances +- Streamlining of media product development by focusing on features and design. ## What issues have been resolved thus far? ### Process management -* Run multiple processes via API -* Unrestricted FFmpeg commands in process configuration. -* Error detection and recovery (e.g., FFmpeg stalls, dumps) -* Referencing for process chaining (pipelines) -* Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution) -* Logs (access to current stdout/stderr) -* Log history (configurable log history, e.g., for error analysis) -* Resource limitation (max. CPU and MEMORY usage per process) -* Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime) -* Input verification (like FFprobe) -* Metadata (option to store additional information like a title) +- Run multiple processes via API +- Unrestricted FFmpeg commands in process configuration. +- Error detection and recovery (e.g., FFmpeg stalls, dumps) +- Referencing for process chaining (pipelines) +- Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution) +- Logs (access to current stdout/stderr) +- Log history (configurable log history, e.g., for error analysis) +- Resource limitation (max. CPU and MEMORY usage per process) +- Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime) +- Input verification (like FFprobe) +- Metadata (option to store additional information like a title) ### Media delivery -* Configurable file systems (in-memory, disk-mount, S3) -* HTTP/S, RTMP/S, and SRT services, including Let's Encrypt -* Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion) -* Viewer session API and logging +- Configurable file systems (in-memory, disk-mount, S3) +- HTTP/S, RTMP/S, and SRT services, including Let's Encrypt +- Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion) +- Viewer session API and logging ### Misc -* HTTP REST and GraphQL API -* Swagger documentation -* Metrics incl. Prometheus support (also detects POSIX and cgroups resources) -* Docker images for fast setup of development environments up to the integration of cloud resources +- HTTP REST and GraphQL API +- Swagger documentation +- Metrics incl. Prometheus support (also detects POSIX and cgroups resources) +- Docker images for fast setup of development environments up to the integration of cloud resources ## Docker images -- datarhei/core:latest (AMD64, ARM64, ARMv7) -- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64) -- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7) -- datarhei/core:vaapi-latest (Intel VAAPI, AMD64) +- datarhei/core:latest (AMD64, ARM64, ARMv7) +- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64) +- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7) +- datarhei/core:vaapi-latest (Intel VAAPI, AMD64) ## Quick start @@ -80,12 +80,12 @@ docker run --name core -d \ ## Documentation -Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core). 
+Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core). -- [Quick start](https://docs.datarhei.com/core/guides/beginner) -- [Installation](https://docs.datarhei.com/core/installation) -- [Configuration](https://docs.datarhei.com/core/configuration) -- [Coding](https://docs.datarhei.com/core/development/coding) +- [Quick start](https://docs.datarhei.com/core/guides/beginner) +- [Installation](https://docs.datarhei.com/core/installation) +- [Configuration](https://docs.datarhei.com/core/configuration) +- [Coding](https://docs.datarhei.com/core/development/coding) ## License diff --git a/app/api/api.go b/app/api/api.go index 77145852..f82846f5 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -6,6 +6,7 @@ import ( "fmt" "io" golog "log" + "math" gonet "net" gohttp "net/http" "net/url" @@ -21,6 +22,7 @@ import ( "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/http" "github.com/datarhei/core/v16/http/cache" + httpfs "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/io/fs" @@ -30,8 +32,9 @@ import ( "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/prometheus" "github.com/datarhei/core/v16/restream" + restreamapp "github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/replace" - "github.com/datarhei/core/v16/restream/store" + restreamstore "github.com/datarhei/core/v16/restream/store" "github.com/datarhei/core/v16/rtmp" "github.com/datarhei/core/v16/service" "github.com/datarhei/core/v16/session" @@ -39,6 +42,7 @@ import ( "github.com/datarhei/core/v16/update" "github.com/caddyserver/certmagic" + "go.uber.org/zap" ) // The API interface is the implementation for the restreamer API. 
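The new `math` import above exists to support the soft memory limit announced in the changelog (`debug.memory_limit_mbytes`); the corresponding `debug.SetMemoryLimit` call appears further down in this file's diff. A minimal, self-contained sketch of that pattern, assuming Go 1.19+ (`runtime/debug.SetMemoryLimit` was introduced in Go 1.19, which is also why the toolchain images are bumped in this PR); the helper name is illustrative, not from the repo:

```go
package main

import (
	"math"
	"runtime/debug"
)

// applyMemoryLimit mirrors the pattern used later in this diff: a positive
// value (in MiB) becomes a soft limit for the Go runtime, anything else
// restores the documented "no limit" default of math.MaxInt64.
func applyMemoryLimit(limitMiB int64) {
	if limitMiB > 0 {
		// SetMemoryLimit takes bytes and returns the previous limit.
		debug.SetMemoryLimit(limitMiB * 1024 * 1024)
	} else {
		debug.SetMemoryLimit(math.MaxInt64)
	}
}

func main() {
	applyMemoryLimit(256) // e.g. debug.memory_limit_mbytes=256
}
```

The limit is soft: as the heap approaches it, the runtime collects more aggressively, but it will exceed the limit rather than abort the process.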
@@ -66,6 +70,7 @@ type api struct { ffmpeg ffmpeg.FFmpeg diskfs fs.Filesystem memfs fs.Filesystem + s3fs map[string]fs.Filesystem rtmpserver rtmp.Server srtserver srt.Server metrics monitor.HistoryMonitor @@ -115,6 +120,7 @@ var ErrConfigReload = fmt.Errorf("configuration reload") func New(configpath string, logwriter io.Writer) (API, error) { a := &api{ state: "idle", + s3fs: map[string]fs.Filesystem{}, } a.config.path = configpath @@ -147,7 +153,8 @@ func (a *api) Reload() error { logger := log.New("Core").WithOutput(log.NewConsoleWriter(a.log.writer, log.Lwarn, true)) - store, err := configstore.NewJSON(a.config.path, func() { + rootfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{}) + store, err := configstore.NewJSON(rootfs, a.config.path, func() { a.errorChan <- ErrConfigReload }) if err != nil { @@ -227,6 +234,8 @@ func (a *api) Reload() error { logger.Info().WithFields(logfields).Log("") + logger.Info().WithField("path", a.config.path).Log("Read config file") + configlogger := logger.WithComponent("Config") cfg.Messages(func(level string, v configvars.Variable, message string) { configlogger = configlogger.WithFields(log.Fields{ @@ -253,6 +262,8 @@ func (a *api) Reload() error { return fmt.Errorf("not all variables are set or valid") } + cfg.LoadedAt = time.Now() + store.SetActive(cfg) a.config.store = store @@ -285,7 +296,13 @@ func (a *api) start() error { } if cfg.Sessions.Persist { - sessionConfig.PersistDir = filepath.Join(cfg.DB.Dir, "sessions") + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: filepath.Join(cfg.DB.Dir, "sessions"), + }) + if err != nil { + return fmt.Errorf("unable to create filesystem for persisting sessions: %w", err) + } + sessionConfig.PersistFS = fs } sessions, err := session.New(sessionConfig) @@ -364,13 +381,18 @@ func (a *api) start() error { a.sessions = sessions } - diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{ - Dir: cfg.Storage.Disk.Dir, - Size: cfg.Storage.Disk.Size * 1024 * 1024, + diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: cfg.Storage.Disk.Dir, Logger: a.log.logger.core.WithComponent("DiskFS"), }) if err != nil { + return fmt.Errorf("disk filesystem: %w", err) + } + + if diskfsRoot, err := filepath.Abs(cfg.Storage.Disk.Dir); err != nil { return err + } else { + diskfs.SetMetadata("base", diskfsRoot) } a.diskfs = diskfs @@ -392,17 +414,60 @@ func (a *api) start() error { } if a.memfs == nil { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: baseMemFS.String(), - Size: cfg.Storage.Memory.Size * 1024 * 1024, - Purge: cfg.Storage.Memory.Purge, + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{ Logger: a.log.logger.core.WithComponent("MemFS"), }) - a.memfs = memfs + memfs.SetMetadata("base", baseMemFS.String()) + + sizedfs, _ := fs.NewSizedFilesystem(memfs, cfg.Storage.Memory.Size*1024*1024, cfg.Storage.Memory.Purge) + + a.memfs = sizedfs } else { - a.memfs.Rebase(baseMemFS.String()) - a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024) + a.memfs.SetMetadata("base", baseMemFS.String()) + if sizedfs, ok := a.memfs.(fs.SizedFilesystem); ok { + sizedfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024) + } + } + + for _, s3 := range cfg.Storage.S3 { + if _, ok := a.s3fs[s3.Name]; ok { + return fmt.Errorf("the name '%s' for a s3 filesystem is already in use", s3.Name) + } + + baseS3FS := url.URL{ + Scheme: "http", + Path: s3.Mountpoint, + } + + host, port, _ := gonet.SplitHostPort(cfg.Address) + if len(host) == 0 { + baseS3FS.Host = "localhost:" + port + } else { + baseS3FS.Host = cfg.Address + } + + if 
s3.Auth.Enable {
+			baseS3FS.User = url.UserPassword(s3.Auth.Username, s3.Auth.Password)
+		}
+
+		s3fs, err := fs.NewS3Filesystem(fs.S3Config{
+			Name:            s3.Name,
+			Endpoint:        s3.Endpoint,
+			AccessKeyID:     s3.AccessKeyID,
+			SecretAccessKey: s3.SecretAccessKey,
+			Region:          s3.Region,
+			Bucket:          s3.Bucket,
+			UseSSL:          s3.UseSSL,
+			Logger:          a.log.logger.core.WithComponent("FS"),
+		})
+		if err != nil {
+			return fmt.Errorf("s3 filesystem (%s): %w", s3.Name, err)
+		}
+
+		s3fs.SetMetadata("base", baseS3FS.String())
+
+		a.s3fs[s3.Name] = s3fs
+	}
 
 	var portrange net.Portranger
@@ -410,18 +475,18 @@ func (a *api) start() error {
 	if cfg.Playout.Enable {
 		portrange, err = net.NewPortrange(cfg.Playout.MinPort, cfg.Playout.MaxPort)
 		if err != nil {
-			return err
+			return fmt.Errorf("playout port range: %w", err)
 		}
 	}
 
 	validatorIn, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Input.Allow, cfg.FFmpeg.Access.Input.Block)
 	if err != nil {
-		return err
+		return fmt.Errorf("input address validator: %w", err)
 	}
 
 	validatorOut, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Output.Allow, cfg.FFmpeg.Access.Output.Block)
 	if err != nil {
-		return err
+		return fmt.Errorf("output address validator: %w", err)
 	}
 
 	ffmpeg, err := ffmpeg.New(ffmpeg.Config{
@@ -435,7 +500,7 @@ func (a *api) start() error {
 		Collector: a.sessions.Collector("ffmpeg"),
 	})
 	if err != nil {
-		return err
+		return fmt.Errorf("unable to create ffmpeg: %w", err)
 	}
 
 	a.ffmpeg = ffmpeg
@@ -443,53 +508,103 @@ func (a *api) start() error {
 	a.replacer = replace.New()
 
 	{
-		a.replacer.RegisterTemplate("diskfs", a.diskfs.Base())
-		a.replacer.RegisterTemplate("memfs", a.memfs.Base())
+		a.replacer.RegisterTemplateFunc("diskfs", func(config *restreamapp.Config, section string) string {
+			return a.diskfs.Metadata("base")
+		}, nil)
 
-		host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address)
-		if len(host) == 0 {
-			host = "localhost"
-		}
+		a.replacer.RegisterTemplateFunc("fs:disk", func(config *restreamapp.Config, section string) string {
+			return a.diskfs.Metadata("base")
+		}, nil)
 
-		template := "rtmp://" + host + ":" + port
-		if cfg.RTMP.App != "/" {
-			template += cfg.RTMP.App
-		}
-		template += "/{name}"
+		a.replacer.RegisterTemplateFunc("memfs", func(config *restreamapp.Config, section string) string {
+			return a.memfs.Metadata("base")
+		}, nil)
+
+		a.replacer.RegisterTemplateFunc("fs:mem", func(config *restreamapp.Config, section string) string {
+			return a.memfs.Metadata("base")
+		}, nil)
 
-		if len(cfg.RTMP.Token) != 0 {
-			template += "?token=" + cfg.RTMP.Token
+		for name, s3 := range a.s3fs {
+			a.replacer.RegisterTemplate("fs:"+name, s3.Metadata("base"), nil)
 		}
 
-		a.replacer.RegisterTemplate("rtmp", template)
+		a.replacer.RegisterTemplateFunc("rtmp", func(config *restreamapp.Config, section string) string {
+			host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address)
+			if len(host) == 0 {
+				host = "localhost"
+			}
+
+			template := "rtmp://" + host + ":" + port
+			if cfg.RTMP.App != "/" {
+				template += cfg.RTMP.App
+			}
+			template += "/{name}"
 
-		host, port, _ = gonet.SplitHostPort(cfg.SRT.Address)
-		if len(host) == 0 {
-			host = "localhost"
-		}
+			if len(cfg.RTMP.Token) != 0 {
+				template += "?token=" + cfg.RTMP.Token
+			}
+
+			return template
+		}, nil)
+
+		a.replacer.RegisterTemplateFunc("srt", func(config *restreamapp.Config, section string) string {
+			host, port, _ := gonet.SplitHostPort(cfg.SRT.Address)
+			if len(host) == 0 {
+				host = "localhost"
+			}
+
+			template := "srt://" + host + ":" + port + "?mode=caller&transtype=live&latency={latency}&streamid={name}"
+			if section == "output" {
+				template += 
",mode:publish" + } else { + template += ",mode:request" + } + if len(cfg.SRT.Token) != 0 { + template += ",token:" + cfg.SRT.Token + } + if len(cfg.SRT.Passphrase) != 0 { + template += "&passphrase=" + cfg.SRT.Passphrase + } + + return template + }, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) + } + + filesystems := []fs.Filesystem{ + a.diskfs, + a.memfs, + } + + for _, fs := range a.s3fs { + filesystems = append(filesystems, fs) + } - template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid=#!:m={mode},r={name}" - if len(cfg.SRT.Token) != 0 { - template += ",token=" + cfg.SRT.Token + var store restreamstore.Store = nil + + { + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: cfg.DB.Dir, + }) + if err != nil { + return err } - if len(cfg.SRT.Passphrase) != 0 { - template += "&passphrase=" + cfg.SRT.Passphrase + store, err = restreamstore.NewJSON(restreamstore.JSONConfig{ + Filesystem: fs, + Filepath: "/db.json", + Logger: a.log.logger.core.WithComponent("ProcessStore"), + }) + if err != nil { + return err } - a.replacer.RegisterTemplate("srt", template) } - store := store.NewJSONStore(store.JSONConfig{ - Filepath: cfg.DB.Dir + "/db.json", - FFVersion: a.ffmpeg.Skills().FFmpeg.Version, - Logger: a.log.logger.core.WithComponent("ProcessStore"), - }) - restream, err := restream.New(restream.Config{ ID: cfg.ID, Name: cfg.Name, Store: store, - DiskFS: a.diskfs, - MemFS: a.memfs, + Filesystems: filesystems, Replace: a.replacer, FFmpeg: a.ffmpeg, MaxProcesses: cfg.FFmpeg.MaxProcesses, @@ -557,9 +672,12 @@ func (a *api) start() error { metrics.Register(monitor.NewCPUCollector()) metrics.Register(monitor.NewMemCollector()) metrics.Register(monitor.NewNetCollector()) - metrics.Register(monitor.NewDiskCollector(a.diskfs.Base())) - metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs)) + metrics.Register(monitor.NewDiskCollector(a.diskfs.Metadata("base"))) + metrics.Register(monitor.NewFilesystemCollector("diskfs", a.diskfs)) metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs)) + for name, fs := range a.s3fs { + metrics.Register(monitor.NewFilesystemCollector(name, fs)) + } metrics.Register(monitor.NewRestreamCollector(a.restream)) metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg)) metrics.Register(monitor.NewSessionCollector(a.sessions, []string{})) @@ -634,7 +752,7 @@ func (a *api) start() error { } if cfg.Storage.Disk.Cache.Enable { - diskCache, err := cache.NewLRUCache(cache.LRUConfig{ + cache, err := cache.NewLRUCache(cache.LRUConfig{ TTL: time.Duration(cfg.Storage.Disk.Cache.TTL) * time.Second, MaxSize: cfg.Storage.Disk.Cache.Size * 1024 * 1024, MaxFileSize: cfg.Storage.Disk.Cache.FileSize * 1024 * 1024, @@ -644,10 +762,10 @@ func (a *api) start() error { }) if err != nil { - return fmt.Errorf("unable to create disk cache: %w", err) + return fmt.Errorf("unable to create cache: %w", err) } - a.cache = diskCache + a.cache = cache } var autocertManager *certmagic.Config @@ -655,26 +773,28 @@ func (a *api) start() error { if cfg.TLS.Enable { if cfg.TLS.Auto { if len(cfg.Host.Name) == 0 { - return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME") + return fmt.Errorf("at least one host must be provided in host.name or CORE_HOST_NAME") + } + + certmagic.Default.Storage = &certmagic.FileStorage{ + Path: cfg.DB.Dir + "/cert", } + certmagic.Default.DefaultServerName = cfg.Host.Name[0] + certmagic.Default.Logger = zap.NewNop() certmagic.DefaultACME.Agreed = 
true certmagic.DefaultACME.Email = cfg.TLS.Email certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA certmagic.DefaultACME.DisableHTTPChallenge = false certmagic.DefaultACME.DisableTLSALPNChallenge = true - certmagic.DefaultACME.Logger = nil - - certmagic.Default.Storage = &certmagic.FileStorage{ - Path: cfg.DB.Dir + "/cert", - } - certmagic.Default.DefaultServerName = cfg.Host.Name[0] - certmagic.Default.Logger = nil + certmagic.DefaultACME.Logger = zap.NewNop() magic := certmagic.NewDefault() acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME) + acme.Logger = zap.NewNop() magic.Issuers = []certmagic.Issuer{acme} + magic.Logger = zap.NewNop() autocertManager = magic @@ -713,6 +833,19 @@ func (a *api) start() error { if err != nil { logger.Error().WithField("error", err).Log("Failed to acquire certificate") certerror = true + /* + problems, err := letsdebug.Check(host, letsdebug.HTTP01) + if err != nil { + logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition") + } + + for _, p := range problems { + logger.Error().WithFields(log.Fields{ + "name": p.Name, + "detail": p.Detail, + }).Log(p.Explanation) + } + */ break } @@ -820,22 +953,61 @@ func (a *api) start() error { a.log.logger.main = a.log.logger.core.WithComponent(logcontext).WithField("address", cfg.Address) - mainserverhandler, err := http.NewServer(http.Config{ + httpfilesystems := []httpfs.FS{ + { + Name: a.diskfs.Name(), + Mountpoint: "", + AllowWrite: false, + EnableAuth: false, + Username: "", + Password: "", + DefaultFile: "index.html", + DefaultContentType: "text/html", + Gzip: true, + Filesystem: a.diskfs, + Cache: a.cache, + }, + { + Name: a.memfs.Name(), + Mountpoint: "/memfs", + AllowWrite: true, + EnableAuth: cfg.Storage.Memory.Auth.Enable, + Username: cfg.Storage.Memory.Auth.Username, + Password: cfg.Storage.Memory.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.memfs, + Cache: nil, + }, + } + + for _, s3 := range cfg.Storage.S3 { + httpfilesystems = append(httpfilesystems, httpfs.FS{ + Name: s3.Name, + Mountpoint: s3.Mountpoint, + AllowWrite: true, + EnableAuth: s3.Auth.Enable, + Username: s3.Auth.Username, + Password: s3.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.s3fs[s3.Name], + Cache: a.cache, + }) + } + + serverConfig := http.Config{ Logger: a.log.logger.main, LogBuffer: a.log.buffer, Restream: a.restream, Metrics: a.metrics, Prometheus: a.prom, MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, + Filesystems: httpfilesystems, + IPLimiter: iplimiter, + Profiling: cfg.Debug.Profiling, Cors: http.CorsConfig{ Origins: cfg.Storage.CORS.Origins, }, @@ -843,11 +1015,12 @@ func (a *api) start() error { SRT: a.srtserver, JWT: a.httpjwt, Config: a.config.store, - Cache: a.cache, Sessions: a.sessions, Router: router, ReadOnly: cfg.API.ReadOnly, - }) + } + + mainserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create server: %w", err) @@ -882,34 +1055,10 @@ func (a *api) start() error { a.log.logger.sidecar = a.log.logger.core.WithComponent("HTTP").WithField("address", cfg.Address) - sidecarserverhandler, err := http.NewServer(http.Config{ - Logger: 
a.log.logger.sidecar, - LogBuffer: a.log.buffer, - Restream: a.restream, - Metrics: a.metrics, - Prometheus: a.prom, - MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, - Cors: http.CorsConfig{ - Origins: cfg.Storage.CORS.Origins, - }, - RTMP: a.rtmpserver, - SRT: a.srtserver, - JWT: a.httpjwt, - Config: a.config.store, - Cache: a.cache, - Sessions: a.sessions, - Router: router, - ReadOnly: cfg.API.ReadOnly, - }) + serverConfig.Logger = a.log.logger.sidecar + serverConfig.IPLimiter = iplimiter + + sidecarserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create sidecar HTTP server: %w", err) @@ -1101,6 +1250,12 @@ func (a *api) start() error { }(ctx) } + if cfg.Debug.MemoryLimit > 0 { + debug.SetMemoryLimit(cfg.Debug.MemoryLimit * 1024 * 1024) + } else { + debug.SetMemoryLimit(math.MaxInt64) + } + // Start the restream processes restream.Start() @@ -1267,7 +1422,7 @@ func (a *api) Destroy() { // Free the MemFS if a.memfs != nil { - a.memfs.DeleteAll() + a.memfs.RemoveAll() a.memfs = nil } } diff --git a/app/ffmigrate/main.go b/app/ffmigrate/main.go index 036af80f..5f3b4996 100644 --- a/app/ffmigrate/main.go +++ b/app/ffmigrate/main.go @@ -9,6 +9,7 @@ import ( cfgvars "github.com/datarhei/core/v16/config/vars" "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/restream/store" @@ -22,7 +23,11 @@ func main() { "to": "ffmpeg5", }) - configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil) + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + diskfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{}) + + configstore, err := cfgstore.NewJSON(diskfs, configfile, nil) if err != nil { logger.Error().WithError(err).Log("Loading configuration failed") os.Exit(1) @@ -115,9 +120,12 @@ func doMigration(logger log.Logger, configstore cfgstore.Store) error { logger.Info().WithField("backup", backupFilepath).Log("Backup created") // Load the existing DB - datastore := store.NewJSONStore(store.JSONConfig{ + datastore, err := store.NewJSON(store.JSONConfig{ Filepath: cfg.DB.Dir + "/db.json", }) + if err != nil { + return err + } data, err := datastore.Load() if err != nil { diff --git a/app/import/import.go b/app/import/import.go index b453c4a9..5899c350 100644 --- a/app/import/import.go +++ b/app/import/import.go @@ -17,6 +17,7 @@ import ( "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/ffmpeg/skills" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream" "github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/store" @@ -495,14 +496,14 @@ type importConfigAudio struct { sampling string } -func importV1(path string, cfg importConfig) (store.StoreData, error) { +func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.StoreData, error) { if len(cfg.id) == 0 { cfg.id = uuid.New().String() } r := store.NewStoreData() - jsondata, err := os.ReadFile(path) + jsondata, err := fs.ReadFile(path) if err != nil { return r, fmt.Errorf("failed to read data from %s: %w", path, err) } @@ -1417,9 +1418,19 @@ func probeInput(binary string, config 
app.Config) app.Probe { return app.Probe{} } + dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + store, err := store.NewJSON(store.JSONConfig{ + Filesystem: dummyfs, + Filepath: "/", + Logger: nil, + }) + if err != nil { + return app.Probe{} + } + rs, err := restream.New(restream.Config{ FFmpeg: ffmpeg, - Store: store.NewDummyStore(store.DummyConfig{}), + Store: store, }) if err != nil { return app.Probe{} diff --git a/app/import/import_test.go b/app/import/import_test.go index e0e8f3d6..8322c0eb 100644 --- a/app/import/import_test.go +++ b/app/import/import_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/datarhei/core/v16/encoding/json" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream/store" "github.com/stretchr/testify/require" @@ -36,8 +37,13 @@ import ( var id string = "4186b095-7f0a-4e94-8c3d-f17459ab252f" func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig) { + diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: ".", + }) + require.NoError(t, err) + // Import v1 database - v4, err := importV1(v1Fixture, config) + v4, err := importV1(diskfs, v1Fixture, config) require.Equal(t, nil, err) // Reset variants @@ -50,7 +56,7 @@ func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig require.Equal(t, nil, err) // Read the wanted result - wantdatav4, err := os.ReadFile(v4Fixture) + wantdatav4, err := diskfs.ReadFile(v4Fixture) require.Equal(t, nil, err) var wantv4 store.StoreData diff --git a/app/import/main.go b/app/import/main.go index ebfe1aa0..2d641caf 100644 --- a/app/import/main.go +++ b/app/import/main.go @@ -6,6 +6,7 @@ import ( cfgstore "github.com/datarhei/core/v16/config/store" cfgvars "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/restream/store" @@ -15,18 +16,26 @@ import ( func main() { logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1") - configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil) + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{}) + if err != nil { + logger.Error().WithError(err).Log("Access disk filesystem failed") + os.Exit(1) + } + + configstore, err := cfgstore.NewJSON(diskfs, configfile, nil) if err != nil { logger.Error().WithError(err).Log("Loading configuration failed") os.Exit(1) } - if err := doImport(logger, configstore); err != nil { + if err := doImport(logger, diskfs, configstore); err != nil { os.Exit(1) } } -func doImport(logger log.Logger, configstore cfgstore.Store) error { +func doImport(logger log.Logger, fs fs.Filesystem, configstore cfgstore.Store) error { if logger == nil { logger = log.New("") } @@ -65,23 +74,27 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error { logger = logger.WithField("database", v1filename) - if _, err := os.Stat(v1filename); err != nil { + if _, err := fs.Stat(v1filename); err != nil { if os.IsNotExist(err) { logger.Info().Log("Database doesn't exist and nothing will be imported") return nil } logger.Error().WithError(err).Log("Checking for v1 database") - return fmt.Errorf("checking for v1 database: %w", err) } logger.Info().Log("Found database") // Load an existing DB - datastore := store.NewJSONStore(store.JSONConfig{ - Filepath: cfg.DB.Dir + "/db.json", + datastore, err := store.NewJSON(store.JSONConfig{ + Filesystem: fs, + Filepath: 
cfg.DB.Dir + "/db.json", }) + if err != nil { + logger.Error().WithError(err).Log("Creating datastore for new database failed") + return fmt.Errorf("creating datastore for new database failed: %w", err) + } data, err := datastore.Load() if err != nil { @@ -103,7 +116,7 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error { importConfig.binary = cfg.FFmpeg.Binary // Rewrite the old database to the new database - r, err := importV1(v1filename, importConfig) + r, err := importV1(fs, v1filename, importConfig) if err != nil { logger.Error().WithError(err).Log("Importing database failed") return fmt.Errorf("importing database failed: %w", err) diff --git a/app/import/main_test.go b/app/import/main_test.go index d389c20e..305110f9 100644 --- a/app/import/main_test.go +++ b/app/import/main_test.go @@ -1,20 +1,30 @@ package main import ( + "strings" "testing" "github.com/datarhei/core/v16/config/store" + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" ) func TestImport(t *testing.T) { - configstore := store.NewDummy() + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + memfs.WriteFileReader("/mime.types", strings.NewReader("foobar")) + memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("foobar")) + + configstore, err := store.NewJSON(memfs, "/config.json", nil) + require.NoError(t, err) cfg := configstore.Get() - err := configstore.Set(cfg) + err = configstore.Set(cfg) require.NoError(t, err) - err = doImport(nil, configstore) + err = doImport(nil, memfs, configstore) require.NoError(t, err) } diff --git a/app/version.go b/app/version.go index 40c3f34b..ec718dfd 100644 --- a/app/version.go +++ b/app/version.go @@ -29,7 +29,7 @@ func (v versionInfo) MinorString() string { // Version of the app var Version = versionInfo{ Major: 16, - Minor: 11, + Minor: 12, Patch: 0, } diff --git a/config/config.go b/config/config.go index 1d1d6732..b6eab656 100644 --- a/config/config.go +++ b/config/config.go @@ -6,11 +6,13 @@ import ( "net" "time" - haikunator "github.com/atrox/haikunatorgo/v2" "github.com/datarhei/core/v16/config/copy" "github.com/datarhei/core/v16/config/value" "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/math/rand" + + haikunator "github.com/atrox/haikunatorgo/v2" "github.com/google/uuid" ) @@ -45,14 +47,21 @@ const version int64 = 3 // Config is a wrapper for Data type Config struct { + fs fs.Filesystem vars vars.Variables Data } // New returns a Config which is initialized with its default values -func New() *Config { - config := &Config{} +func New(f fs.Filesystem) *Config { + config := &Config{ + fs: f, + } + + if config.fs == nil { + config.fs, _ = fs.NewMemFilesystem(fs.MemConfig{}) + } config.init() @@ -69,7 +78,7 @@ func (d *Config) Set(name, val string) error { // NewConfigFrom returns a clone of a Config func (d *Config) Clone() *Config { - data := New() + data := New(d.fs) data.CreatedAt = d.CreatedAt data.LoadedAt = d.LoadedAt @@ -111,6 +120,7 @@ func (d *Config) Clone() *Config { data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow) data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block) + data.Storage.S3 = copy.Slice(d.Storage.S3) data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) @@ -143,7 +153,7 @@ func (d *Config) init() { 
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false) // DB - d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) + d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) // Host d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) @@ -172,14 +182,14 @@ func (d *Config) init() { d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false) - d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) - d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) // Storage - d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) // Storage (Disk) - d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) @@ -195,6 +205,9 @@ func (d *Config) init() { d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. 
allowed megabytes for /memfs, 0 for unlimited", false, false) d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + // Storage (S3) + d.vars.Register(value.NewS3StorageListValue(&d.Storage.S3, []value.S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLS", false, false) + // Storage (CORS) d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) @@ -215,7 +228,7 @@ func (d *Config) init() { d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) // FFmpeg - d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false) @@ -232,6 +245,7 @@ func (d *Config) init() { // Debug d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false) + d.vars.Register(value.NewInt64(&d.Debug.MemoryLimit, 0), "debug.memory_limit_mbytes", "CORE_DEBUG_MEMORY_LIMIT_MBYTES", nil, "Impose a soft memory limit for the core, in megabytes", false, false) // Metrics d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) @@ -256,7 +270,7 @@ func (d *Config) init() { // Router d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) - d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) + d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) } // Validate validates the current state of the Config for completeness and sanity. 
Errors are @@ -374,6 +388,21 @@ func (d *Config) Validate(resetLogs bool) { } } + if len(d.Storage.S3) != 0 { + names := map[string]struct{}{ + "disk": {}, + "mem": {}, + } + + for _, s3 := range d.Storage.S3 { + if _, ok := names[s3.Name]; ok { + d.vars.Log("error", "storage.s3", "the name %s is already in use or reserved", s3.Name) + } + + names[s3.Name] = struct{}{} + } + } + // If playout is enabled, check that the port range is sane if d.Playout.Enable { if d.Playout.MinPort >= d.Playout.MaxPort { diff --git a/config/config_test.go b/config/config_test.go index 0ffe2790..132857fe 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,13 +1,18 @@ package config import ( + "strings" "testing" + "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" ) func TestConfigCopy(t *testing.T) { - config1 := New() + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + config1 := New(fs) config1.Version = 42 config1.DB.Dir = "foo" @@ -50,3 +55,30 @@ func TestConfigCopy(t *testing.T) { require.Equal(t, []string{"bar.com"}, config1.Host.Name) require.Equal(t, []string{"foo.com"}, config2.Host.Name) } + +func TestValidateDefault(t *testing.T) { + fs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + size, fresh, err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + require.Equal(t, int64(5), size) + require.Equal(t, true, fresh) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + require.NoError(t, err) + + cfg := New(fs) + + cfg.Validate(true) + + errors := []string{} + cfg.Messages(func(level string, v vars.Variable, message string) { + if level == "error" { + errors = append(errors, message) + } + }) + + require.Equal(t, 0, len(cfg.Overrides())) + require.Equal(t, false, cfg.HasErrors(), errors) +} diff --git a/config/data.go b/config/data.go index 012c964b..35507888 100644 --- a/config/data.go +++ b/config/data.go @@ -6,14 +6,15 @@ import ( "github.com/datarhei/core/v16/config/copy" v2 "github.com/datarhei/core/v16/config/v2" "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/io/fs" ) // Data is the actual configuration data for the app type Data struct { - CreatedAt time.Time `json:"created_at"` - LoadedAt time.Time `json:"-"` - UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=3,maximum=3"` + CreatedAt time.Time `json:"created_at"` // When this config has been persisted + LoadedAt time.Time `json:"-"` // When this config has been actually used + UpdatedAt time.Time `json:"-"` // Irrelevant + Version int64 `json:"version" jsonschema:"minimum=3,maximum=3" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -21,7 +22,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -67,12 +68,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" 
format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types struct { Allow []string `json:"allow"` Block []string `json:"block"` @@ -85,9 +86,10 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` + S3 []value.S3Storage `json:"s3"` CORS struct { Origins []string `json:"origins"` } `json:"cors"` @@ -113,7 +115,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -125,33 +127,34 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { - Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + Profiling bool `json:"profiling"` + ForceGC int `json:"force_gc" format:"int"` + MemoryLimit int64 `json:"memory_limit_mbytes" format:"int64"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` @@ -165,8 +168,8 @@ type Data struct { } `json:"router"` } -func UpgradeV2ToV3(d *v2.Data) (*Data, error) { - cfg := New() +func UpgradeV2ToV3(d *v2.Data, fs fs.Filesystem) (*Data, error) { + cfg := New(fs) return MergeV2toV3(&cfg.Data, d) } @@ -189,7 +192,6 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.SRT = d.SRT data.FFmpeg = d.FFmpeg data.Playout = d.Playout - data.Debug = d.Debug data.Metrics = d.Metrics data.Sessions = d.Sessions data.Service = d.Service @@ -228,6 +230,10 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.Storage.Memory = d.Storage.Memory // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = d.Debug.ForceGC + data.Debug.MemoryLimit = 0 + data.TLS.Enable = d.TLS.Enable data.TLS.Address = d.TLS.Address data.TLS.Auto = d.TLS.Auto @@ -242,6 +248,8 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL data.Storage.Disk.Cache.Types.Allow = 
copy.Slice(d.Storage.Disk.Cache.Types) + data.Storage.S3 = []value.S3Storage{} + data.Version = 3 return data, nil @@ -267,7 +275,6 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) { data.SRT = d.SRT data.FFmpeg = d.FFmpeg data.Playout = d.Playout - data.Debug = d.Debug data.Metrics = d.Metrics data.Sessions = d.Sessions data.Service = d.Service @@ -299,6 +306,9 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) { data.Router.Routes = copy.StringMap(d.Router.Routes) // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = d.Debug.ForceGC + data.TLS.Enable = d.TLS.Enable data.TLS.Address = d.TLS.Address data.TLS.Auto = d.TLS.Auto diff --git a/config/data_test.go b/config/data_test.go new file mode 100644 index 00000000..956cf25d --- /dev/null +++ b/config/data_test.go @@ -0,0 +1,36 @@ +package config + +import ( + "testing" + + v2 "github.com/datarhei/core/v16/config/v2" + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" +) + +func TestUpgrade(t *testing.T) { + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + + v2cfg := v2.New(fs) + v2cfg.Storage.Disk.Cache.Types = []string{".foo", ".bar"} + + v3cfg, err := UpgradeV2ToV3(&v2cfg.Data, fs) + + require.NoError(t, err) + require.Equal(t, int64(3), v3cfg.Version) + require.ElementsMatch(t, []string{".foo", ".bar"}, v3cfg.Storage.Disk.Cache.Types.Allow) + require.ElementsMatch(t, []string{".m3u8", ".mpd"}, v3cfg.Storage.Disk.Cache.Types.Block) +} + +func TestDowngrade(t *testing.T) { + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + + v3cfg := New(fs) + v3cfg.Storage.Disk.Cache.Types.Allow = []string{".foo", ".bar"} + + v2cfg, err := DowngradeV3toV2(&v3cfg.Data) + + require.NoError(t, err) + require.Equal(t, int64(2), v2cfg.Version) + require.ElementsMatch(t, []string{".foo", ".bar"}, v2cfg.Storage.Disk.Cache.Types) +} diff --git a/config/store/dummy.go b/config/store/dummy.go deleted file mode 100644 index c1a96aae..00000000 --- a/config/store/dummy.go +++ /dev/null @@ -1,73 +0,0 @@ -package store - -import ( - "fmt" - - "github.com/datarhei/core/v16/config" -) - -type dummyStore struct { - current *config.Config - active *config.Config -} - -// NewDummyStore returns a store that returns the default config -func NewDummy() Store { - s := &dummyStore{} - - cfg := config.New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" - - s.current = cfg - - cfg = config.New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." 
-	cfg.Storage.MimeTypes = ""
-
-	s.active = cfg
-
-	return s
-}
-
-func (c *dummyStore) Get() *config.Config {
-	return c.current.Clone()
-}
-
-func (c *dummyStore) Set(d *config.Config) error {
-	d.Validate(true)
-
-	if d.HasErrors() {
-		return fmt.Errorf("configuration data has errors after validation")
-	}
-
-	c.current = d.Clone()
-
-	return nil
-}
-
-func (c *dummyStore) GetActive() *config.Config {
-	return c.active.Clone()
-}
-
-func (c *dummyStore) SetActive(d *config.Config) error {
-	d.Validate(true)
-
-	if d.HasErrors() {
-		return fmt.Errorf("configuration data has errors after validation")
-	}
-
-	c.active = d.Clone()
-
-	return nil
-}
-
-func (c *dummyStore) Reload() error {
-	return nil
-}
diff --git a/config/store/json.go b/config/store/json.go
index b4cd2db9..976d18a0 100644
--- a/config/store/json.go
+++ b/config/store/json.go
@@ -5,16 +5,16 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"time"
 
 	"github.com/datarhei/core/v16/config"
 	v1 "github.com/datarhei/core/v16/config/v1"
 	v2 "github.com/datarhei/core/v16/config/v2"
 	"github.com/datarhei/core/v16/encoding/json"
-	"github.com/datarhei/core/v16/io/file"
+	"github.com/datarhei/core/v16/io/fs"
 )
 
 type jsonStore struct {
+	fs   fs.Filesystem
 	path string
 
 	data map[string]*config.Config
@@ -22,18 +22,32 @@ type jsonStore struct {
 	reloadFn func()
 }
 
-// NewJSONStore will read a JSON config file from the given path. After successfully reading it in, it will be written
-// back to the path. The returned error will be nil if everything went fine.
-// If the path doesn't exist, a default JSON config file will be written to that path.
-// The returned ConfigStore can be used to retrieve or write the config.
-func NewJSON(path string, reloadFn func()) (Store, error) {
+// NewJSON will read the JSON config file from the given path. After successfully reading it in, it will be written
+// back to the path. The returned error will be nil if everything went fine. If the path doesn't exist, a default JSON
+// config file will be written to that path. The returned Store can be used to retrieve or write the config. 
+func NewJSON(f fs.Filesystem, path string, reloadFn func()) (Store, error) { c := &jsonStore{ - path: path, + fs: f, data: make(map[string]*config.Config), reloadFn: reloadFn, } - c.data["base"] = config.New() + path, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("failed to determine absolute path of '%s': %w", path, err) + } + + c.path = path + + if len(c.path) == 0 { + c.path = "/config.json" + } + + if c.fs == nil { + return nil, fmt.Errorf("no valid filesystem provided") + } + + c.data["base"] = config.New(f) if err := c.load(c.data["base"]); err != nil { return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err) @@ -57,14 +71,10 @@ func (c *jsonStore) Set(d *config.Config) error { data := d.Clone() - data.CreatedAt = time.Now() - if err := c.store(data); err != nil { return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err) } - data.UpdatedAt = time.Now() - c.data["base"] = data return nil @@ -89,7 +99,9 @@ func (c *jsonStore) SetActive(d *config.Config) error { return fmt.Errorf("configuration data has errors after validation") } - c.data["merged"] = d.Clone() + data := d.Clone() + + c.data["merged"] = data return nil } @@ -109,15 +121,19 @@ func (c *jsonStore) load(cfg *config.Config) error { return nil } - if _, err := os.Stat(c.path); os.IsNotExist(err) { + if _, err := c.fs.Stat(c.path); os.IsNotExist(err) { return nil } - jsondata, err := os.ReadFile(c.path) + jsondata, err := c.fs.ReadFile(c.path) if err != nil { return err } + if len(jsondata) == 0 { + return nil + } + data, err := migrate(jsondata) if err != nil { return err @@ -125,15 +141,12 @@ func (c *jsonStore) load(cfg *config.Config) error { cfg.Data = *data - cfg.LoadedAt = time.Now() - cfg.UpdatedAt = cfg.LoadedAt + cfg.UpdatedAt = cfg.CreatedAt return nil } func (c *jsonStore) store(data *config.Config) error { - data.CreatedAt = time.Now() - if len(c.path) == 0 { return nil } @@ -143,28 +156,9 @@ func (c *jsonStore) store(data *config.Config) error { return err } - dir, filename := filepath.Split(c.path) - - tmpfile, err := os.CreateTemp(dir, filename) - if err != nil { - return err - } - - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return err - } - - if err := tmpfile.Close(); err != nil { - return err - } - - if err := file.Rename(tmpfile.Name(), c.path); err != nil { - return err - } + _, _, err = c.fs.WriteFileSafe(c.path, jsondata) - return nil + return err } func migrate(jsondata []byte) (*config.Data, error) { @@ -176,38 +170,38 @@ func migrate(jsondata []byte) (*config.Data, error) { } if version.Version == 1 { - dataV1 := &v1.New().Data + dataV1 := &v1.New(nil).Data if err := gojson.Unmarshal(jsondata, dataV1); err != nil { return nil, json.FormatError(jsondata, err) } - dataV2, err := v2.UpgradeV1ToV2(dataV1) + dataV2, err := v2.UpgradeV1ToV2(dataV1, nil) if err != nil { return nil, err } - dataV3, err := config.UpgradeV2ToV3(dataV2) + dataV3, err := config.UpgradeV2ToV3(dataV2, nil) if err != nil { return nil, err } data = dataV3 } else if version.Version == 2 { - dataV2 := &v2.New().Data + dataV2 := &v2.New(nil).Data if err := gojson.Unmarshal(jsondata, dataV2); err != nil { return nil, json.FormatError(jsondata, err) } - dataV3, err := config.UpgradeV2ToV3(dataV2) + dataV3, err := config.UpgradeV2ToV3(dataV2, nil) if err != nil { return nil, err } data = dataV3 } else if version.Version == 3 { - dataV3 := &config.New().Data + dataV3 := &config.New(nil).Data if err := gojson.Unmarshal(jsondata, dataV3); err != nil 
{
 			return nil, json.FormatError(jsondata, err)
diff --git a/config/store/json_test.go b/config/store/json_test.go
index 6b412c14..f549fd3e 100644
--- a/config/store/json_test.go
+++ b/config/store/json_test.go
@@ -18,7 +18,7 @@ func TestMigrationV1ToV3(t *testing.T) {
 	jsondatav3, err := os.ReadFile("./fixtures/config_v1_v3.json")
 	require.NoError(t, err)
 
-	datav3 := config.New()
+	datav3 := config.New(nil)
 	json.Unmarshal(jsondatav3, datav3)
 
 	data, err := migrate(jsondatav1)
@@ -37,7 +37,7 @@ func TestMigrationV2ToV3(t *testing.T) {
 	jsondatav3, err := os.ReadFile("./fixtures/config_v2_v3.json")
 	require.NoError(t, err)
 
-	datav3 := config.New()
+	datav3 := config.New(nil)
 	json.Unmarshal(jsondatav3, datav3)
 
 	data, err := migrate(jsondatav2)
diff --git a/config/store/location.go b/config/store/location.go
new file mode 100644
index 00000000..e073a0c8
--- /dev/null
+++ b/config/store/location.go
@@ -0,0 +1,53 @@
+package store
+
+import (
+	"os"
+	"path"
+)
+
+// Location returns the path to the config file. If no path is provided,
+// different standard locations will be probed:
+// - os.UserConfigDir() + /datarhei-core/config.js
+// - os.UserHomeDir() + /.config/datarhei-core/config.js
+// - ./config/config.js
+// If the config doesn't exist in any of these locations, it will be assumed
+// to be at ./config/config.js
+func Location(filepath string) string {
+	configfile := filepath
+	if len(configfile) != 0 {
+		return configfile
+	}
+
+	locations := []string{}
+
+	if dir, err := os.UserConfigDir(); err == nil {
+		locations = append(locations, dir+"/datarhei-core/config.js")
+	}
+
+	if dir, err := os.UserHomeDir(); err == nil {
+		locations = append(locations, dir+"/.config/datarhei-core/config.js")
+	}
+
+	locations = append(locations, "./config/config.js")
+
+	for _, path := range locations {
+		info, err := os.Stat(path)
+		if err != nil {
+			continue
+		}
+
+		if info.IsDir() {
+			continue
+		}
+
+		configfile = path
+	}
+
+	if len(configfile) == 0 {
+		configfile = "./config/config.js"
+	}
+
+	os.MkdirAll(path.Dir(configfile), 0740)
+
+	return configfile
+}
diff --git a/config/v1/config.go b/config/v1/config.go
index b6f00f2d..022edfe9 100644
--- a/config/v1/config.go
+++ b/config/v1/config.go
@@ -8,6 +8,7 @@ import (
 	"github.com/datarhei/core/v16/config/copy"
 	"github.com/datarhei/core/v16/config/value"
 	"github.com/datarhei/core/v16/config/vars"
+	"github.com/datarhei/core/v16/io/fs"
 	"github.com/datarhei/core/v16/math/rand"
 
 	haikunator "github.com/atrox/haikunatorgo/v2"
@@ -21,14 +22,21 @@ const version int64 = 1
 
 // Config is a wrapper for Data
 type Config struct {
+	fs   fs.Filesystem
 	vars vars.Variables
 
 	Data
}
 
 // New returns a Config which is initialized with its default values
-func New() *Config {
-	cfg := &Config{}
+func New(f fs.Filesystem) *Config {
+	cfg := &Config{
+		fs: f,
+	}
+
+	if cfg.fs == nil {
+		cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
+	}
 
 	cfg.init()
 
@@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error {
 
 // NewConfigFrom returns a clone of a Config
 func (d *Config) Clone() *Config {
-	data := New()
+	data := New(d.fs)
 
 	data.CreatedAt = d.CreatedAt
 	data.LoadedAt = d.LoadedAt
@@ -118,7 +126,7 @@ func (d *Config) init() {
 	d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
 
 	// DB
-	d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
+	d.vars.Register(value.NewMustDir(&d.DB.Dir, 
"./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) // Host d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) @@ -146,14 +154,14 @@ func (d *Config) init() { d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) - d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) - d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) // Storage - d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) // Storage (Disk) - d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) @@ -187,7 +195,7 @@ func (d *Config) init() { d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) // FFmpeg - d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. 
allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false) @@ -228,7 +236,7 @@ func (d *Config) init() { // Router d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) - d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) + d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) } // Validate validates the current state of the Config for completeness and sanity. Errors are diff --git a/config/v1/data.go b/config/v1/data.go index e398adbb..2826f02d 100644 --- a/config/v1/data.go +++ b/config/v1/data.go @@ -10,7 +10,7 @@ type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=1,maximum=1"` + Version int64 `json:"version" jsonschema:"minimum=1,maximum=1" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -18,7 +18,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -63,12 +63,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -78,7 +78,7 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -105,7 +105,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -117,33 +117,33 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" 
format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` diff --git a/config/v2/config.go b/config/v2/config.go index c3992da7..e1bfb0cb 100644 --- a/config/v2/config.go +++ b/config/v2/config.go @@ -8,6 +8,7 @@ import ( "github.com/datarhei/core/v16/config/copy" "github.com/datarhei/core/v16/config/value" "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/math/rand" haikunator "github.com/atrox/haikunatorgo/v2" @@ -21,14 +22,21 @@ const version int64 = 2 // Config is a wrapper for Data type Config struct { + fs fs.Filesystem vars vars.Variables Data } // New returns a Config which is initialized with its default values -func New() *Config { - cfg := &Config{} +func New(f fs.Filesystem) *Config { + cfg := &Config{ + fs: f, + } + + if cfg.fs == nil { + cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{}) + } cfg.init() @@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error { // NewConfigFrom returns a clone of a Config func (d *Config) Clone() *Config { - data := New() + data := New(d.fs) data.CreatedAt = d.CreatedAt data.LoadedAt = d.LoadedAt @@ -118,7 +126,7 @@ func (d *Config) init() { d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false) // DB - d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) + d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) // Host d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) @@ -146,14 +154,14 @@ func (d *Config) init() { d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", 
"CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) - d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) - d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) // Storage - d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) // Storage (Disk) - d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) @@ -188,7 +196,7 @@ func (d *Config) init() { d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) // FFmpeg - d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. 
allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false) d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false) @@ -229,7 +237,7 @@ func (d *Config) init() { // Router d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) - d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) + d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) } // Validate validates the current state of the Config for completeness and sanity. Errors are diff --git a/config/v2/data.go b/config/v2/data.go index 1a549497..1c226376 100644 --- a/config/v2/data.go +++ b/config/v2/data.go @@ -10,13 +10,14 @@ import ( "github.com/datarhei/core/v16/config/copy" v1 "github.com/datarhei/core/v16/config/v1" "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/io/fs" ) type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=2,maximum=2"` + Version int64 `json:"version" jsonschema:"minimum=2,maximum=2" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -24,7 +25,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -69,12 +70,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -84,7 +85,7 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -112,7 +113,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -124,33 
+125,33 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` @@ -164,8 +165,8 @@ type Data struct { } `json:"router"` } -func UpgradeV1ToV2(d *v1.Data) (*Data, error) { - cfg := New() +func UpgradeV1ToV2(d *v1.Data, fs fs.Filesystem) (*Data, error) { + cfg := New(fs) return MergeV1ToV2(&cfg.Data, d) } diff --git a/config/value/auth0.go b/config/value/auth0.go index 8d19b4f1..a912134d 100644 --- a/config/value/auth0.go +++ b/config/value/auth0.go @@ -4,6 +4,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "net/url" "strings" ) @@ -16,6 +17,28 @@ type Auth0Tenant struct { Users []string `json:"users"` } +func (a *Auth0Tenant) String() string { + u := url.URL{ + Scheme: "auth0", + Host: a.Domain, + } + + if len(a.ClientID) != 0 { + u.User = url.User(a.ClientID) + } + + q := url.Values{} + q.Set("aud", a.Audience) + + for _, user := range a.Users { + q.Add("user", user) + } + + u.RawQuery = q.Encode() + + return u.String() +} + type TenantList struct { p *[]Auth0Tenant separator string @@ -32,18 +55,34 @@ func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *Tenan return v } +// Set allows setting a tenant list in two formats: +// - a separator-separated list of base64 encoded Auth0Tenant JSON objects +// - a separator-separated list of Auth0Tenant in URL representation: auth0://[clientid]@[domain]?aud=[audience]&user=...&user=...
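
(Editor's aside: the URL form documented above maps one-to-one onto the Auth0Tenant fields. A minimal, self-contained sketch of that decoding with net/url; the tenant values are made up:)

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("auth0://clientid@example.eu.auth0.com?aud=https%3A%2F%2Fexample.com%2Fcore&user=user1&user=user2")
	if err != nil {
		panic(err)
	}

	fmt.Println("domain:  ", u.Host)               // example.eu.auth0.com
	fmt.Println("clientid:", u.User.Username())    // clientid
	fmt.Println("audience:", u.Query().Get("aud")) // https://example.com/core
	fmt.Println("users:   ", u.Query()["user"])    // [user1 user2]
}
```
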
func (s *TenantList) Set(val string) error { list := []Auth0Tenant{} for i, elm := range strings.Split(val, s.separator) { - data, err := base64.StdEncoding.DecodeString(elm) - if err != nil { - return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) - } - t := Auth0Tenant{} - if err := json.Unmarshal(data, &t); err != nil { - return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) + + if strings.HasPrefix(elm, "auth0://") { + data, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid url encoding of tenant %d: %w", i, err) + } + + t.Domain = data.Host + t.ClientID = data.User.Username() + t.Audience = data.Query().Get("aud") + t.Users = data.Query()["user"] + } else { + data, err := base64.StdEncoding.DecodeString(elm) + if err != nil { + return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) + } + + if err := json.Unmarshal(data, &t); err != nil { + return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) + } } list = append(list, t) @@ -62,10 +101,10 @@ func (s *TenantList) String() string { list := []string{} for _, t := range *s.p { - list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users))) + list = append(list, t.String()) } - return strings.Join(list, ",") + return strings.Join(list, s.separator) } func (s *TenantList) Validate() error { diff --git a/config/value/auth0_test.go b/config/value/auth0_test.go new file mode 100644 index 00000000..edc4eff8 --- /dev/null +++ b/config/value/auth0_test.go @@ -0,0 +1,43 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAuth0Value(t *testing.T) { + tenants := []Auth0Tenant{} + + v := NewTenantList(&tenants, nil, " ") + require.Equal(t, "(empty)", v.String()) + + v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3") + require.Equal(t, []Auth0Tenant{ + { + Domain: "domain", + ClientID: "clientid", + Audience: "audience", + Users: []string{"user1", "user2"}, + }, + { + Domain: "domain2", + Audience: "audience2", + Users: []string{"user3"}, + }, + }, tenants) + require.Equal(t, "auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3", v.String()) + require.NoError(t, v.Validate()) + + v.Set("eyJkb21haW4iOiJkYXRhcmhlaS5ldS5hdXRoMC5jb20iLCJhdWRpZW5jZSI6Imh0dHBzOi8vZGF0YXJoZWkuY29tL2NvcmUiLCJ1c2VycyI6WyJhdXRoMHx4eHgiXX0=") + require.Equal(t, []Auth0Tenant{ + { + Domain: "datarhei.eu.auth0.com", + ClientID: "", + Audience: "https://datarhei.com/core", + Users: []string{"auth0|xxx"}, + }, + }, tenants) + require.Equal(t, "auth0://datarhei.eu.auth0.com?aud=https%3A%2F%2Fdatarhei.com%2Fcore&user=auth0%7Cxxx", v.String()) + require.NoError(t, v.Validate()) +} diff --git a/config/value/network_test.go b/config/value/network_test.go new file mode 100644 index 00000000..add7190a --- /dev/null +++ b/config/value/network_test.go @@ -0,0 +1,127 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAddressValue(t *testing.T) { + var x string + + val := NewAddress(&x, ":8080") + + require.Equal(t, ":8080", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz:9090" + + require.Equal(t, "foobaz:9090", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz:7070") + + require.Equal(t, "fooboz:7070", x) +} + +func TestCIDRListValue(t *testing.T) { + var x []string + + val := NewCIDRList(&x, 
[]string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"127.0.0.1/32", "127.0.0.2/32"} + + require.Equal(t, "127.0.0.1/32 127.0.0.2/32", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("129.0.0.1/32 129.0.0.2/32") + + require.Equal(t, []string{"129.0.0.1/32", "129.0.0.2/32"}, x) +} + +func TestCORSOriginsValue(t *testing.T) { + var x []string + + val := NewCORSOrigins(&x, []string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"*"} + + require.Equal(t, "*", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost") + + require.Equal(t, []string{"http://localhost"}, x) +} + +func TestPortValue(t *testing.T) { + var x int + + val := NewPort(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + +func TestURLValue(t *testing.T) { + var x string + + val := NewURL(&x, "http://localhost/foobar") + + require.Equal(t, "http://localhost/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "http://localhost:8080/foobar" + + require.Equal(t, "http://localhost:8080/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost:8080/fooboz/foobaz") + + require.Equal(t, "http://localhost:8080/fooboz/foobaz", x) +} + +func TestEmailValue(t *testing.T) { + var x string + + val := NewEmail(&x, "foobar@example.com") + + require.Equal(t, "foobar@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobar+baz@example.com" + + require.Equal(t, "foobar+baz@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("foobar@sub.example.com") + + require.Equal(t, "foobar@sub.example.com", x) +} diff --git a/config/value/os.go b/config/value/os.go index dddfdc5e..6f57c1b3 100644 --- a/config/value/os.go +++ b/config/value/os.go @@ -2,39 +2,51 @@ package value import ( "fmt" - "os" - "os/exec" "path/filepath" "strings" + + "github.com/datarhei/core/v16/io/fs" ) // must directory -type MustDir string +type MustDir struct { + p *string + fs fs.Filesystem +} + +func NewMustDir(p *string, val string, fs fs.Filesystem) *MustDir { + v := &MustDir{ + p: p, + fs: fs, + } -func NewMustDir(p *string, val string) *MustDir { *p = val - return (*MustDir)(p) + return v } func (u *MustDir) Set(val string) error { - *u = MustDir(val) + *u.p = val return nil } func (u *MustDir) String() string { - return string(*u) + return *u.p } func (u *MustDir) Validate() error { - val := string(*u) + val := *u.p if len(strings.TrimSpace(val)) == 0 { return fmt.Errorf("path name must not be empty") } - finfo, err := os.Stat(val) + if err := u.fs.MkdirAll(val, 0750); err != nil { + return fmt.Errorf("%s can't be created (%w)", val, err) + } + + finfo, err := u.fs.Stat(val) if err != nil { return fmt.Errorf("%s does not exist", val) } @@ -47,36 +59,44 @@ func (u *MustDir) Validate() error { } func (u
*MustDir) IsEmpty() bool { - return len(string(*u)) == 0 + return len(*u.p) == 0 } // directory -type Dir string +type Dir struct { + p *string + fs fs.Filesystem +} + +func NewDir(p *string, val string, fs fs.Filesystem) *Dir { + v := &Dir{ + p: p, + fs: fs, + } -func NewDir(p *string, val string) *Dir { *p = val - return (*Dir)(p) + return v } func (u *Dir) Set(val string) error { - *u = Dir(val) + *u.p = val return nil } func (u *Dir) String() string { - return string(*u) + return *u.p } func (u *Dir) Validate() error { - val := string(*u) + val := *u.p if len(strings.TrimSpace(val)) == 0 { return nil } - finfo, err := os.Stat(val) + finfo, err := u.fs.Stat(val) if err != nil { return fmt.Errorf("%s does not exist", val) } @@ -89,32 +109,40 @@ func (u *Dir) Validate() error { } func (u *Dir) IsEmpty() bool { - return len(string(*u)) == 0 + return len(*u.p) == 0 } // executable -type Exec string +type Exec struct { + p *string + fs fs.Filesystem +} + +func NewExec(p *string, val string, fs fs.Filesystem) *Exec { + v := &Exec{ + p: p, + fs: fs, + } -func NewExec(p *string, val string) *Exec { *p = val - return (*Exec)(p) + return v } func (u *Exec) Set(val string) error { - *u = Exec(val) + *u.p = val return nil } func (u *Exec) String() string { - return string(*u) + return *u.p } func (u *Exec) Validate() error { - val := string(*u) + val := *u.p - _, err := exec.LookPath(val) + _, err := u.fs.LookPath(val) if err != nil { return fmt.Errorf("%s not found or is not executable", val) } @@ -123,36 +151,44 @@ func (u *Exec) Validate() error { } func (u *Exec) IsEmpty() bool { - return len(string(*u)) == 0 + return len(*u.p) == 0 } // regular file -type File string +type File struct { + p *string + fs fs.Filesystem +} + +func NewFile(p *string, val string, fs fs.Filesystem) *File { + v := &File{ + p: p, + fs: fs, + } -func NewFile(p *string, val string) *File { *p = val - return (*File)(p) + return v } func (u *File) Set(val string) error { - *u = File(val) + *u.p = val return nil } func (u *File) String() string { - return string(*u) + return *u.p } func (u *File) Validate() error { - val := string(*u) + val := *u.p if len(val) == 0 { return nil } - finfo, err := os.Stat(val) + finfo, err := u.fs.Stat(val) if err != nil { return fmt.Errorf("%s does not exist", val) } @@ -165,7 +201,7 @@ func (u *File) Validate() error { } func (u *File) IsEmpty() bool { - return len(string(*u)) == 0 + return len(*u.p) == 0 } // absolute path diff --git a/config/value/os_test.go b/config/value/os_test.go new file mode 100644 index 00000000..1706ba94 --- /dev/null +++ b/config/value/os_test.go @@ -0,0 +1,142 @@ +package value + +import ( + "testing" + + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" +) + +func TestMustDirValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + _, err = memfs.Stat("/foobar") + require.Error(t, err) + + var x string + + val := NewMustDir(&x, "./foobar", memfs) + + require.Equal(t, "./foobar", val.String()) + require.NoError(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + info, err := memfs.Stat("/foobar") + require.NoError(t, err) + require.True(t, info.IsDir()) + + x = "/bar/foo" + + require.Equal(t, "/bar/foo", val.String()) + + _, err = memfs.Stat("/bar/foo") + require.Error(t, err) + + require.NoError(t, val.Validate()) + + info, err = memfs.Stat("/bar/foo") + require.NoError(t, err) + require.True(t, info.IsDir()) + + memfs.WriteFile("/foo/bar", []byte("hello")) + + 
val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestDirValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewDir(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + err = memfs.MkdirAll("/foobar", 0755) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + _, _, err = memfs.WriteFile("/foo/bar", []byte("hello")) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestFileValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewFile(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + _, _, err = memfs.WriteFile("/foobar", []byte("hello")) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + err = memfs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestExecValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewExec(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + _, _, err = memfs.WriteFile("/foobar", []byte("hello")) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + err = memfs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestAbsolutePathValue(t *testing.T) { + var x string + + val := NewAbsolutePath(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "/foobaz" + + require.Equal(t, "/foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("/fooboz") + + require.Equal(t, "/fooboz", x) +} diff --git a/config/value/primitives.go b/config/value/primitives.go index 701db26b..1dd52a94 100644 --- a/config/value/primitives.go +++ b/config/value/primitives.go @@ -1,6 +1,7 @@ package value import ( + "sort" "strconv" "strings" ) @@ -127,11 +128,20 @@ func (s *StringMapString) String() string { return "(empty)" } + sms := *s.p + + keys := []string{} + for k := range sms { + keys = append(keys, k) + } + + sort.Strings(keys) + mappings := make([]string, len(*s.p)) i := 0 - for k, v := range *s.p { - mappings[i] = k + ":" + v + for _, k := range keys { + mappings[i] = k + ":" + sms[k] i++ } diff --git a/config/value/primitives_test.go b/config/value/primitives_test.go new file mode 100644 index 00000000..4b815b90 --- /dev/null +++ b/config/value/primitives_test.go @@ -0,0 +1,147 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStringValue(t *testing.T) { + var x string + + val := NewString(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz" + + require.Equal(t, "foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz") + + require.Equal(t, "fooboz", x) +} + +func TestStringListValue(t *testing.T) { + var x []string + + val := NewStringList(&x, 
[]string{"foobar"}, " ") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = []string{"foobar", "foobaz"} + + require.Equal(t, "foobar foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz foobar") + + require.Equal(t, []string{"fooboz", "foobar"}, x) +} + +func TestStringMapStringValue(t *testing.T) { + var x map[string]string + + val := NewStringMapString(&x, map[string]string{"a": "foobar"}) + + require.Equal(t, "a:foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = map[string]string{"a": "foobar", "b": "foobaz"} + + require.Equal(t, "a:foobar b:foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("x:fooboz y:foobar") + + require.Equal(t, map[string]string{"x": "fooboz", "y": "foobar"}, x) +} + +func TestBoolValue(t *testing.T) { + var x bool + + val := NewBool(&x, false) + + require.Equal(t, "false", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = true + + require.Equal(t, "true", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("false") + + require.Equal(t, false, x) +} + +func TestIntValue(t *testing.T) { + var x int + + val := NewInt(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + +func TestInt64Value(t *testing.T) { + var x int64 + + val := NewInt64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int64(77), x) +} + +func TestUint64Value(t *testing.T) { + var x uint64 + + val := NewUint64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, uint64(77), x) +} diff --git a/config/value/s3.go b/config/value/s3.go new file mode 100644 index 00000000..a85a0838 --- /dev/null +++ b/config/value/s3.go @@ -0,0 +1,179 @@ +package value + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/publicsuffix" +) + +// array of s3 storages +// https://access_key_id:secret_access_id@region.endpoint/bucket?name=aaa&mount=/abc&username=xxx&password=yyy + +type S3Storage struct { + Name string `json:"name"` + Mountpoint string `json:"mountpoint"` + Auth struct { + Enable bool `json:"enable"` + Username string `json:"username"` + Password string `json:"password"` + } `json:"auth"` + Endpoint string `json:"endpoint"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Bucket string `json:"bucket"` + Region string `json:"region"` + UseSSL bool `json:"use_ssl"` +} + +func (t *S3Storage) String() string { + u := url.URL{} + + if t.UseSSL { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + + u.User = 
url.UserPassword(t.AccessKeyID, "---") + + u.Host = t.Endpoint + + if len(t.Region) != 0 { + u.Host = t.Region + "." + u.Host + } + + if len(t.Bucket) != 0 { + u.Path = "/" + t.Bucket + } + + v := url.Values{} + v.Set("name", t.Name) + v.Set("mountpoint", t.Mountpoint) + + if t.Auth.Enable { + if len(t.Auth.Username) != 0 { + v.Set("username", t.Auth.Username) + } + + if len(t.Auth.Password) != 0 { + v.Set("password", "---") + } + } + + u.RawQuery = v.Encode() + + return u.String() +} + +type s3StorageListValue struct { + p *[]S3Storage + separator string +} + +func NewS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue { + v := &s3StorageListValue{ + p: p, + separator: separator, + } + + *p = val + return v +} + +func (s *s3StorageListValue) Set(val string) error { + list := []S3Storage{} + + for _, elm := range strings.Split(val, s.separator) { + u, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err) + } + + t := S3Storage{ + Name: u.Query().Get("name"), + Mountpoint: u.Query().Get("mountpoint"), + AccessKeyID: u.User.Username(), + } + + hostname := u.Hostname() + port := u.Port() + + domain, err := publicsuffix.EffectiveTLDPlusOne(hostname) + if err != nil { + return fmt.Errorf("invalid eTLD (%s): %w", hostname, err) + } + + t.Endpoint = domain + if len(port) != 0 { + t.Endpoint += ":" + port + } + + region := strings.TrimSuffix(hostname, domain) + if len(region) != 0 { + t.Region = strings.TrimSuffix(region, ".") + } + + secret, ok := u.User.Password() + if ok { + t.SecretAccessKey = secret + } + + t.Bucket = strings.TrimPrefix(u.Path, "/") + + if u.Scheme == "https" { + t.UseSSL = true + } + + if u.Query().Has("username") || u.Query().Has("password") { + t.Auth.Enable = true + t.Auth.Username = u.Query().Get("username") + t.Auth.Password = u.Query().Get("password") + } + + list = append(list, t) + } + + *s.p = list + + return nil +} + +func (s *s3StorageListValue) String() string { + if s.IsEmpty() { + return "(empty)" + } + + list := []string{} + + for _, t := range *s.p { + list = append(list, t.String()) + } + + return strings.Join(list, s.separator) +} + +func (s *s3StorageListValue) Validate() error { + for i, t := range *s.p { + if len(t.Name) == 0 { + return fmt.Errorf("the name for s3 storage %d is missing", i) + } + + if len(t.Mountpoint) == 0 { + return fmt.Errorf("the mountpoint for s3 storage %d is missing", i) + } + + if t.Auth.Enable { + if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 { + return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i) + } + } + } + + return nil +} + +func (s *s3StorageListValue) IsEmpty() bool { + return len(*s.p) == 0 +} diff --git a/config/value/time_test.go b/config/value/time_test.go new file mode 100644 index 00000000..3259d7d2 --- /dev/null +++ b/config/value/time_test.go @@ -0,0 +1,30 @@ +package value + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestTimeValue(t *testing.T) { + var x time.Time + + tm := time.Unix(1257894000, 0).UTC() + + val := NewTime(&x, tm) + + require.Equal(t, "2009-11-10T23:00:00Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = time.Unix(1257894001, 0).UTC() + + require.Equal(t, "2009-11-10T23:00:01Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("2009-11-11T23:00:00Z") + + require.Equal(t, time.Time(time.Date(2009,
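
(Editor's aside: the S3 storage URL form above folds region and endpoint into one host; Set recovers them by treating the effective TLD plus one label as the endpoint and the leading host labels as the region. A standalone sketch of that split, with made-up credentials:)

```go
package main

import (
	"fmt"
	"net/url"
	"strings"

	"golang.org/x/net/publicsuffix"
)

func main() {
	u, err := url.Parse("https://key:secret@eu-central-1.example.com/bucket?name=media&mountpoint=/media")
	if err != nil {
		panic(err)
	}

	// example.com is the effective TLD+1, so it becomes the endpoint ...
	endpoint, err := publicsuffix.EffectiveTLDPlusOne(u.Hostname())
	if err != nil {
		panic(err)
	}

	// ... and the remaining host prefix, eu-central-1, becomes the region.
	region := strings.TrimSuffix(strings.TrimSuffix(u.Hostname(), endpoint), ".")

	fmt.Println(endpoint, region)                // example.com eu-central-1
	fmt.Println(strings.TrimPrefix(u.Path, "/")) // bucket
	fmt.Println(u.Scheme == "https")             // true, i.e. UseSSL
}
```
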
time.November, 11, 23, 0, 0, 0, time.UTC)), x) +} diff --git a/config/value/value_test.go b/config/value/value_test.go index 49c024e9..3f36b17f 100644 --- a/config/value/value_test.go +++ b/config/value/value_test.go @@ -3,29 +3,9 @@ package value import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestIntValue(t *testing.T) { - var i int - - ivar := NewInt(&i, 11) - - assert.Equal(t, "11", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) - - i = 42 - - assert.Equal(t, "42", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) - - ivar.Set("77") - - assert.Equal(t, int(77), i) -} - type testdata struct { value1 int value2 int @@ -37,22 +17,22 @@ func TestCopyStruct(t *testing.T) { NewInt(&data1.value1, 1) NewInt(&data1.value2, 2) - assert.Equal(t, int(1), data1.value1) - assert.Equal(t, int(2), data1.value2) + require.Equal(t, int(1), data1.value1) + require.Equal(t, int(2), data1.value2) data2 := testdata{} val21 := NewInt(&data2.value1, 3) val22 := NewInt(&data2.value2, 4) - assert.Equal(t, int(3), data2.value1) - assert.Equal(t, int(4), data2.value2) + require.Equal(t, int(3), data2.value1) + require.Equal(t, int(4), data2.value2) data2 = data1 - assert.Equal(t, int(1), data2.value1) - assert.Equal(t, int(2), data2.value2) + require.Equal(t, int(1), data2.value1) + require.Equal(t, int(2), data2.value2) - assert.Equal(t, "1", val21.String()) - assert.Equal(t, "2", val22.String()) + require.Equal(t, "1", val21.String()) + require.Equal(t, "2", val22.String()) } diff --git a/config/vars/vars_test.go b/config/vars/vars_test.go index 38e51fb4..c41dd77a 100644 --- a/config/vars/vars_test.go +++ b/config/vars/vars_test.go @@ -1,6 +1,7 @@ package vars import ( + "os" "testing" "github.com/datarhei/core/v16/config/value" @@ -38,3 +39,210 @@ func TestVars(t *testing.T) { x, _ = v1.Get("string") require.Equal(t, "foobar", x) } + +func TestSetDefault(t *testing.T) { + v := Variables{} + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + require.Equal(t, "foobar", s) + + v.Set("string", "foobaz") + + require.Equal(t, "foobaz", s) + + v.SetDefault("strong") + + require.Equal(t, "foobaz", s) + + v.SetDefault("string") + + require.Equal(t, "foobar", s) +} + +func TestGet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + value, err := v.Get("string") + require.NoError(t, err) + require.Equal(t, "foobar", value) + + value, err = v.Get("strong") + require.Error(t, err) + require.Equal(t, "", value) +} + +func TestSet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + err := v.Set("string", "foobaz") + require.NoError(t, err) + require.Equal(t, "foobaz", s) + + err = v.Set("strong", "fooboz") + require.Error(t, err) + require.Equal(t, "foobaz", s) +} + +func TestLog(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + v.Log("info", "strong", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, Variable{ + Value: "foobar", + Name: "string", + 
EnvName: "", + Description: "a string", + Merged: false, + }, v.logs[0].variable) + + v.ResetLogs() + + require.Equal(t, 0, len(v.logs)) +} + +func TestMerge(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 0, len(v.logs)) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestMergeAlt(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRUNG", []string{"CORE_TEST_STRING"}, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 1, len(v.logs)) + + require.Contains(t, v.logs[0].message, "CORE_TEST_STRUNG") + require.Equal(t, "warn", v.logs[0].level) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestNoMerge(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRONG", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobar") + require.Equal(t, false, v.IsMerged("string")) + + os.Unsetenv("CORE_TEST_STRONG") +} + +func TestValidate(t *testing.T) { + v := Variables{} + + s1 := "" + s2 := "" + + v.Register(value.NewString(&s1, ""), "string", "", nil, "a string", false, false) + v.Register(value.NewString(&s2, ""), "string", "", nil, "a string", true, false) + + require.Equal(t, s1, "") + require.Equal(t, s2, "") + + require.Equal(t, false, v.HasErrors()) + + v.Validate() + + require.Equal(t, true, v.HasErrors()) + + ninfo := 0 + nerror := 0 + v.Messages(func(level string, v Variable, message string) { + if level == "info" { + ninfo++ + } else if level == "error" { + nerror++ + } + }) + + require.Equal(t, 2, ninfo) + require.Equal(t, 1, nerror) +} + +func TestOverrides(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + v.Merge() + + overrides := v.Overrides() + + require.ElementsMatch(t, []string{"string"}, overrides) +} + +func TestDisquise(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, true) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, Variable{ + Value: "***", + Name: "string", + EnvName: "", + Description: "a string", + Merged: false, + }, v.logs[0].variable) +} diff --git a/docs/docs.go b/docs/docs.go index 0d0740fd..353c8b58 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -229,7 +229,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/api.Config" + "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config" } } } @@ -292,9 +292,9 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.", + "description": "Reload the currently active configuration. 
This will trigger a restart of the Core.", "produces": [ - "text/plain" + "application/json" ], "tags": [ "v16.7.2" ], @@ -311,220 +311,53 @@ const docTemplate = `{ } } }, - "/api/v3/fs/disk": { + "/api/v3/fs": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "description": "List all registered filesystems", "produces": [ "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "List all files on the filesystem", - "operationId": "diskfs-3-list-files", - "parameters": [ - { - "type": "string", - "description": "glob pattern for file names", - "name": "glob", - "in": "query" - }, - { - "type": "string", - "description": "none, name, size, lastmod", - "name": "sort", - "in": "query" - }, - { - "type": "string", - "description": "asc, desc", - "name": "order", - "in": "query" - } - ], + "summary": "List all registered filesystems", + "operationId": "filesystem-3-list", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { - "$ref": "#/definitions/api.FileInfo" + "$ref": "#/definitions/api.FilesystemInfo" } } } } } }, - "/api/v3/fs/disk/{path}": { + "/api/v3/fs/{name}": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Fetch a file from the filesystem. The contents of that file are returned.", - "produces": [ - "application/data", - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-3-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Writes or overwrites a file on the filesystem", - "consumes": [ - "application/data" - ], + "description": "List all files on a filesystem.
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", "produces": [ - "text/plain", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Add a file to the filesystem", - "operationId": "diskfs-3-put-file", + "summary": "List all files on a filesystem", + "operationId": "filesystem-3-list-files", "parameters": [ { "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Remove a file from the filesystem", - "produces": [ - "text/plain" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Remove a file from the filesystem", - "operationId": "diskfs-3-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", + "description": "Name of the filesystem", + "name": "name", "in": "path", "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, - "/api/v3/fs/mem": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.", - "produces": [ - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "List all files on the memory filesystem", - "operationId": "memfs-3-list-files", - "parameters": [ { "type": "string", "description": "glob pattern for file names", @@ -557,24 +390,28 @@ const docTemplate = `{ } } }, - "/api/v3/fs/mem/{path}": { + "/api/v3/fs/{name}/{path}": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Fetch a file from the memory filesystem", + "description": "Fetch a file from a filesystem", "produces": [ "application/data", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-3-get-file", + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", @@ -610,7 +447,7 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Writes or overwrites a file on the memory filesystem", + "description": "Writes or overwrites a file on a filesystem", "consumes": [ "application/data" ], @@ -618,12 +455,16 @@ const docTemplate = `{ "text/plain", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-3-put-file", + "summary": "Add a file to a filesystem", + "operationId": "filesystem-3-put-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, { "type": "string", "description": 
"Path to file", @@ -671,85 +512,37 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Remove a file from the memory filesystem", + "description": "Remove a file from a filesystem", "produces": [ "text/plain" ], - "tags": [ - "v16.7.2" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-3-delete-file", + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", "parameters": [ { "type": "string", - "description": "Path to file", - "name": "path", + "description": "Name of the filesystem", + "name": "name", "in": "path", "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "patch": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Create a link to a file in the memory filesystem. The file linked to has to exist.", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Create a link to a file in the memory filesystem", - "operationId": "memfs-3-patch", - "parameters": [ { "type": "string", "description": "Path to file", "name": "path", "in": "path", "required": true - }, - { - "description": "Path to the file to link to", - "name": "url", - "in": "body", - "required": true, - "schema": { - "type": "string" - } } ], "responses": { - "201": { - "description": "Created", + "200": { + "description": "OK", "schema": { "type": "string" } }, - "400": { - "description": "Bad Request", + "404": { + "description": "Not Found", "schema": { "$ref": "#/definitions/api.Error" } @@ -2132,140 +1925,6 @@ const docTemplate = `{ } } }, - "/memfs/{path}": { - "get": { - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], 
- "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2299,60 +1958,20 @@ const docTemplate = `{ "type": "string" } } - } - } - }, - "/profiling": { - "get": { - "description": "Retrieve profiling data from the application", - "produces": [ - "text/html" - ], - "summary": "Retrieve profiling data from the application", - "operationId": "profiling", - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "type": "string" - } - } - } - } - }, - "/{path}": { - "get": { - "description": "Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } + } + } + }, + "/profiling": { + "get": { + "description": "Retrieve profiling data from the application", + "produces": [ + "text/html" ], + "summary": "Retrieve profiling data from the application", + "operationId": "profiling", "responses": { "200": { "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", "schema": { "type": "string" } @@ -2360,7 +1979,7 @@ const docTemplate = `{ "404": { "description": "Not Found", "schema": { - "$ref": "#/definitions/api.Error" + "type": "string" } } } @@ -2372,19 +1991,23 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2399,7 +2022,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2407,7 +2031,8 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2470,29 +2095,6 @@ const docTemplate = `{ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - } - }, "api.ConfigData": { "type": "object", "properties": { @@ -2586,6 +2188,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2600,7 +2203,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", 
+ "format": "int64" }, "profiling": { "type": "boolean" @@ -2656,15 +2264,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2699,7 +2310,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2720,11 +2332,13 @@ const docTemplate = `{ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2738,10 +2352,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2815,19 +2431,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2886,13 +2506,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -2917,7 +2540,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2939,7 +2563,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -2948,6 +2573,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -2978,7 +2609,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2995,7 +2627,8 @@ const docTemplate = `{ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -3012,13 +2645,29 @@ const docTemplate = `{ "type": "object", "properties": { "last_modified": { - "type": "integer" + "type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" + } + } + }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" } } }, @@ -3100,7 +2749,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3109,7 +2759,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3131,7 +2782,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": 
"integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3140,7 +2792,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3179,20 +2832,24 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -3210,10 +2867,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3227,10 +2886,12 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3240,7 +2901,8 @@ const docTemplate = `{ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3285,7 +2947,8 @@ const docTemplate = `{ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3304,10 +2967,12 @@ const docTemplate = `{ "type": "number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3320,10 +2985,12 @@ const docTemplate = `{ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" @@ -3333,7 +3000,8 @@ const docTemplate = `{ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3344,7 +3012,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3402,13 +3071,15 @@ const docTemplate = `{ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3452,10 +3123,12 @@ const docTemplate = `{ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3472,10 +3145,12 @@ const docTemplate = `{ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3483,7 +3158,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": "array", @@ -3512,7 +3188,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + 
"format": "int64" }, "log": { "type": "array", @@ -3550,7 +3227,8 @@ const docTemplate = `{ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3559,10 +3237,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3574,16 +3254,19 @@ const docTemplate = `{ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3598,14 +3281,16 @@ const docTemplate = `{ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3623,14 +3308,19 @@ const docTemplate = `{ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3645,23 +3335,27 @@ const docTemplate = `{ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, "index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3675,20 +3369,24 @@ const docTemplate = `{ }, "sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3762,7 +3460,8 @@ const docTemplate = `{ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3771,11 +3470,13 @@ const docTemplate = `{ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network link, in Mbps", @@ -3783,11 +3484,13 @@ const docTemplate = `{ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3795,11 +3498,13 @@ const docTemplate = `{ }, "mss_bytes": { "description": "Maximum 
Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": { "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3807,79 +3512,98 @@ const docTemplate = `{ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of packets that 
failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds", @@ -3887,75 +3611,93 @@ const docTemplate = `{ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. 
Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_bytes": { "description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3971,13 +3713,16 @@ const docTemplate = `{ "type": "number" }, "bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { "type": "string" @@ -4006,13 +3751,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4020,13 +3768,16 @@ const docTemplate = `{ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4067,10 +3818,12 @@ const docTemplate = `{ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4096,13 +3849,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4214,6 +3970,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", 
"type": "string" }, "db": { @@ -4228,7 +3985,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4284,15 +4046,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4327,7 +4092,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4348,11 +4114,13 @@ const docTemplate = `{ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4366,10 +4134,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4443,19 +4213,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4514,13 +4288,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4545,7 +4322,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4567,7 +4345,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4576,6 +4355,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -4606,7 +4391,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4863,16 +4649,41 @@ const docTemplate = `{ "type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" } } }, + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { + "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, "value.Auth0Tenant": { "type": "object", "properties": { @@ -4892,6 +4703,49 @@ const docTemplate = `{ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + 
"type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.json b/docs/swagger.json index ae470126..75d15a44 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -221,7 +221,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/api.Config" + "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config" } } } @@ -284,9 +284,9 @@ "ApiKeyAuth": [] } ], - "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.", + "description": "Reload the currently active configuration. This will trigger a restart of the Core.", "produces": [ - "text/plain" + "application/json" ], "tags": [ "v16.7.2" @@ -303,220 +303,53 @@ } } }, - "/api/v3/fs/disk": { + "/api/v3/fs": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "description": "Listall registered filesystems", "produces": [ "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "List all files on the filesystem", - "operationId": "diskfs-3-list-files", - "parameters": [ - { - "type": "string", - "description": "glob pattern for file names", - "name": "glob", - "in": "query" - }, - { - "type": "string", - "description": "none, name, size, lastmod", - "name": "sort", - "in": "query" - }, - { - "type": "string", - "description": "asc, desc", - "name": "order", - "in": "query" - } - ], + "summary": "List all registered filesystems", + "operationId": "filesystem-3-list", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { - "$ref": "#/definitions/api.FileInfo" + "$ref": "#/definitions/api.FilesystemInfo" } } } } } }, - "/api/v3/fs/disk/{path}": { + "/api/v3/fs/{name}": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Fetch a file from the filesystem. The contents of that file are returned.", - "produces": [ - "application/data", - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-3-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Writes or overwrites a file on the filesystem", - "consumes": [ - "application/data" - ], + "description": "List all files on a filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", "produces": [ - "text/plain", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Add a file to the filesystem", - "operationId": "diskfs-3-put-file", + "summary": "List all files on a filesystem", + "operationId": "filesystem-3-list-files", "parameters": [ { "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Remove a file from the filesystem", - "produces": [ - "text/plain" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Remove a file from the filesystem", - "operationId": "diskfs-3-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", + "description": "Name of the filesystem", + "name": "name", "in": "path", "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, - "/api/v3/fs/mem": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.", - "produces": [ - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "List all files on the memory filesystem", - "operationId": "memfs-3-list-files", - "parameters": [ { "type": "string", "description": "glob pattern for file names", @@ -549,24 +382,28 @@ } } }, - "/api/v3/fs/mem/{path}": { + "/api/v3/fs/{name}/{path}": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Fetch a file from the memory filesystem", + "description": "Fetch a file from a filesystem", "produces": [ "application/data", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-3-get-file", + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", @@ -602,7 +439,7 @@ "ApiKeyAuth": [] } ], - "description": "Writes or overwrites a file on the memory filesystem", + "description": "Writes or overwrites a file on a filesystem", "consumes": [ "application/data" ], @@ -610,12 +447,16 @@ "text/plain", "application/json" ], - "tags": [ - "v16.7.2" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-3-put-file", + "summary": "Add a file to a filesystem", + "operationId": "filesystem-3-put-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", @@ -663,85 +504,37 @@ "ApiKeyAuth": [] } ], - 
"description": "Remove a file from the memory filesystem", + "description": "Remove a file from a filesystem", "produces": [ "text/plain" ], - "tags": [ - "v16.7.2" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-3-delete-file", + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", "parameters": [ { "type": "string", - "description": "Path to file", - "name": "path", + "description": "Name of the filesystem", + "name": "name", "in": "path", "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "patch": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Create a link to a file in the memory filesystem. The file linked to has to exist.", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "tags": [ - "v16.7.2" - ], - "summary": "Create a link to a file in the memory filesystem", - "operationId": "memfs-3-patch", - "parameters": [ { "type": "string", "description": "Path to file", "name": "path", "in": "path", "required": true - }, - { - "description": "Path to the file to link to", - "name": "url", - "in": "body", - "required": true, - "schema": { - "type": "string" - } } ], "responses": { - "201": { - "description": "Created", + "200": { + "description": "OK", "schema": { "type": "string" } }, - "400": { - "description": "Bad Request", + "404": { + "description": "Not Found", "schema": { "$ref": "#/definitions/api.Error" } @@ -2124,140 +1917,6 @@ } } }, - "/memfs/{path}": { - "get": { - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-delete-file", - 
"parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2291,60 +1950,20 @@ "type": "string" } } - } - } - }, - "/profiling": { - "get": { - "description": "Retrieve profiling data from the application", - "produces": [ - "text/html" - ], - "summary": "Retrieve profiling data from the application", - "operationId": "profiling", - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "type": "string" - } - } - } - } - }, - "/{path}": { - "get": { - "description": "Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } + } + } + }, + "/profiling": { + "get": { + "description": "Retrieve profiling data from the application", + "produces": [ + "text/html" ], + "summary": "Retrieve profiling data from the application", + "operationId": "profiling", "responses": { "200": { "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", "schema": { "type": "string" } @@ -2352,7 +1971,7 @@ "404": { "description": "Not Found", "schema": { - "$ref": "#/definitions/api.Error" + "type": "string" } } } @@ -2364,19 +1983,23 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2391,7 +2014,8 @@ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2399,7 +2023,8 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2462,29 +2087,6 @@ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - } - }, "api.ConfigData": { "type": "object", "properties": { @@ -2578,6 +2180,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2592,7 +2195,12 @@ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -2648,15 +2256,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, 
"max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2691,7 +2302,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2712,11 +2324,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2730,10 +2344,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2807,19 +2423,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2878,13 +2498,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -2909,7 +2532,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2931,7 +2555,8 @@ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -2940,6 +2565,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -2970,7 +2601,8 @@ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2987,7 +2619,8 @@ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -3004,13 +2637,29 @@ "type": "object", "properties": { "last_modified": { - "type": "integer" + "type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" + } + } + }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" } } }, @@ -3092,7 +2741,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3101,7 +2751,8 @@ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3123,7 +2774,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3132,7 +2784,8 @@ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3171,20 +2824,24 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": 
"string" @@ -3202,10 +2859,12 @@ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3219,10 +2878,12 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3232,7 +2893,8 @@ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3277,7 +2939,8 @@ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3296,10 +2959,12 @@ "type": "number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3312,10 +2977,12 @@ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" @@ -3325,7 +2992,8 @@ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3336,7 +3004,8 @@ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3394,13 +3063,15 @@ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3444,10 +3115,12 @@ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3464,10 +3137,12 @@ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3475,7 +3150,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": "array", @@ -3504,7 +3180,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "log": { "type": "array", @@ -3542,7 +3219,8 @@ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3551,10 +3229,12 @@ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3566,16 +3246,19 @@ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3590,14 +3273,16 @@ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": 
"integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3615,14 +3300,19 @@ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3637,23 +3327,27 @@ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, "index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3667,20 +3361,24 @@ }, "sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3754,7 +3452,8 @@ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3763,11 +3462,13 @@ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network link, in Mbps", @@ -3775,11 +3476,13 @@ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3787,11 +3490,13 @@ }, "mss_bytes": { "description": "Maximum Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": { "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3799,79 +3504,98 @@ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as 
pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of packets that failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds", @@ -3879,75 +3603,93 @@ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The 
timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_bytes": { "description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3963,13 +3705,16 @@ "type": "number" }, "bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { 
"type": "string" @@ -3998,13 +3743,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4012,13 +3760,16 @@ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4059,10 +3810,12 @@ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4088,13 +3841,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4206,6 +3962,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -4220,7 +3977,12 @@ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4276,15 +4038,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4319,7 +4084,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4340,11 +4106,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4358,10 +4126,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4435,19 +4205,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4506,13 +4280,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4537,7 +4314,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4559,7 +4337,8 @@ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4568,6 +4347,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -4598,7 +4383,8 @@ "type": 
"boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4855,16 +4641,41 @@ "type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" } } }, + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { + "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, "value.Auth0Tenant": { "type": "object", "properties": { @@ -4884,6 +4695,49 @@ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index a11c50c6..d735b7c8 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -3,14 +3,18 @@ definitions: api.AVstream: properties: aqueue: + format: uint64 type: integer drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -21,11 +25,13 @@ definitions: output: $ref: '#/definitions/api.AVstreamIO' queue: + format: uint64 type: integer type: object api.AVstreamIO: properties: packet: + format: uint64 type: integer size_kb: type: integer @@ -68,21 +74,6 @@ definitions: required: - command type: object - api.Config: - properties: - config: - $ref: '#/definitions/api.ConfigData' - created_at: - type: string - loaded_at: - type: string - overrides: - items: - type: string - type: array - updated_at: - type: string - type: object api.ConfigData: properties: address: @@ -143,6 +134,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -152,6 +144,10 @@ definitions: debug: properties: force_gc: + format: int + type: integer + memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -188,11 +184,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -217,6 +216,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -231,9 +231,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -243,8 +245,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -293,14 +297,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int type: 
integer session_timeout_sec: + format: int type: integer type: object srt: @@ -339,10 +347,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -359,6 +370,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ -373,12 +385,17 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -398,6 +415,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.ConfigError: @@ -409,6 +427,7 @@ definitions: api.Error: properties: code: + format: int type: integer details: items: @@ -420,12 +439,23 @@ definitions: api.FileInfo: properties: last_modified: + format: int64 type: integer name: type: string size_bytes: + format: int64 type: integer type: object + api.FilesystemInfo: + properties: + mount: + type: string + name: + type: string + type: + type: string + type: object api.GraphQuery: properties: query: @@ -478,12 +508,14 @@ definitions: api.MetricsQuery: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsQueryMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsQueryMetric: @@ -498,12 +530,14 @@ definitions: api.MetricsResponse: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsResponseMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsResponseMetric: @@ -529,15 +563,19 @@ definitions: api.PlayoutStatus: properties: aqueue: + format: uint64 type: integer debug: {} drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -550,8 +588,10 @@ definitions: output: $ref: '#/definitions/api.PlayoutStatusIO' queue: + format: uint64 type: integer stream: + format: uint64 type: integer swap: $ref: '#/definitions/api.PlayoutStatusSwap' @@ -561,8 +601,10 @@ definitions: api.PlayoutStatusIO: properties: packet: + format: uint64 type: integer size_kb: + format: uint64 type: integer state: enum: @@ -570,6 +612,7 @@ definitions: - idle type: string time: + format: uint64 type: integer type: object api.PlayoutStatusSwap: @@ -599,6 +642,7 @@ definitions: bitrate_kbps: type: number channels: + format: uint64 type: integer codec: type: string @@ -612,8 +656,10 @@ definitions: description: video type: number height: + format: uint64 type: integer index: + format: uint64 type: integer language: type: string @@ -623,8 +669,10 @@ definitions: type: string sampling_hz: description: audio + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string @@ -632,6 +680,7 @@ definitions: description: common type: string width: + format: uint64 type: integer type: object api.Process: @@ -639,6 +688,7 @@ definitions: config: $ref: '#/definitions/api.ProcessConfig' created_at: + format: int64 type: integer id: type: string @@ -675,10 +725,12 @@ definitions: reconnect: type: boolean reconnect_delay_seconds: + format: uint64 type: integer reference: type: string stale_timeout_seconds: + format: uint64 type: integer type: enum: @@ -709,8 +761,10 @@ definitions: api.ProcessConfigIOCleanup: 
properties: max_file_age_seconds: + format: uint type: integer max_files: + format: uint type: integer pattern: type: string @@ -724,13 +778,16 @@ definitions: cpu_usage: type: number memory_mbytes: + format: uint64 type: integer waitfor_seconds: + format: uint64 type: integer type: object api.ProcessReport: properties: created_at: + format: int64 type: integer history: items: @@ -750,6 +807,7 @@ definitions: api.ProcessReportHistoryEntry: properties: created_at: + format: int64 type: integer log: items: @@ -775,14 +833,17 @@ definitions: last_logline: type: string memory_bytes: + format: uint64 type: integer order: type: string progress: $ref: '#/definitions/api.Progress' reconnect_seconds: + format: int64 type: integer runtime_seconds: + format: int64 type: integer type: object api.Progress: @@ -791,12 +852,15 @@ definitions: description: kbit/s type: number drop: + format: uint64 type: integer dup: + format: uint64 type: integer fps: type: number frame: + format: uint64 type: integer inputs: items: @@ -807,11 +871,13 @@ definitions: $ref: '#/definitions/api.ProgressIO' type: array packet: + format: uint64 type: integer q: type: number size_kb: description: kbytes + format: uint64 type: integer speed: type: number @@ -823,12 +889,14 @@ definitions: address: type: string avstream: - $ref: '#/definitions/api.AVstream' + allOf: + - $ref: '#/definitions/api.AVstream' description: avstream bitrate_kbit: description: kbit/s type: number channels: + format: uint64 type: integer codec: type: string @@ -839,17 +907,21 @@ definitions: fps: type: number frame: + format: uint64 type: integer height: + format: uint64 type: integer id: type: string index: description: General + format: uint64 type: integer layout: type: string packet: + format: uint64 type: integer pix_fmt: description: Video @@ -860,15 +932,19 @@ definitions: type: number sampling_hz: description: Audio + format: uint64 type: integer size_kb: description: kbytes + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string width: + format: uint64 type: integer type: object api.RTMPChannel: @@ -917,34 +993,41 @@ definitions: type: string type: array ts: + format: int64 type: integer type: object api.SRTStatistics: properties: avail_recv_buf_bytes: description: The available space in the receiver's buffer, in bytes + format: uint64 type: integer avail_send_buf_bytes: description: The available space in the sender's buffer, in bytes + format: uint64 type: integer bandwidth_mbit: description: Estimated bandwidth of the network link, in Mbps type: number flight_size_pkt: description: The number of packets in flight + format: uint64 type: integer flow_window_pkt: description: The maximum number of packets that can be "in flight" + format: uint64 type: integer max_bandwidth_mbit: description: Transmission bandwidth limit, in Mbps type: number mss_bytes: description: Maximum Segment Size (MSS), in bytes + format: uint64 type: integer pkt_recv_avg_belated_time_ms: description: Accumulated difference between the current time and the time-to-play of a packet that is received late + format: uint64 type: integer pkt_send_period_us: description: Current minimum time interval between which consecutive packets @@ -952,79 +1035,98 @@ definitions: type: number recv_ack_pkt: description: The total number of received ACK (Acknowledgement) control packets + format: uint64 type: integer recv_buf_bytes: description: Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + 
format: uint64 type: integer recv_buf_ms: description: The timespan (msec) of acknowledged packets in the receiver's buffer + format: uint64 type: integer recv_buf_pkt: description: The number of acknowledged packets in receiver's buffer + format: uint64 type: integer recv_bytes: description: Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_bytes: description: Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_pkt: description: The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + format: uint64 type: integer recv_km_pkt: description: The total number of received KM (Key Material) control packets + format: uint64 type: integer recv_loss_bytes: description: Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + format: uint64 type: integer recv_loss_pkt: description: The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side + format: uint64 type: integer recv_nak_pkt: description: The total number of received NAK (Negative Acknowledgement) control packets + format: uint64 type: integer recv_pkt: description: The total number of received DATA packets, including retransmitted packets + format: uint64 type: integer recv_retran_pkts: description: The total number of retransmitted packets registered at the receiver side + format: uint64 type: integer recv_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + format: uint64 type: integer recv_undecrypt_bytes: description: Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_undecrypt_pkt: description: The total number of packets that failed to be decrypted at the receiver side + format: uint64 type: integer recv_unique_bytes: description: Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_unique_pkt: description: The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. 
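The `format` hints added throughout this run pin down the width and signedness that a bare `type: integer` leaves open: transfer and packet counters become `uint64`, small tunables such as `force_gc` stay plain `int`, and timestamps such as `created_at` use `int64`. For a code generator that honors `format`, a counter block like `api.SessionStats` would plausibly come out as in this sketch (a hand-written illustration, not output of the project's swagger tooling):

```go
package api // assumed package name, for illustration only

// SessionStats shows how a client generator honoring the new "format"
// hints could render the api.SessionStats definition. Counters map to
// uint64; signed fields elsewhere (e.g. created_at, format int64) map to int64.
type SessionStats struct {
	Sessions    uint64 `json:"sessions"`
	TrafficRxMB uint64 `json:"traffic_rx_mb"`
	TrafficTxMB uint64 `json:"traffic_tx_mb"`
}
```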
+ format: uint64 type: integer reorder_tolerance_pkt: description: Instant value of the packet reorder tolerance + format: uint64 type: integer rtt_ms: description: Smoothed round-trip time (SRTT), an exponentially-weighted moving @@ -1033,71 +1135,89 @@ definitions: send_buf_bytes: description: Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + format: uint64 type: integer send_buf_ms: description: The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + format: uint64 type: integer send_buf_pkt: description: The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + format: uint64 type: integer send_drop_bytes: description: Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer send_drop_pkt: description: The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + format: uint64 type: integer send_duration_us: description: The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + format: uint64 type: integer send_km_pkt: description: The total number of sent KM (Key Material) control packets + format: uint64 type: integer send_loss_pkt: description: The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. + format: uint64 type: integer send_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value of the peer + format: uint64 type: integer sent_ack_pkt: description: The total number of sent ACK (Acknowledgement) control packets + format: uint64 type: integer sent_bytes: description: Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_nak_pkt: description: The total number of sent NAK (Negative Acknowledgement) control packets + format: uint64 type: integer sent_pkt: description: The total number of sent DATA packets, including retransmitted packets + format: uint64 type: integer sent_retrans_bytes: description: Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_retrans_pkt: description: The total number of retransmitted packets sent by the SRT sender + format: uint64 type: integer sent_unique_bytes: description: Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_unique_pkt: description: The total number of unique DATA packets sent by the SRT sender + format: uint64 type: integer timestamp_ms: description: The time elapsed, in milliseconds, since the SRT socket has been created + format: uint64 type: integer type: object api.Session: @@ -1109,10 +1229,13 @@ definitions: description: kbit/s type: number bytes_rx: + format: uint64 type: integer bytes_tx: + format: uint64 type: integer created_at: + format: int64 type: integer extra: type: string @@ -1132,19 +1255,25 @@ definitions: $ref: '#/definitions/api.SessionStats' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionStats: properties: sessions: + format: uint64 type: integer 
traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionSummary: @@ -1173,8 +1302,10 @@ definitions: description: mbit/s type: number max_sessions: + format: uint64 type: integer sessions: + format: uint64 type: integer type: object api.SessionSummarySummary: @@ -1192,10 +1323,13 @@ definitions: $ref: '#/definitions/api.SessionPeers' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionsActive: @@ -1268,6 +1402,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -1277,6 +1412,10 @@ definitions: debug: properties: force_gc: + format: int + type: integer + memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -1313,11 +1452,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -1342,6 +1484,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -1356,9 +1499,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -1368,8 +1513,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -1418,14 +1565,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int type: integer session_timeout_sec: + format: int type: integer type: object srt: @@ -1464,10 +1615,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -1484,6 +1638,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ -1498,12 +1653,17 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -1523,6 +1683,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.Skills: @@ -1689,12 +1850,29 @@ definitions: api.WidgetProcess: properties: current_sessions: + format: uint64 type: integer total_sessions: + format: uint64 type: integer uptime: type: integer type: object + github_com_datarhei_core_v16_http_api.Config: + properties: + config: + $ref: '#/definitions/api.ConfigData' + created_at: + type: string + loaded_at: + type: string + overrides: + items: + type: string + type: array + updated_at: + type: string + type: object value.Auth0Tenant: properties: audience: @@ -1708,6 +1886,34 @@ definitions: type: string type: array type: object + value.S3Storage: + properties: + access_key_id: + type: string + auth: + properties: + enable: + type: boolean + password: + type: string + username: + type: string + type: object + bucket: + type: string + endpoint: + type: string + mountpoint: + type: string + name: + type: string + region: + type: string + secret_access_key: + type: string + use_ssl: + 
type: boolean + type: object info: contact: email: hello@datarhei.com @@ -1720,34 +1926,6 @@ info: title: datarhei Core API version: "3.0" paths: - /{path}: - get: - description: Fetch a file from the filesystem. If the file is a directory, a - index.html is returned, if it exists. - operationId: diskfs-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - summary: Fetch a file from the filesystem /api: get: description: API version and build infos in case auth is valid or not required. @@ -1876,7 +2054,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/api.Config' + $ref: '#/definitions/github_com_datarhei_core_v16_http_api.Config' security: - ApiKeyAuth: [] summary: Retrieve the currently active Restreamer configuration @@ -1918,10 +2096,10 @@ paths: /api/v3/config/reload: get: description: Reload the currently active configuration. This will trigger a - restart of the Restreamer. + restart of the Core. operationId: config-3-reload produces: - - text/plain + - application/json responses: "200": description: OK @@ -1932,24 +2110,10 @@ paths: summary: Reload the currently active configuration tags: - v16.7.2 - /api/v3/fs/disk: + /api/v3/fs: get: - description: List all files on the filesystem. The listing can be ordered by - name, size, or date of last modification in ascending or descending order. - operationId: diskfs-3-list-files - parameters: - - description: glob pattern for file names - in: query - name: glob - type: string - - description: none, name, size, lastmod - in: query - name: sort - type: string - - description: asc, desc - in: query - name: order - type: string + description: List all registered filesystems + operationId: filesystem-3-list produces: - application/json responses: @@ -1957,116 +2121,22 @@ paths: description: OK schema: items: - $ref: '#/definitions/api.FileInfo' + $ref: '#/definitions/api.FilesystemInfo' type: array security: - ApiKeyAuth: [] - summary: List all files on the filesystem - tags: - - v16.7.2 - /api/v3/fs/disk/{path}: - delete: - description: Remove a file from the filesystem - operationId: diskfs-3-delete-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - text/plain - responses: - "200": - description: OK - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Remove a file from the filesystem - tags: - - v16.7.2 + summary: List all registered filesystems + /api/v3/fs/{name}: get: - description: Fetch a file from the filesystem. The contents of that file are - returned.
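Two behavioural changes sit in this hunk besides the rename of `api.Config` to the fully qualified `github_com_datarhei_core_v16_http_api.Config`: `/api/v3/config/reload` now answers with `application/json` instead of `text/plain`, and both config endpoints share the same response shape. A minimal client sketch under stated assumptions (host, port, and the token environment variable are illustrative, and the nested `api.ConfigData` is elided):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// Config mirrors the github_com_datarhei_core_v16_http_api.Config definition
// above; the nested api.ConfigData field is omitted to keep the sketch short.
type Config struct {
	CreatedAt string   `json:"created_at"`
	LoadedAt  string   `json:"loaded_at"`
	UpdatedAt string   `json:"updated_at"`
	Overrides []string `json:"overrides"`
}

func main() {
	req, err := http.NewRequest("GET", "http://localhost:8080/api/v3/config", nil)
	if err != nil {
		panic(err)
	}
	// ApiKeyAuth is assumed to be a bearer token; adjust to your deployment.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("CORE_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var cfg Config
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Println("created:", cfg.CreatedAt, "overrides:", cfg.Overrides)
}
```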
- operationId: diskfs-3-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Fetch a file from the filesystem - tags: - - v16.7.2 - put: - consumes: - - application/data - description: Writes or overwrites a file on the filesystem - operationId: diskfs-3-put-file + description: List all files on a filesystem. The listing can be ordered by name, + size, or date of last modification in ascending or descending order. + operationId: filesystem-3-list-files parameters: - - description: Path to file + - description: Name of the filesystem in: path - name: path + name: name required: true type: string - - description: File data - in: body - name: data - required: true - schema: - items: - type: integer - type: array - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "204": - description: No Content - schema: - type: string - "507": - description: Insufficient Storage - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Add a file to the filesystem - tags: - - v16.7.2 - /api/v3/fs/mem: - get: - description: List all files on the memory filesystem. The listing can be ordered - by name, size, or date of last modification in ascending or descending order. - operationId: memfs-3-list-files - parameters: - description: glob pattern for file names in: query name: glob @@ -2090,14 +2160,17 @@ paths: type: array security: - ApiKeyAuth: [] - summary: List all files on the memory filesystem - tags: - - v16.7.2 - /api/v3/fs/mem/{path}: + summary: List all files on a filesystem + /api/v3/fs/{name}/{path}: delete: - description: Remove a file from the memory filesystem - operationId: memfs-3-delete-file + description: Remove a file from a filesystem + operationId: filesystem-3-delete-file parameters: + - description: Name of the filesystem + in: path + name: name + required: true + type: string - description: Path to file in: path name: path @@ -2116,13 +2189,16 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Remove a file from the memory filesystem - tags: - - v16.7.2 + summary: Remove a file from a filesystem get: - description: Fetch a file from the memory filesystem - operationId: memfs-3-get-file + description: Fetch a file from a filesystem + operationId: filesystem-3-get-file parameters: + - description: Name of the filesystem + in: path + name: name + required: true + type: string - description: Path to file in: path name: path @@ -2146,50 +2222,18 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Fetch a file from the memory filesystem - tags: - - v16.7.2 - patch: + summary: Fetch a file from a filesystem + put: consumes: - application/data - description: Create a link to a file in the memory filesystem. The file linked - to has to exist. 
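The disk- and memory-specific routes above fold into a single set keyed by filesystem name: `GET /api/v3/fs` enumerates the registered filesystems (disk, mem, and any configured S3 mounts, per `api.FilesystemInfo`), while the `GET`/`DELETE` operations on `/api/v3/fs/{name}/{path}` shown here, plus the `PUT` that follows, operate on files inside one of them. A round-trip sketch, assuming the same local instance as above and a filesystem registered under the name `disk` (auth omitted for brevity):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

const base = "http://localhost:8080/api/v3/fs" // assumed Core address

// do performs one request against the generic filesystem API and returns the body.
func do(method, url string, body io.Reader) string {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	return string(b)
}

func main() {
	// List the registered filesystems; returns a JSON array of api.FilesystemInfo.
	fmt.Println(do("GET", base, nil))

	// Upload, fetch, and remove a file on the filesystem named "disk".
	// An S3 filesystem would be addressed by its configured name instead.
	do("PUT", base+"/disk/test/hello.txt", strings.NewReader("hello"))
	fmt.Println(do("GET", base+"/disk/test/hello.txt", nil))
	do("DELETE", base+"/disk/test/hello.txt", nil)
}
```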
- operationId: memfs-3-patch parameters: - - description: Path to file + description: Writes or overwrites a file on a filesystem + operationId: filesystem-3-put-file parameters: - - description: Path to file + - description: Name of the filesystem in: path - name: path + name: name required: true type: string - - description: Path to the file to link to - in: body - name: url - required: true - schema: - type: string - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Create a link to a file in the memory filesystem - tags: - - v16.7.2 - put: - consumes: - - application/data - description: Writes or overwrites a file on the memory filesystem - operationId: memfs-3-put-file - parameters: - description: Path to file in: path name: path @@ -2221,9 +2265,7 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Add a file to the memory filesystem - tags: - - v16.7.2 + summary: Add a file to a filesystem /api/v3/log: get: description: Get the last log lines of the Restreamer application @@ -3133,94 +3175,6 @@ paths: summary: Fetch minimal statistics about a process tags: - v16.7.2 - /memfs/{path}: - delete: - description: Remove a file from the memory filesystem - operationId: memfs-delete-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - text/plain - responses: - "200": - description: OK - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Remove a file from the memory filesystem - get: - description: Fetch a file from the memory filesystem - operationId: memfs-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - summary: Fetch a file from the memory filesystem - put: - consumes: - - application/data - description: Writes or overwrites a file on the memory filesystem - operationId: memfs-put-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - - description: File data - in: body - name: data - required: true - schema: - items: - type: integer - type: array - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "204": - description: No Content - schema: - type: string - "507": - description: Insufficient Storage - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Add a file to the memory filesystem /metrics: get: description: Prometheus metrics diff --git a/glob/glob.go b/glob/glob.go index 690daf61..89b57f00 100644 --- a/glob/glob.go +++ b/glob/glob.go @@ -4,6 +4,9 @@ import ( "github.com/gobwas/glob" ) +// Match returns whether the name matches the glob pattern, also considering +// one or more optional separators. An error is only returned if the pattern +// is invalid. func Match(pattern, name string, separators ...rune) (bool, error) { g, err := glob.Compile(pattern, separators...)
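Since `Match` recompiles the pattern on every call and delegates to gobwas/glob, separator handling follows that library: without separators a `*` spans path segments, with a separator it stops there, and `**` always crosses. A short usage sketch (the import path is assumed from the v16 module layout):

```go
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/glob" // assumed import path for this package
)

func main() {
	// Without separators, '*' matches across path segments.
	ok, _ := glob.Match("memfs/*", "memfs/a/b.ts")
	fmt.Println(ok) // true

	// With '/' as a separator, '*' stops at it ...
	ok, _ = glob.Match("memfs/*", "memfs/a/b.ts", '/')
	fmt.Println(ok) // false

	// ... while '**' still crosses it.
	ok, _ = glob.Match("memfs/**", "memfs/a/b.ts", '/')
	fmt.Println(ok) // true
}
```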
if err != nil { diff --git a/go.mod b/go.mod index ae0e3c5f..e13a2ff6 100644 --- a/go.mod +++ b/go.mod @@ -11,22 +11,25 @@ require ( github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759 github.com/go-playground/validator/v10 v10.11.1 github.com/gobwas/glob v0.2.3 - github.com/golang-jwt/jwt/v4 v4.4.2 + github.com/golang-jwt/jwt/v4 v4.4.3 github.com/google/uuid v1.3.0 github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.4.0 github.com/labstack/echo/v4 v4.9.1 github.com/lithammer/shortuuid/v4 v4.0.0 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.17 + github.com/minio/minio-go/v7 v7.0.47 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 - github.com/prometheus/client_golang v1.13.1 - github.com/shirou/gopsutil/v3 v3.22.10 + github.com/prometheus/client_golang v1.14.0 + github.com/shirou/gopsutil/v3 v3.22.11 github.com/stretchr/testify v1.8.1 github.com/swaggo/echo-swagger v1.3.5 github.com/swaggo/swag v1.8.7 github.com/vektah/gqlparser/v2 v2.5.1 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/mod v0.6.0 + go.uber.org/zap v1.24.0 + golang.org/x/mod v0.7.0 + golang.org/x/net v0.7.0 ) require ( @@ -34,13 +37,14 @@ require ( github.com/agnivade/levenshtein v1.1.1 // indirect github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/spec v0.20.8 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect @@ -50,7 +54,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.1.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/libdns/libdns v0.2.1 // indirect @@ -60,16 +66,24 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mholt/acmez v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.39.0 // 
indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rs/xid v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - github.com/tklauser/numcpus v0.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect github.com/urfave/cli/v2 v2.8.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect @@ -78,14 +92,14 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.23.0 // indirect - golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.1.0 // indirect - golang.org/x/sys v0.1.0 // indirect - golang.org/x/text v0.4.0 // indirect - golang.org/x/time v0.1.0 // indirect - golang.org/x/tools v0.2.0 // indirect + go.uber.org/goleak v1.1.12 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.4.0 // indirect google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 8712f523..788ba6ff 100644 --- a/go.sum +++ b/go.sum @@ -1,41 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbpJMw= github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= @@ -46,11 +12,6 @@ github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaW github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= @@ -61,21 +22,12 @@ github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLj github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI= 
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE= github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -89,22 +41,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -114,8 +53,8 @@ github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/a github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= -github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= @@ -128,105 +67,48 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock 
v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= +github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
 github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA=
 github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws=
 github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
 github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
 github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/cpuid/v2 v2.1.2 h1:XhdX4fqAJUA0yj+kUwMavO0hHrSPAecYdYf1ZmxHvak=
-github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
+github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@@ -261,25 +143,29 @@ github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
 github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
 github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
 github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs=
+github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -287,66 +173,47 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6
 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
 github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc=
 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
-github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
+github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZpPoVg=
-github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk=
+github.com/shirou/gopsutil/v3 v3.22.11 h1:kxsPKS+Eeo+VnEQ2XCaGJepeP6KY53QoRTETx3+1ndM=
+github.com/shirou/gopsutil/v3 v3.22.11/go.mod h1:xl0EeL4vXJ+hQMAGN8B9VFpxukEMA0XdevQOe5MZ1oY=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -363,11 +230,10 @@ github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9J
 github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ=
 github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU=
 github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
-github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
-github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
-github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
-github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A=
-github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo=
+github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
+github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
+github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
 github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
 github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4=
 github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY=
@@ -387,180 +253,69 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
 go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
 go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
 go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
-go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
-go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -569,165 +324,49 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -735,13 +374,12 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -749,13 +387,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/http/api/avstream.go b/http/api/avstream.go
index 0a9c5044..279b3352 100644
--- a/http/api/avstream.go
+++ b/http/api/avstream.go
@@ -6,7 +6,7 @@ import (
 
 type AVstreamIO struct {
 	State  string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"`
-	Packet uint64 `json:"packet"`
+	Packet uint64 `json:"packet" format:"uint64"`
 	Time   uint64 `json:"time"`
 	Size   uint64 `json:"size_kb"`
 }
@@ -25,11 +25,11 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) {
 type AVstream struct {
 	Input       AVstreamIO `json:"input"`
 	Output      AVstreamIO `json:"output"`
-	Aqueue      uint64     `json:"aqueue"`
-	Queue       uint64     `json:"queue"`
-	Dup         uint64     `json:"dup"`
-	Drop        uint64     `json:"drop"`
-	Enc         uint64     `json:"enc"`
+	Aqueue      uint64     `json:"aqueue" format:"uint64"`
+	Queue       uint64     `json:"queue" format:"uint64"`
+	Dup         uint64     `json:"dup" format:"uint64"`
+	Drop        uint64     `json:"drop" format:"uint64"`
+	Enc         uint64     `json:"enc" format:"uint64"`
 	Looping     bool       `json:"looping"`
 	Duplicating bool       `json:"duplicating"`
 	GOP         string     `json:"gop"`
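[Editor's note] The tag change above is the pattern repeated across all of the http/api files in this changeset: integer struct fields gain a `format` tag, which the swag generator (github.com/swaggo/swag, pinned in the go.sum changes above) copies into the generated OpenAPI document, so 64-bit fields are declared as integers with an explicit format rather than bare integers. A minimal sketch of the pattern; the `Example` type is hypothetical and not part of this diff:

```go
package api

// Example is a hypothetical type illustrating the tag pattern used in this
// changeset; it is not part of the diff itself. swag reads the `format` tag
// and emits an integer schema with format "uint64" for these fields in the
// generated swagger.json, while encoding/json simply ignores the extra tag.
type Example struct {
	Packet uint64 `json:"packet" format:"uint64"`
	Size   uint64 `json:"size_kb" format:"uint64"` // kbytes
}
```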
diff --git a/http/api/error.go b/http/api/error.go
index 07477568..a87ef95a 100644
--- a/http/api/error.go
+++ b/http/api/error.go
@@ -8,7 +8,7 @@ import (
 
 // Error represents an error response of the API
 type Error struct {
-	Code    int      `json:"code" jsonschema:"required"`
+	Code    int      `json:"code" jsonschema:"required" format:"int"`
 	Message string   `json:"message" jsonschema:""`
 	Details []string `json:"details" jsonschema:""`
 }
diff --git a/http/api/fs.go b/http/api/fs.go
index c7d12eb4..84535bcc 100644
--- a/http/api/fs.go
+++ b/http/api/fs.go
@@ -3,6 +3,13 @@ package api
 // FileInfo represents informatiion about a file on a filesystem
 type FileInfo struct {
 	Name    string `json:"name" jsonschema:"minLength=1"`
-	Size    int64  `json:"size_bytes" jsonschema:"minimum=0"`
-	LastMod int64  `json:"last_modified" jsonschema:"minimum=0"`
+	Size    int64  `json:"size_bytes" jsonschema:"minimum=0" format:"int64"`
+	LastMod int64  `json:"last_modified" jsonschema:"minimum=0" format:"int64"`
+}
+
+// FilesystemInfo represents information about a filesystem
+type FilesystemInfo struct {
+	Name  string `json:"name"`
+	Type  string `json:"type"`
+	Mount string `json:"mount"`
 }
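[Editor's note] `FilesystemInfo` above is a new, purely declarative type. A hedged sketch of how a list of such objects would serialize with encoding/json; the names, types, and mount points below are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FilesystemInfo mirrors the type added in http/api/fs.go above.
type FilesystemInfo struct {
	Name  string `json:"name"`
	Type  string `json:"type"`
	Mount string `json:"mount"`
}

func main() {
	// Example values only; the actual entries depend on the configured storages.
	list := []FilesystemInfo{
		{Name: "disk", Type: "disk", Mount: "/data"},
		{Name: "mem", Type: "mem", Mount: "/memfs"},
	}
	out, _ := json.MarshalIndent(list, "", "  ")
	fmt.Println(string(out))
}
```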
json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Pixfmt string `json:"pix_fmt"` - Width uint64 `json:"width"` - Height uint64 `json:"height"` + Width uint64 `json:"width" format:"uint64"` + Height uint64 `json:"height" format:"uint64"` // audio - Sampling uint64 `json:"sampling_hz"` + Sampling uint64 `json:"sampling_hz" format:"uint64"` Layout string `json:"layout"` - Channels uint64 `json:"channels"` + Channels uint64 `json:"channels" format:"uint64"` } func (i *ProbeIO) Unmarshal(io *app.ProbeIO) { diff --git a/http/api/process.go b/http/api/process.go index 7365e176..e217b455 100644 --- a/http/api/process.go +++ b/http/api/process.go @@ -13,7 +13,7 @@ type Process struct { ID string `json:"id" jsonschema:"minLength=1"` Type string `json:"type" jsonschema:"enum=ffmpeg"` Reference string `json:"reference"` - CreatedAt int64 `json:"created_at" jsonschema:"minimum=0"` + CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"` Config *ProcessConfig `json:"config,omitempty"` State *ProcessState `json:"state,omitempty"` Report *ProcessReport `json:"report,omitempty"` @@ -30,15 +30,15 @@ type ProcessConfigIO struct { type ProcessConfigIOCleanup struct { Pattern string `json:"pattern" validate:"required"` - MaxFiles uint `json:"max_files"` - MaxFileAge uint `json:"max_file_age_seconds"` + MaxFiles uint `json:"max_files" format:"uint"` + MaxFileAge uint `json:"max_file_age_seconds" format:"uint"` PurgeOnDelete bool `json:"purge_on_delete"` } type ProcessConfigLimits struct { CPU float64 `json:"cpu_usage" jsonschema:"minimum=0,maximum=100"` - Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0"` - WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0"` + Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"` + WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"` } // ProcessConfig represents the configuration of an ffmpeg process @@ -50,9 +50,9 @@ type ProcessConfig struct { Output []ProcessConfigIO `json:"output" validate:"required"` Options []string `json:"options"` Reconnect bool `json:"reconnect"` - ReconnectDelay uint64 `json:"reconnect_delay_seconds"` + ReconnectDelay uint64 `json:"reconnect_delay_seconds" format:"uint64"` Autostart bool `json:"autostart"` - StaleTimeout uint64 `json:"stale_timeout_seconds"` + StaleTimeout uint64 `json:"stale_timeout_seconds" format:"uint64"` Limits ProcessConfigLimits `json:"limits"` } @@ -188,7 +188,7 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) { // ProcessReportHistoryEntry represents the logs of a run of a restream process type ProcessReportHistoryEntry struct { - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Prelude []string `json:"prelude"` Log [][2]string `json:"log"` } @@ -235,11 +235,11 @@ func (report *ProcessReport) Unmarshal(l *app.Log) { type ProcessState struct { Order string `json:"order" jsonschema:"enum=start,enum=stop"` State string `json:"exec" jsonschema:"enum=finished,enum=starting,enum=running,enum=finishing,enum=killed,enum=failed"` - Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0"` - Reconnect int64 `json:"reconnect_seconds"` + Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0" format:"int64"` + Reconnect int64 `json:"reconnect_seconds" format:"int64"` LastLog string `json:"last_logline"` Progress *Progress `json:"progress"` - Memory uint64 `json:"memory_bytes"` + Memory uint64 `json:"memory_bytes" format:"uint64"` CPU json.Number `json:"cpu_usage" 
swaggertype:"number" jsonschema:"type=number"` Command []string `json:"command"` } diff --git a/http/api/progress.go b/http/api/progress.go index ed575fc7..a402d55a 100644 --- a/http/api/progress.go +++ b/http/api/progress.go @@ -13,29 +13,29 @@ type ProgressIO struct { Address string `json:"address" jsonschema:"minLength=1"` // General - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` + Index uint64 `json:"index" format:"uint64"` + Stream uint64 `json:"stream" format:"uint64"` Format string `json:"format"` Type string `json:"type"` Codec string `json:"codec"` Coder string `json:"coder"` - Frame uint64 `json:"frame"` + Frame uint64 `json:"frame" format:"uint64"` FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` - Packet uint64 `json:"packet"` + Packet uint64 `json:"packet" format:"uint64"` PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_kb" format:"uint64"` // kbytes Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s // Video Pixfmt string `json:"pix_fmt,omitempty"` Quantizer json.Number `json:"q,omitempty" swaggertype:"number" jsonschema:"type=number"` - Width uint64 `json:"width,omitempty"` - Height uint64 `json:"height,omitempty"` + Width uint64 `json:"width,omitempty" format:"uint64"` + Height uint64 `json:"height,omitempty" format:"uint64"` // Audio - Sampling uint64 `json:"sampling_hz,omitempty"` + Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"` Layout string `json:"layout,omitempty"` - Channels uint64 `json:"channels,omitempty"` + Channels uint64 `json:"channels,omitempty" format:"uint64"` // avstream AVstream *AVstream `json:"avstream"` @@ -79,16 +79,16 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { type Progress struct { Input []ProgressIO `json:"inputs"` Output []ProgressIO `json:"outputs"` - Frame uint64 `json:"frame"` - Packet uint64 `json:"packet"` + Frame uint64 `json:"frame" format:"uint64"` + Packet uint64 `json:"packet" format:"uint64"` FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Quantizer json.Number `json:"q" swaggertype:"number" jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_kb" format:"uint64"` // kbytes Time json.Number `json:"time" swaggertype:"number" jsonschema:"type=number"` Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s Speed json.Number `json:"speed" swaggertype:"number" jsonschema:"type=number"` - Drop uint64 `json:"drop"` - Dup uint64 `json:"dup"` + Drop uint64 `json:"drop" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` } // Unmarshal converts a restreamer Progress to a Progress in API representation diff --git a/http/api/session.go b/http/api/session.go index 8078531a..c616121f 100644 --- a/http/api/session.go +++ b/http/api/session.go @@ -8,9 +8,9 @@ import ( // SessionStats are the accumulated numbers for the session summary type SessionStats struct { - TotalSessions uint64 `json:"sessions"` - TotalRxBytes uint64 `json:"traffic_rx_mb"` - TotalTxBytes uint64 `json:"traffic_tx_mb"` + TotalSessions uint64 `json:"sessions" format:"uint64"` + TotalRxBytes uint64 `json:"traffic_rx_mb" format:"uint64"` + TotalTxBytes uint64 `json:"traffic_tx_mb" format:"uint64"` } // SessionPeers is for the grouping by peers in the summary @@ -24,12 +24,12 @@ type SessionPeers struct { type Session struct { ID string `json:"id"` Reference string 
`json:"reference"` - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Location string `json:"local"` Peer string `json:"remote"` Extra string `json:"extra"` - RxBytes uint64 `json:"bytes_rx"` - TxBytes uint64 `json:"bytes_tx"` + RxBytes uint64 `json:"bytes_rx" format:"uint64"` + TxBytes uint64 `json:"bytes_tx" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s TxBitrate json.Number `json:"bandwidth_tx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s } @@ -50,10 +50,10 @@ func (s *Session) Unmarshal(sess session.Session) { // SessionSummaryActive represents the currently active sessions type SessionSummaryActive struct { SessionList []Session `json:"list"` - Sessions uint64 `json:"sessions"` + Sessions uint64 `json:"sessions" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s TxBitrate json.Number `json:"bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s - MaxSessions uint64 `json:"max_sessions"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` MaxRxBitrate json.Number `json:"max_bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s MaxTxBitrate json.Number `json:"max_bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s } diff --git a/http/api/srt.go b/http/api/srt.go index bb31498e..e41b2514 100644 --- a/http/api/srt.go +++ b/http/api/srt.go @@ -8,60 +8,60 @@ import ( // SRTStatistics represents the statistics of a SRT connection type SRTStatistics struct { - MsTimeStamp uint64 `json:"timestamp_ms"` // The time elapsed, in milliseconds, since the SRT socket has been created + MsTimeStamp uint64 `json:"timestamp_ms" format:"uint64"` // The time elapsed, in milliseconds, since the SRT socket has been created // Accumulated - PktSent uint64 `json:"sent_pkt"` // The total number of sent DATA packets, including retransmitted packets - PktRecv uint64 `json:"recv_pkt"` // The total number of received DATA packets, including retransmitted packets - PktSentUnique uint64 `json:"sent_unique_pkt"` // The total number of unique DATA packets sent by the SRT sender - PktRecvUnique uint64 `json:"recv_unique_pkt"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. - PktSndLoss uint64 `json:"send_loss_pkt"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. 
- PktRcvLoss uint64 `json:"recv_loss_pkt"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side - PktRetrans uint64 `json:"sent_retrans_pkt"` // The total number of retransmitted packets sent by the SRT sender - PktRcvRetrans uint64 `json:"recv_retran_pkts"` // The total number of retransmitted packets registered at the receiver side - PktSentACK uint64 `json:"sent_ack_pkt"` // The total number of sent ACK (Acknowledgement) control packets - PktRecvACK uint64 `json:"recv_ack_pkt"` // The total number of received ACK (Acknowledgement) control packets - PktSentNAK uint64 `json:"sent_nak_pkt"` // The total number of sent NAK (Negative Acknowledgement) control packets - PktRecvNAK uint64 `json:"recv_nak_pkt"` // The total number of received NAK (Negative Acknowledgement) control packets - PktSentKM uint64 `json:"send_km_pkt"` // The total number of sent KM (Key Material) control packets - PktRecvKM uint64 `json:"recv_km_pkt"` // The total number of received KM (Key Material) control packets - UsSndDuration uint64 `json:"send_duration_us"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged - PktSndDrop uint64 `json:"send_drop_pkt"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time - PktRcvDrop uint64 `json:"recv_drop_pkt"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets - PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt"` // The total number of packets that failed to be decrypted at the receiver side - - ByteSent uint64 `json:"sent_bytes"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecv uint64 `json:"recv_bytes"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSentUnique uint64 `json:"sent_unique_bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecvUnique uint64 `json:"recv_unique_bytes"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvLoss uint64 `json:"recv_loss_bytes"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size - ByteRetrans uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSndDrop uint64 `json:"send_drop_bytes"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvDrop uint64 `json:"recv_drop_bytes"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + PktSent uint64 `json:"sent_pkt" format:"uint64"` // The total number of sent DATA packets, including retransmitted packets + PktRecv uint64 `json:"recv_pkt" format:"uint64"` // The total number of received DATA packets, including retransmitted packets + PktSentUnique uint64 `json:"sent_unique_pkt" format:"uint64"` // The total number of unique DATA packets sent by 
the SRT sender + PktRecvUnique uint64 `json:"recv_unique_pkt" format:"uint64"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. + PktSndLoss uint64 `json:"send_loss_pkt" format:"uint64"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. + PktRcvLoss uint64 `json:"recv_loss_pkt" format:"uint64"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side + PktRetrans uint64 `json:"sent_retrans_pkt" format:"uint64"` // The total number of retransmitted packets sent by the SRT sender + PktRcvRetrans uint64 `json:"recv_retran_pkts" format:"uint64"` // The total number of retransmitted packets registered at the receiver side + PktSentACK uint64 `json:"sent_ack_pkt" format:"uint64"` // The total number of sent ACK (Acknowledgement) control packets + PktRecvACK uint64 `json:"recv_ack_pkt" format:"uint64"` // The total number of received ACK (Acknowledgement) control packets + PktSentNAK uint64 `json:"sent_nak_pkt" format:"uint64"` // The total number of sent NAK (Negative Acknowledgement) control packets + PktRecvNAK uint64 `json:"recv_nak_pkt" format:"uint64"` // The total number of received NAK (Negative Acknowledgement) control packets + PktSentKM uint64 `json:"send_km_pkt" format:"uint64"` // The total number of sent KM (Key Material) control packets + PktRecvKM uint64 `json:"recv_km_pkt" format:"uint64"` // The total number of received KM (Key Material) control packets + UsSndDuration uint64 `json:"send_duration_us" format:"uint64"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + PktSndDrop uint64 `json:"send_drop_pkt" format:"uint64"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + PktRcvDrop uint64 `json:"recv_drop_pkt" format:"uint64"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt" format:"uint64"` // The total number of packets that failed to be decrypted at the receiver side + + ByteSent uint64 `json:"sent_bytes" format:"uint64"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecv uint64 `json:"recv_bytes" format:"uint64"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSentUnique uint64 `json:"sent_unique_bytes" format:"uint64"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecvUnique uint64 `json:"recv_unique_bytes" format:"uint64"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvLoss uint64 `json:"recv_loss_bytes" format:"uint64"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + ByteRetrans uint64 `json:"sent_retrans_bytes" format:"uint64"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers 
(IP, TCP, SRT) + ByteSndDrop uint64 `json:"send_drop_bytes" format:"uint64"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvDrop uint64 `json:"recv_drop_bytes" format:"uint64"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes" format:"uint64"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) // Instantaneous - UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds - PktFlowWindow uint64 `json:"flow_window_pkt"` // The maximum number of packets that can be "in flight" - PktFlightSize uint64 `json:"flight_size_pkt"` // The number of packets in flight - MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds - MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps - ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes"` // The available space in the sender's buffer, in bytes - ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes"` // The available space in the receiver's buffer, in bytes - MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps - ByteMSS uint64 `json:"mss_bytes"` // Maximum Segment Size (MSS), in bytes - PktSndBuf uint64 `json:"send_buf_pkt"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged - ByteSndBuf uint64 `json:"send_buf_bytes"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsSndBuf uint64 `json:"send_buf_ms"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) - MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value of the peer - PktRcvBuf uint64 `json:"recv_buf_pkt"` // The number of acknowledged packets in receiver's buffer - ByteRcvBuf uint64 `json:"recv_buf_bytes"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsRcvBuf uint64 `json:"recv_buf_ms"` // The timespan (msec) of acknowledged packets in the receiver's buffer - MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY - PktReorderTolerance uint64 `json:"reorder_tolerance_pkt"` // Instant value of the packet reorder tolerance - PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms"` // Accumulated difference between the current time and the time-to-play of a packet that is received late + UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds + PktFlowWindow uint64 `json:"flow_window_pkt" format:"uint64"` // The maximum number of packets that can be "in flight" + PktFlightSize uint64 `json:"flight_size_pkt" format:"uint64"` // The number of packets in flight + MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds + MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps + ByteAvailSndBuf uint64 
`json:"avail_send_buf_bytes" format:"uint64"` // The available space in the sender's buffer, in bytes + ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes" format:"uint64"` // The available space in the receiver's buffer, in bytes + MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps + ByteMSS uint64 `json:"mss_bytes" format:"uint64"` // Maximum Segment Size (MSS), in bytes + PktSndBuf uint64 `json:"send_buf_pkt" format:"uint64"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + ByteSndBuf uint64 `json:"send_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsSndBuf uint64 `json:"send_buf_ms" format:"uint64"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value of the peer + PktRcvBuf uint64 `json:"recv_buf_pkt" format:"uint64"` // The number of acknowledged packets in receiver's buffer + ByteRcvBuf uint64 `json:"recv_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsRcvBuf uint64 `json:"recv_buf_ms" format:"uint64"` // The timespan (msec) of acknowledged packets in the receiver's buffer + MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + PktReorderTolerance uint64 `json:"reorder_tolerance_pkt" format:"uint64"` // Instant value of the packet reorder tolerance + PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms" format:"uint64"` // Accumulated difference between the current time and the time-to-play of a packet that is received late } // Unmarshal converts the SRT statistics into API representation @@ -119,7 +119,7 @@ func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) { } type SRTLog struct { - Timestamp int64 `json:"ts"` + Timestamp int64 `json:"ts" format:"int64"` Message []string `json:"msg"` } diff --git a/http/api/widget.go b/http/api/widget.go index d0f35e6c..5d91bda6 100644 --- a/http/api/widget.go +++ b/http/api/widget.go @@ -1,7 +1,7 @@ package api type WidgetProcess struct { - CurrentSessions uint64 `json:"current_sessions"` - TotalSessions uint64 `json:"total_sessions"` + CurrentSessions uint64 `json:"current_sessions" format:"uint64"` + TotalSessions uint64 `json:"total_sessions" format:"uint64"` Uptime int64 `json:"uptime"` } diff --git a/http/fs/fs.go b/http/fs/fs.go new file mode 100644 index 00000000..500ab733 --- /dev/null +++ b/http/fs/fs.go @@ -0,0 +1,25 @@ +package fs + +import ( + "github.com/datarhei/core/v16/http/cache" + "github.com/datarhei/core/v16/io/fs" +) + +type FS struct { + Name string + Mountpoint string + + AllowWrite bool + + EnableAuth bool + Username string + Password string + + DefaultFile string + DefaultContentType string + Gzip bool + + Filesystem fs.Filesystem + + Cache cache.Cacher +} diff --git a/http/handler/api/config.go b/http/handler/api/config.go index 5e084e0b..d2484de2 100644 --- a/http/handler/api/config.go +++ b/http/handler/api/config.go @@ -3,6 +3,7 @@ package api import ( "io" "net/http" + "time" cfgstore "github.com/datarhei/core/v16/config/store" cfgvars "github.com/datarhei/core/v16/config/vars" @@ -71,6 +72,10 @@ func (p *ConfigHandler) 
Set(c echo.Context) error { } cfg := p.store.Get() + cfgActive := p.store.GetActive() + + // Copy the timestamp of when this config has been used + cfg.LoadedAt = cfgActive.LoadedAt // For each version, set the current config as default config value. This will // allow to set a partial config without destroying the other values. @@ -119,6 +124,9 @@ func (p *ConfigHandler) Set(c echo.Context) error { return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version) } + cfg.CreatedAt = time.Now() + cfg.UpdatedAt = cfg.CreatedAt + // Now we make a copy from the config and merge it with the environment // variables. If this configuration is valid, we will store the un-merged // one to disk. @@ -157,15 +165,15 @@ func (p *ConfigHandler) Set(c echo.Context) error { // Reload will reload the currently active configuration // @Summary Reload the currently active configuration -// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer. +// @Description Reload the currently active configuration. This will trigger a restart of the Core. // @Tags v16.7.2 // @ID config-3-reload -// @Produce plain -// @Success 200 {string} string "OK" +// @Produce json +// @Success 200 {string} string // @Security ApiKeyAuth // @Router /api/v3/config/reload [get] func (p *ConfigHandler) Reload(c echo.Context) error { p.store.Reload() - return c.String(http.StatusOK, "OK") + return c.JSON(http.StatusOK, "OK") } diff --git a/http/handler/api/config_test.go b/http/handler/api/config_test.go index 77e985c4..0410eaf2 100644 --- a/http/handler/api/config_test.go +++ b/http/handler/api/config_test.go @@ -4,20 +4,32 @@ import ( "bytes" "encoding/json" "net/http" + "strings" "testing" "github.com/datarhei/core/v16/config" "github.com/datarhei/core/v16/config/store" v1 "github.com/datarhei/core/v16/config/v1" "github.com/datarhei/core/v16/http/mock" + "github.com/datarhei/core/v16/io/fs" "github.com/labstack/echo/v4" "github.com/stretchr/testify/require" ) -func getDummyConfigRouter() (*echo.Echo, store.Store) { +func getDummyConfigRouter(t *testing.T) (*echo.Echo, store.Store) { router := mock.DummyEcho() - config := store.NewDummy() + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + _, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + require.NoError(t, err) + + _, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + require.NoError(t, err) + + config, err := store.NewJSON(memfs, "/config.json", nil) + require.NoError(t, err) handler := NewConfig(config) @@ -28,7 +40,7 @@ func getDummyConfigRouter() (*echo.Echo, store.Store) { } func TestConfigGet(t *testing.T) { - router, _ := getDummyConfigRouter() + router, _ := getDummyConfigRouter(t) mock.Request(t, http.StatusOK, router, "GET", "/", nil) @@ -36,18 +48,21 @@ func TestConfigGet(t *testing.T) { } func TestConfigSetConflict(t *testing.T) { - router, _ := getDummyConfigRouter() + router, _ := getDummyConfigRouter(t) + + cfg := config.New(nil) + cfg.Storage.MimeTypes = "/path/to/mime.types" var data bytes.Buffer encoder := json.NewEncoder(&data) - encoder.Encode(config.New()) + encoder.Encode(cfg) mock.Request(t, http.StatusConflict, router, "PUT", "/", &data) } func TestConfigSet(t *testing.T) { - router, store := getDummyConfigRouter() + router, store := getDummyConfigRouter(t) storedcfg := store.Get() @@ -57,11 +72,9 @@ func TestConfigSet(t *testing.T) { encoder := json.NewEncoder(&data) // Setting a new v3 config - cfg 
:= config.New() - cfg.FFmpeg.Binary = "true" + cfg := config.New(nil) cfg.DB.Dir = "." cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"} cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"} cfg.Host.Name = []string{"foobar.com"} @@ -78,11 +91,9 @@ func TestConfigSet(t *testing.T) { require.Equal(t, "cert@datarhei.com", cfg.TLS.Email) // Setting a complete v1 config - cfgv1 := v1.New() - cfgv1.FFmpeg.Binary = "true" + cfgv1 := v1.New(nil) cfgv1.DB.Dir = "." cfgv1.Storage.Disk.Dir = "." - cfgv1.Storage.MimeTypes = "" cfgv1.Storage.Disk.Cache.Types = []string{".bbb"} cfgv1.Host.Name = []string{"foobar.com"} diff --git a/http/handler/api/diskfs.go b/http/handler/api/diskfs.go deleted file mode 100644 index 98156ac9..00000000 --- a/http/handler/api/diskfs.go +++ /dev/null @@ -1,215 +0,0 @@ -package api - -import ( - "net/http" - "path/filepath" - "sort" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem - handler *handler.DiskFSHandler -} - -// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. -func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - handler: handler.NewDiskFS(fs, cache), - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. The contents of that file are returned. 
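The config handler change above is what enables the partial config updates named in the changelog: `Set` starts from the currently stored config (copying the active config's `LoadedAt`), decodes the request body over it so absent fields keep their current values, and then stamps `CreatedAt`/`UpdatedAt`. A minimal sketch of that prefill-then-decode pattern; the `Settings` type here is a stand-in for illustration, not the Core's actual config struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Settings is a hypothetical stand-in for the real config type.
type Settings struct {
	Name    string `json:"name"`
	Workers int    `json:"workers"`
}

func main() {
	// Prefill with the currently stored values ...
	cfg := Settings{Name: "core", Workers: 4}

	// ... then decode the (possibly partial) request body over them.
	// Fields missing from the body keep their prefilled values, which is
	// what makes a partial update non-destructive.
	body := strings.NewReader(`{"workers": 8}`)
	if err := json.NewDecoder(body).Decode(&cfg); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", cfg) // prints {Name:core Workers:8}
}
```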
-// @Tags v16.7.2 -// @ID diskfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [get] -func (h *DiskFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ := file.Stat() - - if stat.IsDir() { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the filesystem -// @Description Writes or overwrites a file on the filesystem -// @Tags v16.7.2 -// @ID diskfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [put] -func (h *DiskFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, path) - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the filesystem -// @Description Remove a file from the filesystem -// @Tags v16.7.2 -// @ID diskfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [delete] -func (h *DiskFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File not found", path) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - return c.String(http.StatusOK, "OK") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the filesystem -// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
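The `format:"uint64"` and `format:"int64"` tags added throughout the `http/api` structs earlier in this diff feed the swag-generated OpenAPI document (the `@Summary`/`@Param` annotations here are swag's). Assuming swag's usual tag handling, the field's `format` tag is copied into the generated schema, so 64-bit counters are documented explicitly instead of as bare integers. A stand-in field, not taken from the Core, to illustrate the intended effect:

```go
// Counters is illustrative only. With swaggo/swag, the format tag below
// should surface in the generated spec roughly as
//   {"type": "integer", "format": "uint64"}
// signalling to clients that the value can exceed the integer range that
// is safely representable in a JavaScript number.
type Counters struct {
	Packets uint64 `json:"packets" format:"uint64"`
}
```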
-// @Tags v16.7.2 -// @ID diskfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk [get] -func (h *DiskFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - fileinfos := []api.FileInfo{} - - for _, f := range files { - if f.IsDir() { - continue - } - - fileinfos = append(fileinfos, api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - }) - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/api/filesystems.go b/http/handler/api/filesystems.go new file mode 100644 index 00000000..ce93812b --- /dev/null +++ b/http/handler/api/filesystems.go @@ -0,0 +1,146 @@ +package api + +import ( + "net/http" + + "github.com/datarhei/core/v16/http/api" + "github.com/datarhei/core/v16/http/handler" + "github.com/datarhei/core/v16/http/handler/util" + + "github.com/labstack/echo/v4" +) + +type FSConfig struct { + Type string + Mountpoint string + Handler *handler.FSHandler +} + +// The FSHandler type provides handlers for manipulating a filesystem +type FSHandler struct { + filesystems map[string]FSConfig +} + +// NewFS returns a new FSHandler type. You have to provide a filesystem to act on.
+func NewFS(filesystems map[string]FSConfig) *FSHandler { + return &FSHandler{ + filesystems: filesystems, + } +} + +// GetFileAPI returns the file at the given path +// @Summary Fetch a file from a filesystem +// @Description Fetch a file from a filesystem +// @ID filesystem-3-get-file +// @Produce application/data +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Success 200 {file} byte +// @Success 301 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [get] +func (h *FSHandler) GetFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.GetFile(c) +} + +// PutFileAPI adds or overwrites a file at the given path +// @Summary Add a file to a filesystem +// @Description Writes or overwrites a file on a filesystem +// @ID filesystem-3-put-file +// @Accept application/data +// @Produce text/plain +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Param data body []byte true "File data" +// @Success 201 {string} string +// @Success 204 {string} string +// @Failure 507 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [put] +func (h *FSHandler) PutFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.PutFile(c) +} + +// DeleteFileAPI removes a file from a filesystem +// @Summary Remove a file from a filesystem +// @Description Remove a file from a filesystem +// @ID filesystem-3-delete-file +// @Produce text/plain +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Success 200 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [delete] +func (h *FSHandler) DeleteFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.DeleteFile(c) +} + +// ListFiles lists all files on a filesystem +// @Summary List all files on a filesystem +// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
+// @ID filesystem-3-list-files +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param glob query string false "glob pattern for file names" +// @Param sort query string false "none, name, size, lastmod" +// @Param order query string false "asc, desc" +// @Success 200 {array} api.FileInfo +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name} [get] +func (h *FSHandler) ListFiles(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.ListFiles(c) +} + +// List lists all registered filesystems +// @Summary List all registered filesystems +// @Description List all registered filesystems +// @ID filesystem-3-list +// @Produce json +// @Success 200 {array} api.FilesystemInfo +// @Security ApiKeyAuth +// @Router /api/v3/fs [get] +func (h *FSHandler) List(c echo.Context) error { + fss := []api.FilesystemInfo{} + + for name, config := range h.filesystems { + fss = append(fss, api.FilesystemInfo{ + Name: name, + Type: config.Type, + Mount: config.Mountpoint, + }) + } + + return c.JSON(http.StatusOK, fss) +} diff --git a/http/handler/api/memfs.go b/http/handler/api/memfs.go deleted file mode 100644 index 2b64c4d0..00000000 --- a/http/handler/api/memfs.go +++ /dev/null @@ -1,177 +0,0 @@ -package api - -import ( - "io" - "net/http" - "net/url" - "sort" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The MemFSHandler type provides handlers for manipulating a filesystem -type MemFSHandler struct { - filesystem fs.Filesystem - handler *handler.MemFSHandler -} - -// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
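Taken together, `FSConfig`, `FSHandler`, and the new `http/fs.FS` type replace the separate disk and memory handlers deleted below with one name-indexed dispatch: every route carries a `{name}` parameter and the handler looks the filesystem up in its map. A hypothetical wiring, with the in-memory filesystem construction borrowed from config_test.go above; the names and values are illustrative, not how the Core's server actually assembles them:

```go
package main

import (
	httpfs "github.com/datarhei/core/v16/http/fs"
	"github.com/datarhei/core/v16/http/handler"
	api "github.com/datarhei/core/v16/http/handler/api"
	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	// An in-memory filesystem, constructed as in the tests above.
	memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
	if err != nil {
		panic(err)
	}

	// One generic handler per filesystem ...
	memHandler := handler.NewFS(httpfs.FS{
		Name:       "mem",
		Mountpoint: "/memfs",
		AllowWrite: true,
		Filesystem: memfs,
	})

	// ... and one API handler dispatching on the {name} path parameter,
	// so this filesystem answers under /api/v3/fs/mem and /api/v3/fs/mem/*.
	_ = api.NewFS(map[string]api.FSConfig{
		"mem": {Type: memfs.Type(), Mountpoint: "/memfs", Handler: memHandler},
	})
}
```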
-func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - handler: handler.NewMemFS(fs), - } -} - -// GetFileAPI returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - return h.handler.GetFile(c) -} - -// PutFileAPI adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - return h.handler.PutFile(c) -} - -// DeleteFileAPI removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - return h.handler.DeleteFile(c) -} - -// PatchFile creates a symbolic link to a file in the filesystem -// @Summary Create a link to a file in the memory filesystem -// @Description Create a link to a file in the memory filesystem. The file linked to has to exist. -// @Tags v16.7.2 -// @ID memfs-3-patch -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param url body string true "Path to the file to link to" -// @Success 201 {string} string -// @Failure 400 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [patch] -func (h *MemFSHandler) PatchFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - body, err := io.ReadAll(req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err) - } - - u, err := url.Parse(string(body)) - if err != nil { - return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err) - } - - if err := h.filesystem.Symlink(u.Path, path); err != nil { - return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - return c.String(http.StatusCreated, "") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the memory filesystem -// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
-// @Tags v16.7.2 -// @ID memfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem [get] -func (h *MemFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - var fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) - - for i, f := range files { - fileinfos[i] = api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - } - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/api/restream.go b/http/handler/api/restream.go index 2e6f0c2a..c61f363a 100644 --- a/http/handler/api/restream.go +++ b/http/handler/api/restream.go @@ -51,7 +51,7 @@ func (h *RestreamHandler) Add(c echo.Context) error { return api.Err(http.StatusBadRequest, "Unsupported process type", "Supported process types are: ffmpeg") } - if len(process.Input) == 0 && len(process.Output) == 0 { + if len(process.Input) == 0 || len(process.Output) == 0 { return api.Err(http.StatusBadRequest, "At least one input and one output need to be defined") } @@ -189,6 +189,14 @@ func (h *RestreamHandler) Update(c echo.Context) error { Autostart: true, } + current, err := h.restream.GetProcess(id) + if err != nil { + return api.Err(http.StatusNotFound, "Process not found", "%s", id) + } + + // Prefill the config with the current values + process.Unmarshal(current.Config) + if err := util.ShouldBindJSON(c, &process); err != nil { return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) } diff --git a/http/handler/diskfs.go b/http/handler/diskfs.go deleted file mode 100644 index 9726c258..00000000 --- a/http/handler/diskfs.go +++ /dev/null @@ -1,88 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem -} - -// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. 
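Two behavioral fixes sit in the restream.go hunk above. First, `Add` now rejects a process when either the input list or the output list is empty (`||` instead of `&&`, which only rejected when both were empty). Second, `Update` prefills the request struct from the stored process config before binding the body, giving processes the same partial-update semantics as the config endpoint. The corrected validation, reduced to its logic in a small stand-alone sketch:

```go
package main

import "fmt"

// hasIO mirrors the fixed check in Add: a process is valid only with at
// least one input AND at least one output, so it must be rejected when
// either list is empty. That is why the condition uses "||" where the old
// code used "&&".
func hasIO(inputs, outputs []string) bool {
	return !(len(inputs) == 0 || len(outputs) == 0)
}

func main() {
	fmt.Println(hasIO([]string{"in"}, nil))             // false (the old "&&" check accepted this)
	fmt.Println(hasIO([]string{"in"}, []string{"out"})) // true
}
```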
-func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists. -// @ID diskfs-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Router /{path} [get] -func (h *DiskFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ := file.Stat() - - if stat.IsDir() { - path = filepath.Join(path, "index.html") - - file.Close() - - file = h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ = file.Stat() - } - - defer file.Close() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} diff --git a/http/handler/filesystem.go b/http/handler/filesystem.go new file mode 100644 index 00000000..a8277e7c --- /dev/null +++ b/http/handler/filesystem.go @@ -0,0 +1,164 @@ +package handler + +import ( + "net/http" + "path/filepath" + "sort" + + "github.com/datarhei/core/v16/http/api" + "github.com/datarhei/core/v16/http/fs" + "github.com/datarhei/core/v16/http/handler/util" + + "github.com/labstack/echo/v4" +) + +// The FSHandler type provides handlers for manipulating a filesystem +type FSHandler struct { + fs fs.FS +} + +// NewFS returns a new FSHandler type. You have to provide a filesystem to act on.
+func NewFS(fs fs.FS) *FSHandler { + return &FSHandler{ + fs: fs, + } +} + +func (h *FSHandler) GetFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + mimeType := c.Response().Header().Get(echo.HeaderContentType) + c.Response().Header().Del(echo.HeaderContentType) + + file := h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ := file.Stat() + + if len(h.fs.DefaultFile) != 0 { + if stat.IsDir() { + path = filepath.Join(path, h.fs.DefaultFile) + + file.Close() + + file = h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ = file.Stat() + } + } + + defer file.Close() + + c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) + + if path, ok := stat.IsLink(); ok { + path = filepath.Clean("/" + path) + + if path[0] == '/' { + path = path[1:] + } + + return c.Redirect(http.StatusMovedPermanently, path) + } + + c.Response().Header().Set(echo.HeaderContentType, mimeType) + + if c.Request().Method == "HEAD" { + return c.Blob(http.StatusOK, "application/data", nil) + } + + return c.Stream(http.StatusOK, "application/data", file) +} + +func (h *FSHandler) PutFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + req := c.Request() + + _, created, err := h.fs.Filesystem.WriteFileReader(path, req.Body) + if err != nil { + return api.Err(http.StatusBadRequest, "Bad request", "%s", err) + } + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + } + + c.Response().Header().Set("Content-Location", req.URL.RequestURI()) + + if created { + return c.String(http.StatusCreated, "") + } + + return c.NoContent(http.StatusNoContent) +} + +func (h *FSHandler) DeleteFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + size := h.fs.Filesystem.Remove(path) + + if size < 0 { + return api.Err(http.StatusNotFound, "File not found", path) + } + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + } + + return c.String(http.StatusOK, "Deleted: "+path) +} + +func (h *FSHandler) ListFiles(c echo.Context) error { + pattern := util.DefaultQuery(c, "glob", "") + sortby := util.DefaultQuery(c, "sort", "none") + order := util.DefaultQuery(c, "order", "asc") + + files := h.fs.Filesystem.List("/", pattern) + + var sortFunc func(i, j int) bool + + switch sortby { + case "name": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } + } else { + sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } + } + case "size": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } + } else { + sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } + } + default: + if order == "asc" { + sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } + } else { + sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } + } + } + + sort.Slice(files, sortFunc) + + var fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) + + for i, f := range files { + fileinfos[i] = api.FileInfo{ + Name: f.Name(), + Size: f.Size(), + LastMod: f.ModTime().Unix(), + } + } + + return c.JSON(http.StatusOK, fileinfos) +} diff --git a/http/handler/memfs.go b/http/handler/memfs.go deleted file mode 100644 index 1369a6dc..00000000 --- 
a/http/handler/memfs.go +++ /dev/null @@ -1,130 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The MemFSHandler type provides handlers for manipulating a filesystem -type MemFSHandler struct { - filesystem fs.Filesystem -} - -// NewMemFS return a new MemFS type. You have to provide a filesystem to act on. -func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @ID memfs-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Router /memfs/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - stat, _ := file.Stat() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @ID memfs-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, "") - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @ID memfs-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File 
not found", path) - } - - return c.String(http.StatusOK, "Deleted: "+path) -} diff --git a/http/mock/mock.go b/http/mock/mock.go index 8bdc0c55..621204a7 100644 --- a/http/mock/mock.go +++ b/http/mock/mock.go @@ -17,6 +17,7 @@ import ( "github.com/datarhei/core/v16/http/errorhandler" "github.com/datarhei/core/v16/http/validator" "github.com/datarhei/core/v16/internal/testhelper" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream" "github.com/datarhei/core/v16/restream/store" @@ -32,7 +33,17 @@ func DummyRestreamer(pathPrefix string) (restream.Restreamer, error) { return nil, fmt.Errorf("failed to build helper program: %w", err) } - store := store.NewDummyStore(store.DummyConfig{}) + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + if err != nil { + return nil, fmt.Errorf("failed to create memory filesystem: %w", err) + } + + store, err := store.NewJSON(store.JSONConfig{ + Filesystem: memfs, + }) + if err != nil { + return nil, err + } ffmpeg, err := ffmpeg.New(ffmpeg.Config{ Binary: binary, diff --git a/http/server.go b/http/server.go index 34d51579..21c88ad4 100644 --- a/http/server.go +++ b/http/server.go @@ -29,19 +29,20 @@ package http import ( + "fmt" "net/http" "strings" cfgstore "github.com/datarhei/core/v16/config/store" "github.com/datarhei/core/v16/http/cache" "github.com/datarhei/core/v16/http/errorhandler" + "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/graph/resolver" "github.com/datarhei/core/v16/http/handler" api "github.com/datarhei/core/v16/http/handler/api" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/http/validator" - "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/monitor" "github.com/datarhei/core/v16/net" @@ -79,8 +80,7 @@ type Config struct { Metrics monitor.HistoryReader Prometheus prometheus.Reader MimeTypesFile string - DiskFS fs.Filesystem - MemFS MemFSConfig + Filesystems []fs.FS IPLimiter net.IPLimiter Profiling bool Cors CorsConfig @@ -94,13 +94,6 @@ type Config struct { ReadOnly bool } -type MemFSConfig struct { - EnableAuth bool - Username string - Password string - Filesystem fs.Filesystem -} - type CorsConfig struct { Origins []string } @@ -114,8 +107,6 @@ type server struct { handler struct { about *api.AboutHandler - memfs *handler.MemFSHandler - diskfs *handler.DiskFSHandler prometheus *handler.PrometheusHandler profiling *handler.ProfilingHandler ping *handler.PingHandler @@ -127,8 +118,6 @@ type server struct { log *api.LogHandler restream *api.RestreamHandler playout *api.PlayoutHandler - memfs *api.MemFSHandler - diskfs *api.DiskFSHandler rtmp *api.RTMPHandler srt *api.SRTHandler config *api.ConfigHandler @@ -148,18 +137,12 @@ type server struct { hlsrewrite echo.MiddlewareFunc } - memfs struct { - enableAuth bool - username string - password string - } - - diskfs fs.Filesystem - gzip struct { mimetypes []string } + filesystems map[string]*filesystem + router *echo.Echo mimeTypesFile string profiling bool @@ -167,32 +150,63 @@ type server struct { readOnly bool } +type filesystem struct { + fs.FS + + handler *handler.FSHandler + middleware echo.MiddlewareFunc +} + func NewServer(config Config) (Server, error) { s := &server{ logger: config.Logger, mimeTypesFile: config.MimeTypesFile, profiling: config.Profiling, - diskfs: config.DiskFS, readOnly: config.ReadOnly, } - s.v3handler.diskfs = api.NewDiskFS( - config.DiskFS, - config.Cache, - ) + s.filesystems = 
map[string]*filesystem{} - s.handler.diskfs = handler.NewDiskFS( - config.DiskFS, - config.Cache, - ) + corsPrefixes := map[string][]string{ + "/api": {"*"}, + } - s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ - PathPrefix: config.DiskFS.Base(), - }) + for _, fs := range config.Filesystems { + if _, ok := s.filesystems[fs.Name]; ok { + return nil, fmt.Errorf("the filesystem name '%s' is already in use", fs.Name) + } - s.memfs.enableAuth = config.MemFS.EnableAuth - s.memfs.username = config.MemFS.Username - s.memfs.password = config.MemFS.Password + if !strings.HasPrefix(fs.Mountpoint, "/") { + fs.Mountpoint = "/" + fs.Mountpoint + } + + if fs.Mountpoint != "/" { + fs.Mountpoint = strings.TrimSuffix(fs.Mountpoint, "/") + } + + if _, ok := corsPrefixes[fs.Mountpoint]; ok { + return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", fs.Mountpoint, fs.Name) + } + + corsPrefixes[fs.Mountpoint] = config.Cors.Origins + + filesystem := &filesystem{ + FS: fs, + handler: handler.NewFS(fs), + } + + if fs.Filesystem.Type() == "disk" { + filesystem.middleware = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ + PathPrefix: fs.Filesystem.Metadata("base"), + }) + } + + s.filesystems[filesystem.Name] = filesystem + } + + if _, ok := corsPrefixes["/"]; !ok { + return nil, fmt.Errorf("one filesystem must be mounted at /") + } if config.Logger == nil { s.logger = log.New("HTTP") @@ -224,16 +238,6 @@ func NewServer(config Config) (Server, error) { ) } - if config.MemFS.Filesystem != nil { - s.v3handler.memfs = api.NewMemFS( - config.MemFS.Filesystem, - ) - - s.handler.memfs = handler.NewMemFS( - config.MemFS.Filesystem, - ) - } - if config.Prometheus != nil { s.handler.prometheus = handler.NewPrometheus( config.Prometheus.HTTPHandler(), @@ -292,12 +296,6 @@ func NewServer(config Config) (Server, error) { Logger: s.logger, }) - if config.Cache != nil { - s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{ - Cache: config.Cache, - }) - } - s.v3handler.widget = api.NewWidget(api.WidgetConfig{ Restream: config.Restream, Registry: config.Sessions, @@ -308,11 +306,7 @@ func NewServer(config Config) (Server, error) { }) if middleware, err := mwcors.NewWithConfig(mwcors.Config{ - Prefixes: map[string][]string{ - "/": config.Cors.Origins, - "/api": {"*"}, - "/memfs": config.Cors.Origins, - }, + Prefixes: corsPrefixes, }); err != nil { return nil, err } else { @@ -437,65 +431,66 @@ func (s *server) setRoutes() { doc.Use(gzipMiddleware) doc.GET("", echoSwagger.WrapHandler) - // Serve static data - fs := s.router.Group("/*") - fs.Use(mwmime.NewWithConfig(mwmime.Config{ - MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "text/html", - })) - fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), - })) - if s.middleware.cache != nil { - fs.Use(s.middleware.cache) - } - fs.Use(s.middleware.hlsrewrite) - if s.middleware.session != nil { - fs.Use(s.middleware.session) - } + // Mount filesystems + for _, filesystem := range s.filesystems { + // Define a local variable because it is captured by a closure later in the loop + filesystem := filesystem - fs.GET("", s.handler.diskfs.GetFile) - fs.HEAD("", s.handler.diskfs.GetFile) + mountpoint := filesystem.Mountpoint + "/*" + if filesystem.Mountpoint == "/" { + mountpoint = "/*" + } - // Memory FS - if s.handler.memfs != nil { - memfs := s.router.Group("/memfs/*") - 
memfs.Use(mwmime.NewWithConfig(mwmime.Config{ + fs := s.router.Group(mountpoint) + fs.Use(mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "application/data", - })) - memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), + DefaultContentType: filesystem.DefaultContentType, })) - if s.middleware.session != nil { - memfs.Use(s.middleware.session) - } - memfs.HEAD("", s.handler.memfs.GetFile) - memfs.GET("", s.handler.memfs.GetFile) + if filesystem.Gzip { + fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ + Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), + Level: mwgzip.BestSpeed, + MinLength: 1000, + })) + } - var authmw echo.MiddlewareFunc + if filesystem.Cache != nil { + mwcache := mwcache.NewWithConfig(mwcache.Config{ + Cache: filesystem.Cache, + }) + fs.Use(mwcache) + } - if s.memfs.enableAuth { - authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { - if username == s.memfs.username && password == s.memfs.password { - return true, nil - } + if filesystem.middleware != nil { + fs.Use(filesystem.middleware) + } - return false, nil - }) + if s.middleware.session != nil { + fs.Use(s.middleware.session) + } - memfs.POST("", s.handler.memfs.PutFile, authmw) - memfs.PUT("", s.handler.memfs.PutFile, authmw) - memfs.DELETE("", s.handler.memfs.DeleteFile, authmw) - } else { - memfs.POST("", s.handler.memfs.PutFile) - memfs.PUT("", s.handler.memfs.PutFile) - memfs.DELETE("", s.handler.memfs.DeleteFile) + fs.GET("", filesystem.handler.GetFile) + fs.HEAD("", filesystem.handler.GetFile) + + if filesystem.AllowWrite { + if filesystem.EnableAuth { + authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { + if username == filesystem.Username && password == filesystem.Password { + return true, nil + } + + return false, nil + }) + + fs.POST("", filesystem.handler.PutFile, authmw) + fs.PUT("", filesystem.handler.PutFile, authmw) + fs.DELETE("", filesystem.handler.DeleteFile, authmw) + } else { + fs.POST("", filesystem.handler.PutFile) + fs.PUT("", filesystem.handler.PutFile) + fs.DELETE("", filesystem.handler.DeleteFile) + } } } @@ -593,32 +588,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) { } } - // v3 Memory FS - if s.v3handler.memfs != nil { - v3.GET("/fs/mem", s.v3handler.memfs.ListFiles) - v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile) - - if !s.readOnly { - v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile) - v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile) - v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile) + // v3 Filesystems + fshandlers := map[string]api.FSConfig{} + for _, fs := range s.filesystems { + fshandlers[fs.Name] = api.FSConfig{ + Type: fs.Filesystem.Type(), + Mountpoint: fs.Mountpoint, + Handler: fs.handler, } } - // v3 Disk FS - v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles) - v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + handler := api.NewFS(fshandlers) + + v3.GET("/fs", handler.List) + + v3.GET("/fs/:name", handler.ListFiles) + v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) - v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) if 
!s.readOnly { - v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile) - v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile) + v3.PUT("/fs/:name/*", handler.PutFile) + v3.DELETE("/fs/:name/*", handler.DeleteFile) } // v3 RTMP diff --git a/io/fs/disk.go b/io/fs/disk.go index bf9e1843..88352c72 100644 --- a/io/fs/disk.go +++ b/io/fs/disk.go @@ -1,25 +1,30 @@ package fs import ( + "bytes" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" + "sync" "time" "github.com/datarhei/core/v16/glob" "github.com/datarhei/core/v16/log" ) -// DiskConfig is the config required to create a new disk -// filesystem. +// DiskConfig is the config required to create a new disk filesystem. type DiskConfig struct { - // Dir is the path to the directory to observe - Dir string + // For logging, optional + Logger log.Logger +} - // Size of the filesystem in bytes - Size int64 +// RootedDiskConfig is the config required to create a new rooted disk filesystem. +type RootedDiskConfig struct { + // Root is the path this filesystem is rooted to + Root string // For logging, optional Logger log.Logger @@ -27,8 +32,9 @@ type DiskConfig struct { // diskFileInfo implements the FileInfo interface type diskFileInfo struct { - dir string + root string name string + mode os.FileMode finfo os.FileInfo } @@ -37,31 +43,37 @@ func (fi *diskFileInfo) Name() string { } func (fi *diskFileInfo) Size() int64 { + if fi.finfo.IsDir() { + return 0 + } + return fi.finfo.Size() } +func (fi *diskFileInfo) Mode() fs.FileMode { + return fi.mode +} + func (fi *diskFileInfo) ModTime() time.Time { return fi.finfo.ModTime() } func (fi *diskFileInfo) IsLink() (string, bool) { - mode := fi.finfo.Mode() - if mode&os.ModeSymlink == 0 { + if fi.mode&os.ModeSymlink == 0 { return fi.name, false } - path, err := os.Readlink(filepath.Join(fi.dir, fi.name)) + path, err := os.Readlink(filepath.Join(fi.root, fi.name)) if err != nil { return fi.name, false } - path = filepath.Join(fi.dir, path) - - if !strings.HasPrefix(path, fi.dir) { + if !strings.HasPrefix(path, fi.root) { return fi.name, false } - name := strings.TrimPrefix(path, fi.dir) + name := strings.TrimPrefix(path, fi.root) + if name[0] != os.PathSeparator { name = string(os.PathSeparator) + name } @@ -75,8 +87,9 @@ func (fi *diskFileInfo) IsDir() bool { // diskFile implements the File interface type diskFile struct { - dir string + root string name string + mode os.FileMode file *os.File } @@ -91,8 +104,9 @@ func (f *diskFile) Stat() (FileInfo, error) { } dif := &diskFileInfo{ - dir: f.dir, + root: f.root, name: f.name, + mode: f.mode, finfo: finfo, } @@ -109,11 +123,11 @@ func (f *diskFile) Read(p []byte) (int, error) { // diskFilesystem implements the Filesystem interface type diskFilesystem struct { - dir string + metadata map[string]string + lock sync.RWMutex - // Max. size of the filesystem in bytes as - // given by the config - maxSize int64 + root string + cwd string // Current size of the filesystem in bytes currentSize int64 @@ -123,53 +137,102 @@ type diskFilesystem struct { logger log.Logger } -// NewDiskFilesystem returns a new filesystem that is backed by a disk -// that implements the Filesystem interface +// NewDiskFilesystem returns a new filesystem that is backed by the disk filesystem. +// The root is / and the working directory is whatever is returned by os.Getwd(). The value +// of Root in the config will be ignored. 
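The constructor split below is the core of the disk changes: NewDiskFilesystem spans the whole host disk (root /, working directory from os.Getwd()), while NewRootedDiskFilesystem confines every path to a subtree and creates the root directory if it is missing. A minimal usage sketch of the rooted variant; the ./data path is a hypothetical example, not part of this patch:

```go
package main

import (
	"log"

	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	// "./data" is a hypothetical root; it is created if it doesn't exist,
	// and all paths on the filesystem are resolved below it.
	rooted, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{Root: "./data"})
	if err != nil {
		log.Fatal(err)
	}

	// "/foo.txt" maps to ./data/foo.txt on the host disk; cleanPath joins
	// every request onto the root, so paths cannot escape the subtree.
	if _, _, err := rooted.WriteFile("/foo.txt", []byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```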
func NewDiskFilesystem(config DiskConfig) (Filesystem, error) { fs := &diskFilesystem{ - maxSize: config.Size, - logger: config.Logger, + metadata: make(map[string]string), + root: "/", + cwd: "/", + logger: config.Logger, } - if fs.logger == nil { - fs.logger = log.New("DiskFS") + cwd, err := os.Getwd() + if err != nil { + return nil, err } - if err := fs.Rebase(config.Dir); err != nil { - return nil, err + fs.cwd = cwd + + if len(fs.cwd) == 0 { + fs.cwd = "/" + } + + fs.cwd = filepath.Clean(fs.cwd) + if !filepath.IsAbs(fs.cwd) { + return nil, fmt.Errorf("the current working directory must be an absolute path") + } + + if fs.logger == nil { + fs.logger = log.New("") } return fs, nil } -func (fs *diskFilesystem) Base() string { - return fs.dir -} +// NewRootedDiskFilesystem returns a filesystem that is backed by the disk filesystem. The +// root of the filesystem is defined by RootedDiskConfig.Root. The working directory is "/". Root +// must be a directory. If it doesn't exist, it will be created. +func NewRootedDiskFilesystem(config RootedDiskConfig) (Filesystem, error) { + fs := &diskFilesystem{ + metadata: make(map[string]string), + root: config.Root, + cwd: "/", + logger: config.Logger, + } -func (fs *diskFilesystem) Rebase(base string) error { - if len(base) == 0 { - return fmt.Errorf("invalid base path provided") + if len(fs.root) == 0 { + fs.root = "/" } - dir, err := filepath.Abs(base) - if err != nil { - return err + if root, err := filepath.Abs(fs.root); err != nil { + return nil, err + } else { + fs.root = root } - base = dir + err := os.MkdirAll(fs.root, 0700) + if err != nil { + return nil, err + } - finfo, err := os.Stat(base) + info, err := os.Stat(fs.root) if err != nil { - return fmt.Errorf("the provided base path '%s' doesn't exist", fs.dir) + return nil, err } - if !finfo.IsDir() { - return fmt.Errorf("the provided base path '%s' must be a directory", fs.dir) + if !info.IsDir() { + return nil, fmt.Errorf("root is not a directory") } - fs.dir = base + if fs.logger == nil { + fs.logger = log.New("") - return nil + return fs, nil +} + +func (fs *diskFilesystem) Name() string { + return "disk" +} + +func (fs *diskFilesystem) Type() string { + return "disk" +} + +func (fs *diskFilesystem) Metadata(key string) string { + fs.lock.RLock() + defer fs.lock.RUnlock() + + return fs.metadata[key] +} + +func (fs *diskFilesystem) SetMetadata(key, data string) { + fs.lock.Lock() + defer fs.lock.Unlock() + + fs.metadata[key] = data } func (fs *diskFilesystem) Size() (int64, int64) { @@ -178,7 +241,11 @@ func (fs *diskFilesystem) Size() (int64, int64) { if time.Since(fs.lastSizeCheck) >= 10*time.Second { var size int64 = 0 - fs.walk(func(path string, info os.FileInfo) { + fs.walk(fs.root, func(path string, info os.FileInfo) { + if info.IsDir() { + return + } + size += info.Size() }) @@ -187,17 +254,21 @@ fs.lastSizeCheck = time.Now() } - return fs.currentSize, fs.maxSize + return fs.currentSize, -1 } -func (fs *diskFilesystem) Resize(size int64) { - fs.maxSize = size +func (fs *diskFilesystem) Purge(size int64) int64 { + return 0 } func (fs *diskFilesystem) Files() int64 { var nfiles int64 = 0 - fs.walk(func(path string, info os.FileInfo) { + fs.walk(fs.root, func(path string, info os.FileInfo) { + if info.IsDir() { + return + } + nfiles++ }) @@ -205,38 +276,58 @@ } func (fs *diskFilesystem) Symlink(oldname, newname string) error { - oldname = filepath.Join(fs.dir, filepath.Clean("/"+oldname)) + 
oldname = fs.cleanPath(oldname) + newname = fs.cleanPath(newname) - if !filepath.IsAbs(newname) { - return nil + info, err := os.Lstat(oldname) + if err != nil { + return err } - newname = filepath.Join(fs.dir, filepath.Clean("/"+newname)) + if info.Mode()&os.ModeSymlink != 0 { + return fmt.Errorf("%s can't link to another link (%s)", newname, oldname) + } - err := os.Symlink(oldname, newname) + if info.IsDir() { + return fmt.Errorf("can't symlink directories") + } - return err + return os.Symlink(oldname, newname) } func (fs *diskFilesystem) Open(path string) File { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) + path = fs.cleanPath(path) - f, err := os.Open(path) + df := &diskFile{ + root: fs.root, + name: strings.TrimPrefix(path, fs.root), + } + + info, err := os.Lstat(path) if err != nil { return nil } - df := &diskFile{ - dir: fs.dir, - name: path, - file: f, + df.mode = info.Mode() + + f, err := os.Open(path) + if err != nil { + return nil } + df.file = f + return df } -func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) +func (fs *diskFilesystem) ReadFile(path string) ([]byte, error) { + path = fs.cleanPath(path) + + return os.ReadFile(path) +} + +func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { + path = fs.cleanPath(path) replace := true @@ -258,16 +349,155 @@ func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) { replace = false } + defer f.Close() + size, err := f.ReadFrom(r) if err != nil { return -1, false, fmt.Errorf("reading data failed: %w", err) } + fs.lastSizeCheck = time.Time{} + return size, !replace, nil } -func (fs *diskFilesystem) Delete(path string) int64 { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) +func (fs *diskFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *diskFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { + path = fs.cleanPath(path) + dir, filename := filepath.Split(path) + + tmpfile, err := os.CreateTemp(dir, filename) + if err != nil { + return -1, false, err + } + + defer os.Remove(tmpfile.Name()) + + size, err := tmpfile.Write(data) + if err != nil { + return -1, false, err + } + + if err := tmpfile.Close(); err != nil { + return -1, false, err + } + + replace := false + if _, err := fs.Stat(path); err == nil { + replace = true + } + + if err := fs.rename(tmpfile.Name(), path); err != nil { + return -1, false, err + } + + fs.lastSizeCheck = time.Time{} + + return int64(size), !replace, nil +} + +func (fs *diskFilesystem) Rename(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + return fs.rename(src, dst) +} + +func (fs *diskFilesystem) rename(src, dst string) error { + if src == dst { + return nil + } + + // First try to rename the file + if err := os.Rename(src, dst); err == nil { + return nil + } + + // If renaming the file fails, copy the data + if err := fs.copy(src, dst); err != nil { + os.Remove(dst) + return fmt.Errorf("failed to copy files: %w", err) + } + + if err := os.Remove(src); err != nil { + os.Remove(dst) + return fmt.Errorf("failed to remove source file: %w", err) + } + + return nil +} + +func (fs *diskFilesystem) Copy(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + return fs.copy(src, dst) +} + +func (fs *diskFilesystem) copy(src, dst string) error { + source, err := os.Open(src) 
+ if err != nil { + return fmt.Errorf("failed to open source file: %w", err) + } + + destination, err := os.Create(dst) + if err != nil { + source.Close() + return fmt.Errorf("failed to create destination file: %w", err) + } + defer destination.Close() + + if _, err := io.Copy(destination, source); err != nil { + source.Close() + os.Remove(dst) + return fmt.Errorf("failed to copy data from source to destination: %w", err) + } + + source.Close() + + fs.lastSizeCheck = time.Time{} + + return nil +} + +func (fs *diskFilesystem) MkdirAll(path string, perm os.FileMode) error { + path = fs.cleanPath(path) + + return os.MkdirAll(path, perm) +} + +func (fs *diskFilesystem) Stat(path string) (FileInfo, error) { + path = fs.cleanPath(path) + + dif := &diskFileInfo{ + root: fs.root, + name: strings.TrimPrefix(path, fs.root), + } + + info, err := os.Lstat(path) + if err != nil { + return nil, err + } + + dif.mode = info.Mode() + + if info.Mode()&os.ModeSymlink != 0 { + info, err = os.Stat(path) + if err != nil { + return nil, err + } + } + + dif.finfo = info + + return dif, nil +} + +func (fs *diskFilesystem) Remove(path string) int64 { + path = fs.cleanPath(path) finfo, err := os.Stat(path) if err != nil { @@ -280,28 +510,31 @@ func (fs *diskFilesystem) Delete(path string) int64 { return -1 } + fs.lastSizeCheck = time.Time{} + return size } -func (fs *diskFilesystem) DeleteAll() int64 { +func (fs *diskFilesystem) RemoveAll() int64 { return 0 } -func (fs *diskFilesystem) List(pattern string) []FileInfo { +func (fs *diskFilesystem) List(path, pattern string) []FileInfo { + path = fs.cleanPath(path) files := []FileInfo{} - fs.walk(func(path string, info os.FileInfo) { - if path == fs.dir { + fs.walk(path, func(path string, info os.FileInfo) { + if path == fs.root { return } - name := strings.TrimPrefix(path, fs.dir) + name := strings.TrimPrefix(path, fs.root) if name[0] != os.PathSeparator { name = string(os.PathSeparator) + name } if info.IsDir() { - name += "/" + return } if len(pattern) != 0 { @@ -311,7 +544,7 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo { } files = append(files, &diskFileInfo{ - dir: fs.dir, + root: fs.root, name: name, finfo: info, }) @@ -320,8 +553,53 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo { return files } -func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) { - filepath.Walk(fs.dir, func(path string, info os.FileInfo, err error) error { +func (fs *diskFilesystem) LookPath(file string) (string, error) { + if strings.Contains(file, "/") { + file = fs.cleanPath(file) + err := fs.findExecutable(file) + if err == nil { + return file, nil + } + return "", os.ErrNotExist + } + path := os.Getenv("PATH") + for _, dir := range filepath.SplitList(path) { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." 
+ } + path := filepath.Join(dir, file) + path = fs.cleanPath(path) + if err := fs.findExecutable(path); err == nil { + if !filepath.IsAbs(path) { + return path, os.ErrNotExist + } + return path, nil + } + } + return "", os.ErrNotExist +} + +func (fs *diskFilesystem) findExecutable(file string) error { + d, err := fs.Stat(file) + if err != nil { + return err + } + + m := d.Mode() + if m.IsDir() { + return fmt.Errorf("is a directory") + } + + if m&0111 != 0 { + return nil + } + + return os.ErrPermission +} + +func (fs *diskFilesystem) walk(path string, walkfn func(path string, info os.FileInfo)) { + filepath.Walk(path, func(path string, info os.FileInfo, err error) error { if err != nil { return nil } @@ -341,3 +619,11 @@ func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) { return nil }) } + +func (fs *diskFilesystem) cleanPath(path string) string { + if !filepath.IsAbs(path) { + path = filepath.Join(fs.cwd, path) + } + + return filepath.Join(fs.root, filepath.Clean(path)) +} diff --git a/io/fs/dummy.go b/io/fs/dummy.go deleted file mode 100644 index 442d1586..00000000 --- a/io/fs/dummy.go +++ /dev/null @@ -1,40 +0,0 @@ -package fs - -import ( - "io" - "time" -) - -type dummyFileInfo struct{} - -func (d *dummyFileInfo) Name() string { return "" } -func (d *dummyFileInfo) Size() int64 { return 0 } -func (d *dummyFileInfo) ModTime() time.Time { return time.Date(2000, 1, 1, 0, 0, 0, 0, nil) } -func (d *dummyFileInfo) IsLink() (string, bool) { return "", false } -func (d *dummyFileInfo) IsDir() bool { return false } - -type dummyFile struct{} - -func (d *dummyFile) Read(p []byte) (int, error) { return 0, io.EOF } -func (d *dummyFile) Close() error { return nil } -func (d *dummyFile) Name() string { return "" } -func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil } - -type dummyFilesystem struct{} - -func (d *dummyFilesystem) Base() string { return "/" } -func (d *dummyFilesystem) Rebase(string) error { return nil } -func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 } -func (d *dummyFilesystem) Resize(int64) {} -func (d *dummyFilesystem) Files() int64 { return 0 } -func (d *dummyFilesystem) Symlink(string, string) error { return nil } -func (d *dummyFilesystem) Open(string) File { return &dummyFile{} } -func (d *dummyFilesystem) Store(string, io.Reader) (int64, bool, error) { return 0, true, nil } -func (d *dummyFilesystem) Delete(string) int64 { return 0 } -func (d *dummyFilesystem) DeleteAll() int64 { return 0 } -func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} } - -// NewDummyFilesystem return a dummy filesystem -func NewDummyFilesystem() Filesystem { - return &dummyFilesystem{} -} diff --git a/io/fs/fs.go b/io/fs/fs.go index d1923c47..9f3b8661 100644 --- a/io/fs/fs.go +++ b/io/fs/fs.go @@ -3,24 +3,29 @@ package fs import ( "io" + "io/fs" + "os" "time" ) // FileInfo describes a file and is returned by Stat. type FileInfo interface { - // Name returns the full name of the file + // Name returns the full name of the file. Name() string - // Size reports the size of the file in bytes + // Size reports the size of the file in bytes. Size() int64 - // ModTime returns the time of last modification + // Mode returns the file mode. + Mode() fs.FileMode + + // ModTime returns the time of last modification. ModTime() time.Time // IsLink returns the path this file is linking to and true. Otherwise an empty string and false. 
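	// On the disk filesystem, a link whose target resolves outside the root is reported as a regular file, not as a link.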
IsLink() (string, bool) - // IsDir returns whether the file represents a directory + // IsDir returns whether the file represents a directory. IsDir() bool } @@ -28,52 +33,101 @@ type FileInfo interface { type File interface { io.ReadCloser - // Name returns the Name of the file + // Name returns the Name of the file. Name() string - // Stat returns the FileInfo to this file. In case of an error - // FileInfo is nil and the error is non-nil. + // Stat returns the FileInfo to this file. In case of an error FileInfo is nil + // and the error is non-nil. If the file is a symlink, the info reports the name and mode + // of the link itself, but the modification time and size of the linked file. Stat() (FileInfo, error) } -// Filesystem is an interface that provides access to a filesystem. -type Filesystem interface { - // Base returns the base path of this filesystem - Base() string - - // Rebase sets a new base path for this filesystem - Rebase(string) error - +type ReadFilesystem interface { // Size returns the consumed size and capacity of the filesystem in bytes. The - // capacity is negative if the filesystem can consume as much space as it can. + // capacity is zero or negative if the filesystem can consume as much space as it wants. Size() (int64, int64) - // Resize resizes the filesystem to the new size. Files may need to be deleted. - Resize(size int64) - // Files returns the current number of files in the filesystem. Files() int64 + // Open returns the file stored at the given path. It returns nil if the + // file doesn't exist. If the file is a symlink, the name is the name of + // the link, but it will read the contents of the linked file. + Open(path string) File + + // ReadFile reads the content of the file at the given path and returns it + // as a byte slice, or an error. + ReadFile(path string) ([]byte, error) + + // Stat returns info about the file at path. If the file doesn't exist, an error + // will be returned. If the file is a symlink, the info reports the name and mode + // of the link itself, but the modification time and size are of the linked file. + Stat(path string) (FileInfo, error) + + // List lists all files that are currently on the filesystem. + List(path, pattern string) []FileInfo + + // LookPath searches for an executable named file in the directories named by the PATH environment + // variable. If file contains a slash, it is tried directly and the PATH is not consulted. Otherwise, + // on success, the result is an absolute path. On non-disk filesystems, only the mere existence + // of that file is verified. + LookPath(file string) (string, error) +} + +type WriteFilesystem interface { // Symlink creates newname as a symbolic link to oldname. Symlink(oldname, newname string) error - // Open returns the file stored at the given path. It returns nil if the - // file doesn't exist. - Open(path string) File + // WriteFileReader adds a file to the filesystem. Returns the size of the data that has been + // stored in bytes and whether the file is new. The size is negative if there was + // an error adding the file and error is not nil. + WriteFileReader(path string, r io.Reader) (int64, bool, error) + + // WriteFile adds a file to the filesystem. Returns the size of the data that has been + // stored in bytes and whether the file is new. The size is negative if there was + // an error adding the file and error is not nil. + WriteFile(path string, data []byte) (int64, bool, error) - // Store adds a file to the filesystem. 
Returns the size of the data that has been + // WriteFileSafe adds a file to the filesystem by first writing it to a tempfile and then + // renaming it to the actual path. Returns the size of the data that has been // stored in bytes and whether the file is new. The size is negative if there was // an error adding the file and error is not nil. - Store(path string, r io.Reader) (int64, bool, error) + WriteFileSafe(path string, data []byte) (int64, bool, error) + + // MkdirAll creates a directory named path, along with any necessary parents, and returns nil, + // or else returns an error. The permission bits perm (before umask) are used for all directories + // that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil. + MkdirAll(path string, perm os.FileMode) error + + // Rename renames the file from src to dst. If src and dst can't be renamed + // regularly, the data is copied from src to dst. dst will be overwritten + // if it already exists. src will be removed after all data has been copied + // successfully. Both files exist during copying. + Rename(src, dst string) error + + // Copy copies a file from src to dst. + Copy(src, dst string) error - // Delete removes a file at the given path from the filesystem. Returns the size of + // Remove removes a file at the given path from the filesystem. Returns the size of // the remove file in bytes. The size is negative if the file doesn't exist. - Delete(path string) int64 + Remove(path string) int64 - // DeleteAll removes all files from the filesystem. Returns the size of the + // RemoveAll removes all files from the filesystem. Returns the size of the // removed files in bytes. - DeleteAll() int64 + RemoveAll() int64 +} - // List lists all files that are currently on the filesystem. - List(pattern string) []FileInfo +// Filesystem is an interface that provides access to a filesystem. +type Filesystem interface { + ReadFilesystem + WriteFilesystem + + // Name returns the name of the filesystem. + Name() string + + // Type returns the type of the filesystem, e.g. 
disk, mem, s3 + Type() string + + Metadata(key string) string + SetMetadata(key string, data string) } diff --git a/io/fs/fs_test.go b/io/fs/fs_test.go new file mode 100644 index 00000000..18a7aa9f --- /dev/null +++ b/io/fs/fs_test.go @@ -0,0 +1,742 @@ +package fs + +import ( + "errors" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +var ErrNoMinio = errors.New("minio binary not found") + +func startMinio(t *testing.T, path string) (*exec.Cmd, error) { + err := os.MkdirAll(path, 0700) + require.NoError(t, err) + + minio, err := exec.LookPath("minio") + if err != nil { + return nil, ErrNoMinio + } + + proc := exec.Command(minio, "server", path, "--address", "127.0.0.1:9000") + proc.Stderr = os.Stderr + proc.Stdout = os.Stdout + err = proc.Start() + require.NoError(t, err) + + time.Sleep(5 * time.Second) + + return proc, nil +} + +func stopMinio(t *testing.T, proc *exec.Cmd) { + err := proc.Process.Signal(os.Interrupt) + require.NoError(t, err) + + proc.Wait() +} + +func TestFilesystem(t *testing.T) { + miniopath, err := filepath.Abs("./minio") + require.NoError(t, err) + + err = os.RemoveAll(miniopath) + require.NoError(t, err) + + minio, err := startMinio(t, miniopath) + if err != nil { + if err != ErrNoMinio { + require.NoError(t, err) + } + } + + os.RemoveAll("./testing/") + + filesystems := map[string]func(string) (Filesystem, error){ + "memfs": func(name string) (Filesystem, error) { + return NewMemFilesystem(MemConfig{}) + }, + "diskfs": func(name string) (Filesystem, error) { + return NewRootedDiskFilesystem(RootedDiskConfig{ + Root: "./testing/" + name, + }) + }, + "s3fs": func(name string) (Filesystem, error) { + return NewS3Filesystem(S3Config{ + Name: name, + Endpoint: "127.0.0.1:9000", + AccessKeyID: "minioadmin", + SecretAccessKey: "minioadmin", + Region: "", + Bucket: strings.ToLower(name), + UseSSL: false, + Logger: nil, + }) + }, + } + + tests := map[string]func(*testing.T, Filesystem){ + "new": testNew, + "metadata": testMetadata, + "writeFile": testWriteFile, + "writeFileSafe": testWriteFileSafe, + "writeFileReader": testWriteFileReader, + "delete": testDelete, + "files": testFiles, + "replace": testReplace, + "list": testList, + "listGlob": testListGlob, + "deleteAll": testDeleteAll, + "data": testData, + "statDir": testStatDir, + "mkdirAll": testMkdirAll, + "rename": testRename, + "renameOverwrite": testRenameOverwrite, + "copy": testCopy, + "symlink": testSymlink, + "stat": testStat, + "copyOverwrite": testCopyOverwrite, + "symlinkErrors": testSymlinkErrors, + "symlinkOpenStat": testSymlinkOpenStat, + "open": testOpen, + } + + for fsname, fs := range filesystems { + for name, test := range tests { + t.Run(fsname+"-"+name, func(t *testing.T) { + if fsname == "s3fs" && minio == nil { + t.Skip("minio server not available") + } + filesystem, err := fs(name) + require.NoError(t, err) + test(t, filesystem) + }) + } + } + + os.RemoveAll("./testing/") + + if minio != nil { + stopMinio(t, minio) + } + + os.RemoveAll(miniopath) +} + +func testNew(t *testing.T, fs Filesystem) { + cur, max := fs.Size() + + require.Equal(t, int64(0), cur, "current size") + require.Equal(t, int64(-1), max, "max size") + + cur = fs.Files() + + require.Equal(t, int64(0), cur, "number of files") +} + +func testMetadata(t *testing.T, fs Filesystem) { + fs.SetMetadata("foo", "bar") + require.Equal(t, "bar", fs.Metadata("foo")) +} + +func testWriteFile(t *testing.T, fs Filesystem) { + size, created, err := fs.WriteFile("/foobar", 
[]byte("xxxxx")) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testWriteFileSafe(t *testing.T, fs Filesystem) { + size, created, err := fs.WriteFileSafe("/foobar", []byte("xxxxx")) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testWriteFileReader(t *testing.T, fs Filesystem) { + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testOpen(t *testing.T, fs Filesystem) { + file := fs.Open("/foobar") + require.Nil(t, file) + + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx")) + require.NoError(t, err) + + file = fs.Open("/foobar") + require.NotNil(t, file) + require.Equal(t, "/foobar", file.Name()) + + stat, err := file.Stat() + require.NoError(t, err) + require.Equal(t, "/foobar", stat.Name()) + require.Equal(t, int64(5), stat.Size()) + require.Equal(t, false, stat.IsDir()) +} + +func testDelete(t *testing.T, fs Filesystem) { + size := fs.Remove("/foobar") + + require.Equal(t, int64(-1), size) + + data := strings.NewReader("xxxxx") + + fs.WriteFileReader("/foobar", data) + + size = fs.Remove("/foobar") + + require.Equal(t, int64(5), size) + + cur, max := fs.Size() + + require.Equal(t, int64(0), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(0), cur) +} + +func testFiles(t *testing.T, fs Filesystem) { + require.Equal(t, int64(0), fs.Files()) + + fs.WriteFileReader("/foobar.txt", strings.NewReader("bar")) + + require.Equal(t, int64(1), fs.Files()) + + fs.MkdirAll("/path/to/foo", 0777) + + require.Equal(t, int64(1), fs.Files()) + + fs.Remove("/foobar.txt") + + require.Equal(t, int64(0), fs.Files()) +} + +func testReplace(t *testing.T, fs Filesystem) { + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) + + data = strings.NewReader("yyy") + + size, created, err = fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(3), size) + require.Equal(t, false, created) + + cur, max = fs.Size() + + require.Equal(t, int64(3), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testList(t *testing.T, fs Filesystem) { + fs.WriteFileReader("/foobar1", strings.NewReader("a")) + fs.WriteFileReader("/foobar2", strings.NewReader("bb")) + fs.WriteFileReader("/foobar3", strings.NewReader("ccc")) + fs.WriteFileReader("/foobar4", strings.NewReader("dddd")) + fs.WriteFileReader("/path/foobar3", strings.NewReader("ccc")) + fs.WriteFileReader("/path/to/foobar4", strings.NewReader("dddd")) + + cur, max := fs.Size() + + require.Equal(t, int64(17), cur) + require.Equal(t, 
int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(6), cur) + + getNames := func(files []FileInfo) []string { + names := []string{} + for _, f := range files { + names = append(names, f.Name()) + } + return names + } + + files := fs.List("/", "") + + require.Equal(t, 6, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar2", "/foobar3", "/foobar4", "/path/foobar3", "/path/to/foobar4"}, getNames(files)) + + files = fs.List("/path", "") + + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar3", "/path/to/foobar4"}, getNames(files)) +} + +func testListGlob(t *testing.T, fs Filesystem) { + fs.WriteFileReader("/foobar1", strings.NewReader("a")) + fs.WriteFileReader("/path/foobar2", strings.NewReader("a")) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("a")) + fs.WriteFileReader("/foobar4", strings.NewReader("a")) + + cur := fs.Files() + + require.Equal(t, int64(4), cur) + + getNames := func(files []FileInfo) []string { + names := []string{} + for _, f := range files { + names = append(names, f.Name()) + } + return names + } + + files := getNames(fs.List("/", "/foo*")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files) + + files = getNames(fs.List("/", "/*bar?")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files) + + files = getNames(fs.List("/", "/path/*")) + require.Equal(t, 1, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2"}, files) + + files = getNames(fs.List("/", "/path/**")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files) + + files = getNames(fs.List("/path", "/**")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files) +} + +func testDeleteAll(t *testing.T, fs Filesystem) { + if _, ok := fs.(*diskFilesystem); ok { + return + } + + fs.WriteFileReader("/foobar1", strings.NewReader("abc")) + fs.WriteFileReader("/path/foobar2", strings.NewReader("abc")) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc")) + fs.WriteFileReader("/foobar4", strings.NewReader("abc")) + + cur := fs.Files() + + require.Equal(t, int64(4), cur) + + size := fs.RemoveAll() + require.Equal(t, int64(12), size) + + cur = fs.Files() + + require.Equal(t, int64(0), cur) +} + +func testData(t *testing.T, fs Filesystem) { + file := fs.Open("/foobar") + require.Nil(t, file) + + _, err := fs.ReadFile("/foobar") + require.Error(t, err) + + data := "gduwotoxqb" + + data1 := strings.NewReader(data) + + _, _, err = fs.WriteFileReader("/foobar", data1) + require.NoError(t, err) + + file = fs.Open("/foobar") + require.NotNil(t, file) + + data2 := make([]byte, len(data)+1) + n, err := file.Read(data2) + if err != nil { + if err != io.EOF { + require.NoError(t, err) + } + } + + require.Equal(t, len(data), n) + require.Equal(t, []byte(data), data2[:n]) + + data3, err := fs.ReadFile("/foobar") + + require.NoError(t, err) + require.Equal(t, []byte(data), data3) +} + +func testStatDir(t *testing.T, fs Filesystem) { + info, err := fs.Stat("/") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, true, info.IsDir()) + + data := strings.NewReader("gduwotoxqb") + fs.WriteFileReader("/these/are/some/directories/foobar", data) + + info, err = fs.Stat("/foobar") + require.Error(t, err) + require.Nil(t, info) + + info, err = fs.Stat("/these/are/some/directories/foobar") + require.NoError(t, err) + 
require.Equal(t, "/these/are/some/directories/foobar", info.Name()) + require.Equal(t, int64(10), info.Size()) + require.Equal(t, false, info.IsDir()) + + info, err = fs.Stat("/these") + require.NoError(t, err) + require.Equal(t, "/these", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/") + require.NoError(t, err) + require.Equal(t, "/these/are", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/some") + require.NoError(t, err) + require.Equal(t, "/these/are/some", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/some/directories") + require.NoError(t, err) + require.Equal(t, "/these/are/some/directories", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) +} + +func testMkdirAll(t *testing.T, fs Filesystem) { + info, err := fs.Stat("/foo/bar/dir") + require.Error(t, err) + require.Nil(t, info) + + err = fs.MkdirAll("/foo/bar/dir", 0755) + require.NoError(t, err) + + err = fs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + info, err = fs.Stat("/foo/bar/dir") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/foo") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/foo/bar") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + err = fs.MkdirAll("/foobar", 0755) + require.Error(t, err) +} + +func testRename(t *testing.T, fs Filesystem) { + err := fs.Rename("/foobar", "/foobaz") + require.Error(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + err = fs.Rename("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) +} + +func testRenameOverwrite(t *testing.T, fs Filesystem) { + _, err := fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + err = fs.Rename("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + data, err := fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlink(t *testing.T, fs Filesystem) { + if _, ok := fs.(*s3Filesystem); ok { + return + } + + err := 
fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.NoError(t, err) + + file := fs.Open("/foobaz") + require.NotNil(t, file) + require.Equal(t, "/foobaz", file.Name()) + + data := make([]byte, 10) + n, err := file.Read(data) + if err != nil { + if err != io.EOF { + require.NoError(t, err) + } + } + require.NoError(t, err) + require.Equal(t, 6, n) + require.Equal(t, "foobar", string(data[:n])) + + stat, err := fs.Stat("/foobaz") + require.NoError(t, err) + require.Equal(t, "/foobaz", stat.Name()) + require.Equal(t, int64(6), stat.Size()) + require.NotEqual(t, 0, int(stat.Mode()&os.ModeSymlink)) + + link, ok := stat.IsLink() + require.Equal(t, "/foobar", link) + require.Equal(t, true, ok) + + data, err = fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlinkOpenStat(t *testing.T, fs Filesystem) { + if _, ok := fs.(*s3Filesystem); ok { + return + } + + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.NoError(t, err) + + file := fs.Open("/foobaz") + require.NotNil(t, file) + require.Equal(t, "/foobaz", file.Name()) + + fstat, err := file.Stat() + require.NoError(t, err) + + stat, err := fs.Stat("/foobaz") + require.NoError(t, err) + + require.Equal(t, "/foobaz", fstat.Name()) + require.Equal(t, fstat.Name(), stat.Name()) + + require.Equal(t, int64(6), fstat.Size()) + require.Equal(t, fstat.Size(), stat.Size()) + + require.NotEqual(t, 0, int(fstat.Mode()&os.ModeSymlink)) + require.Equal(t, fstat.Mode(), stat.Mode()) +} + +func testStat(t *testing.T, fs Filesystem) { + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + file := fs.Open("/foobar") + require.NotNil(t, file) + + stat1, err := fs.Stat("/foobar") + require.NoError(t, err) + + stat2, err := file.Stat() + require.NoError(t, err) + + require.Equal(t, stat1, stat2) +} + +func testCopy(t *testing.T, fs Filesystem) { + err := fs.Rename("/foobar", "/foobaz") + require.Error(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + err = fs.Copy("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) +} + +func testCopyOverwrite(t *testing.T, fs Filesystem) { + _, err := fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + err = fs.Copy("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + data, err := fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlinkErrors(t *testing.T, fs Filesystem) { + if _, ok := 
fs.(*s3Filesystem); ok { + return + } + + err := fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + err = fs.Symlink("/foobar", "/bazfoo") + require.NoError(t, err) + + err = fs.Symlink("/bazfoo", "/barfoo") + require.Error(t, err) +} diff --git a/io/fs/mem.go b/io/fs/mem.go index d682d0a3..a75eb932 100644 --- a/io/fs/mem.go +++ b/io/fs/mem.go @@ -4,7 +4,11 @@ import ( "bytes" "fmt" "io" + "io/fs" + "os" + "path/filepath" "sort" + "strings" "sync" "time" @@ -15,25 +19,15 @@ import ( // MemConfig is the config that is required for creating // a new memory filesystem. type MemConfig struct { - // Base is the base path to be reported for this filesystem - Base string - - // Size is the capacity of the filesystem in bytes - Size int64 - - // Set true to automatically delete the oldest files until there's - // enough space to store a new file - Purge bool - - // For logging, optional - Logger log.Logger + Logger log.Logger // For logging, optional } type memFileInfo struct { - name string - size int64 - lastMod time.Time - linkTo string + name string // Full name of the file (including path) + size int64 // The size of the file in bytes + dir bool // Whether this file represents a directory + lastMod time.Time // The time of the last modification of the file + linkTo string // Where the file links to, empty if it's not a link } func (f *memFileInfo) Name() string { @@ -44,6 +38,20 @@ func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() fs.FileMode { + mode := fs.FileMode(fs.ModePerm) + + if f.dir { + mode |= fs.ModeDir + } + + if len(f.linkTo) != 0 { + mode |= fs.ModeSymlink + } + + return mode +} + func (f *memFileInfo) ModTime() time.Time { return f.lastMod } @@ -53,24 +61,12 @@ func (f *memFileInfo) IsLink() (string, bool) { } func (f *memFileInfo) IsDir() bool { - return false + return f.dir } type memFile struct { - // Name of the file - name string - - // Size of the file in bytes - size int64 - - // Last modification of the file as a UNIX timestamp - lastMod time.Time - - // Contents of the file - data *bytes.Buffer - - // Link to another file - linkTo string + memFileInfo + data *bytes.Buffer // Contents of the file } func (f *memFile) Name() string { @@ -81,6 +77,7 @@ func (f *memFile) Stat() (FileInfo, error) { info := &memFileInfo{ name: f.name, size: f.size, + dir: f.dir, lastMod: f.lastMod, linkTo: f.linkTo, } @@ -107,7 +104,8 @@ func (f *memFile) Close() error { } type memFilesystem struct { - base string + metadata map[string]string + metaLock sync.RWMutex // Mapping of path to file files map[string]*memFile @@ -118,34 +116,27 @@ type memFilesystem struct { // Pool for the storage of the contents of files dataPool sync.Pool - // Max. size of the filesystem in bytes as - // given by the config - maxSize int64 - // Current size of the filesystem in bytes currentSize int64 - // Purge setting from the config - purge bool - // Logger from the config logger log.Logger } // NewMemFilesystem creates a new filesystem in memory that implements // the Filesystem interface. 
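With Base, Size, and Purge gone from MemConfig, constructing a memory filesystem reduces to an optional logger, and the constructor now also returns an error. A short sketch against the signatures introduced below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	// MemConfig now only carries an optional Logger; size limits and
	// automatic purging are no longer configured per filesystem.
	memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
	if err != nil {
		log.Fatal(err)
	}

	if _, _, err := memfs.WriteFile("/hello.txt", []byte("hello")); err != nil {
		log.Fatal(err)
	}

	cur, max := memfs.Size()
	fmt.Println(cur, max, memfs.Files()) // 5 -1 1: the capacity is unbounded
}
```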
-func NewMemFilesystem(config MemConfig) Filesystem { +func NewMemFilesystem(config MemConfig) (Filesystem, error) { fs := &memFilesystem{ - base: config.Base, - maxSize: config.Size, - purge: config.Purge, - logger: config.Logger, + metadata: make(map[string]string), + logger: config.Logger, } if fs.logger == nil { - fs.logger = log.New("MemFS") + fs.logger = log.New("") } + fs.logger = fs.logger.WithField("type", "mem") + fs.files = make(map[string]*memFile) fs.dataPool = sync.Pool{ @@ -154,61 +145,105 @@ func NewMemFilesystem(config MemConfig) Filesystem { }, } - fs.logger.WithFields(log.Fields{ - "size_bytes": fs.maxSize, - "purge": fs.purge, - }).Debug().Log("Created") + fs.logger.Debug().Log("Created") - return fs + return fs, nil } -func (fs *memFilesystem) Base() string { - return fs.base -} +func NewMemFilesystemFromDir(dir string, config MemConfig) (Filesystem, error) { + mem, err := NewMemFilesystem(config) + if err != nil { + return nil, err + } -func (fs *memFilesystem) Rebase(base string) error { - fs.base = base + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } - return nil + if info.IsDir() { + return nil + } + + mode := info.Mode() + if !mode.IsRegular() { + return nil + } + + if mode&os.ModeSymlink != 0 { + return nil + } + + file, err := os.Open(path) + if err != nil { + return nil + } + + defer file.Close() + + _, _, err = mem.WriteFileReader(path, file) + if err != nil { + return fmt.Errorf("can't copy %s", path) + } + + return nil + }) + if err != nil { + return nil, err + } + + return mem, nil } -func (fs *memFilesystem) Size() (int64, int64) { - fs.filesLock.RLock() - defer fs.filesLock.RUnlock() +func (fs *memFilesystem) Name() string { + return "mem" +} - return fs.currentSize, fs.maxSize +func (fs *memFilesystem) Type() string { + return "mem" } -func (fs *memFilesystem) Resize(size int64) { - fs.filesLock.Lock() - defer fs.filesLock.Unlock() +func (fs *memFilesystem) Metadata(key string) string { + fs.metaLock.RLock() + defer fs.metaLock.RUnlock() - diffSize := fs.maxSize - size + return fs.metadata[key] +} - if diffSize == 0 { - return - } +func (fs *memFilesystem) SetMetadata(key, data string) { + fs.metaLock.Lock() + defer fs.metaLock.Unlock() - if diffSize > 0 { - fs.free(diffSize) - } + fs.metadata[key] = data +} - fs.logger.WithFields(log.Fields{ - "from_bytes": fs.maxSize, - "to_bytes": size, - }).Debug().Log("Resizing") +func (fs *memFilesystem) Size() (int64, int64) { + fs.filesLock.RLock() + defer fs.filesLock.RUnlock() - fs.maxSize = size + return fs.currentSize, -1 } func (fs *memFilesystem) Files() int64 { fs.filesLock.RLock() defer fs.filesLock.RUnlock() - return int64(len(fs.files)) + nfiles := int64(0) + + for _, f := range fs.files { + if f.dir { + continue + } + + nfiles++ + } + + return nfiles } func (fs *memFilesystem) Open(path string) File { + path = fs.cleanPath(path) + fs.filesLock.RLock() file, ok := fs.files[path] fs.filesLock.RUnlock() @@ -218,29 +253,68 @@ func (fs *memFilesystem) Open(path string) File { } newFile := &memFile{ - name: file.name, - size: file.size, - lastMod: file.lastMod, - linkTo: file.linkTo, + memFileInfo: memFileInfo{ + name: file.name, + size: file.size, + lastMod: file.lastMod, + linkTo: file.linkTo, + }, + } + + if len(file.linkTo) != 0 { + file, ok = fs.files[file.linkTo] + if !ok { + return nil + } } if file.data != nil { + newFile.lastMod = file.lastMod newFile.data = bytes.NewBuffer(file.data.Bytes()) + newFile.size = 
int64(newFile.data.Len()) } return newFile } +func (fs *memFilesystem) ReadFile(path string) ([]byte, error) { + path = fs.cleanPath(path) + + fs.filesLock.RLock() + file, ok := fs.files[path] + fs.filesLock.RUnlock() + + if !ok { + return nil, os.ErrNotExist + } + + if len(file.linkTo) != 0 { + file, ok = fs.files[file.linkTo] + if !ok { + return nil, os.ErrNotExist + } + } + + if file.data != nil { + return file.data.Bytes(), nil + } + + return nil, nil +} + func (fs *memFilesystem) Symlink(oldname, newname string) error { + oldname = fs.cleanPath(oldname) + newname = fs.cleanPath(newname) + fs.filesLock.Lock() defer fs.filesLock.Unlock() - if _, ok := fs.files[newname]; ok { - return fmt.Errorf("%s already exist", newname) + if _, ok := fs.files[oldname]; !ok { + return os.ErrNotExist } - if oldname[0] != '/' { - oldname = "/" + oldname + if _, ok := fs.files[newname]; ok { + return os.ErrExist } if file, ok := fs.files[oldname]; ok { @@ -250,11 +324,14 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { } newFile := &memFile{ - name: newname, - size: 0, - lastMod: time.Now(), - data: nil, - linkTo: oldname, + memFileInfo: memFileInfo{ + name: newname, + dir: false, + size: 0, + lastMod: time.Now(), + linkTo: oldname, + }, + data: nil, } fs.files[newname] = newFile @@ -262,18 +339,21 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { return nil } -func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { +func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { + path = fs.cleanPath(path) + newFile := &memFile{ - name: path, - size: 0, - lastMod: time.Now(), - data: nil, + memFileInfo: memFileInfo{ + name: path, + dir: false, + size: 0, + lastMod: time.Now(), + }, + data: fs.dataPool.Get().(*bytes.Buffer), } - data := fs.dataPool.Get().(*bytes.Buffer) - data.Reset() - - size, err := data.ReadFrom(r) + newFile.data.Reset() + size, err := newFile.data.ReadFrom(r) if err != nil { fs.logger.WithFields(log.Fields{ "path": path, @@ -281,55 +361,26 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { "error": err, }).Warn().Log("Incomplete file") } - newFile.size = size - newFile.data = data - // reject if the new file is larger than the available space - if fs.maxSize > 0 && newFile.size > fs.maxSize { - fs.dataPool.Put(data) - return -1, false, fmt.Errorf("File is too big") - } + newFile.size = size fs.filesLock.Lock() defer fs.filesLock.Unlock() - // calculate the new size of the filesystem - newSize := fs.currentSize + newFile.size - file, replace := fs.files[path] if replace { - newSize -= file.size - } - - if fs.maxSize > 0 { - if newSize > fs.maxSize { - if !fs.purge { - fs.dataPool.Put(data) - return -1, false, fmt.Errorf("not enough space on device") - } - - if replace { - delete(fs.files, path) - fs.currentSize -= file.size - - fs.dataPool.Put(file.data) - file.data = nil - } + delete(fs.files, path) - newSize -= fs.free(fs.currentSize + newFile.size - fs.maxSize) - } - } else { - if replace { - delete(fs.files, path) + fs.currentSize -= file.size - fs.dataPool.Put(file.data) - file.data = nil - } + fs.dataPool.Put(file.data) + file.data = nil } - fs.currentSize = newSize fs.files[path] = newFile + fs.currentSize += newFile.size + logger := fs.logger.WithFields(log.Fields{ "path": newFile.name, "filesize_bytes": newFile.size, @@ -345,7 +396,18 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { return newFile.size, !replace, nil } 
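The Purge method below (the renamed free) is what remains of the old automatic eviction: the backend still frees the oldest files first, but only when a caller asks for it, and the Filesystem interface in this patch doesn't expose it. A hypothetical budget helper, assuming a caller that type-asserts for the method:

```go
// enforceBudget is a hypothetical helper, not part of this patch: it keeps a
// filesystem under a byte budget by asking the backend to purge the excess.
func enforceBudget(filesystem fs.Filesystem, limit int64) int64 {
	cur, _ := filesystem.Size()
	if cur <= limit {
		return 0
	}

	// Purge is implemented by the mem backend (and as a no-op on disk), but it
	// is not part of the Filesystem interface shown in this diff, hence the
	// type assertion.
	if p, ok := filesystem.(interface{ Purge(size int64) int64 }); ok {
		return p.Purge(cur - limit) // frees the oldest files first
	}

	return 0
}
```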
-func (fs *memFilesystem) free(size int64) int64 { +func (fs *memFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *memFilesystem) Purge(size int64) int64 { + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + files := []*memFile{} for _, f := range fs.files { @@ -383,7 +445,190 @@ func (fs *memFilesystem) free(size int64) int64 { return freed } -func (fs *memFilesystem) Delete(path string) int64 { +func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error { + path = fs.cleanPath(path) + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + info, err := fs.stat(path) + if err == nil { + if info.IsDir() { + return nil + } + + return os.ErrExist + } + + f := &memFile{ + memFileInfo: memFileInfo{ + name: path, + size: 0, + dir: true, + lastMod: time.Now(), + }, + data: nil, + } + + fs.files[path] = f + + return nil +} + +func (fs *memFilesystem) Rename(src, dst string) error { + src = filepath.Join("/", filepath.Clean(src)) + dst = filepath.Join("/", filepath.Clean(dst)) + + if src == dst { + return nil + } + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + srcFile, ok := fs.files[src] + if !ok { + return os.ErrNotExist + } + + dstFile, ok := fs.files[dst] + if ok { + fs.currentSize -= dstFile.size + + fs.dataPool.Put(dstFile.data) + dstFile.data = nil + } + + fs.files[dst] = srcFile + delete(fs.files, src) + + return nil +} + +func (fs *memFilesystem) Copy(src, dst string) error { + src = filepath.Join("/", filepath.Clean(src)) + dst = filepath.Join("/", filepath.Clean(dst)) + + if src == dst { + return nil + } + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + srcFile, ok := fs.files[src] + if !ok { + return os.ErrNotExist + } + + if srcFile.dir { + return os.ErrNotExist + } + + if fs.isDir(dst) { + return os.ErrInvalid + } + + dstFile, ok := fs.files[dst] + if ok { + fs.currentSize -= dstFile.size + } else { + dstFile = &memFile{ + memFileInfo: memFileInfo{ + name: dst, + dir: false, + size: srcFile.size, + lastMod: time.Now(), + }, + data: fs.dataPool.Get().(*bytes.Buffer), + } + } + + dstFile.data.Reset() + dstFile.data.Write(srcFile.data.Bytes()) + + fs.currentSize += dstFile.size + + fs.files[dst] = dstFile + + return nil +} + +func (fs *memFilesystem) Stat(path string) (FileInfo, error) { + path = fs.cleanPath(path) + + fs.filesLock.RLock() + defer fs.filesLock.RUnlock() + + return fs.stat(path) +} + +func (fs *memFilesystem) stat(path string) (FileInfo, error) { + file, ok := fs.files[path] + if ok { + f := &memFileInfo{ + name: file.name, + size: file.size, + dir: file.dir, + lastMod: file.lastMod, + linkTo: file.linkTo, + } + + if len(f.linkTo) != 0 { + file, ok := fs.files[f.linkTo] + if !ok { + return nil, os.ErrNotExist + } + + f.lastMod = file.lastMod + f.size = file.size + } + + return f, nil + } + + // Check for directories + if !fs.isDir(path) { + return nil, os.ErrNotExist + } + + f := &memFileInfo{ + name: path, + size: 0, + dir: true, + lastMod: time.Now(), + linkTo: "", + } + + return f, nil +} + +func (fs *memFilesystem) isDir(path string) bool { + file, ok := fs.files[path] + if ok { + return file.dir + } + + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + if path == "/" { + return true + } + + for k := range fs.files { + if strings.HasPrefix(k, path) { + return true + } + } + + return 
false +} + +func (fs *memFilesystem) Remove(path string) int64 { + path = fs.cleanPath(path) + fs.filesLock.Lock() defer fs.filesLock.Unlock() @@ -407,7 +652,7 @@ func (fs *memFilesystem) Delete(path string) int64 { return file.size } -func (fs *memFilesystem) DeleteAll() int64 { +func (fs *memFilesystem) RemoveAll() int64 { fs.filesLock.Lock() defer fs.filesLock.Unlock() @@ -419,19 +664,28 @@ func (fs *memFilesystem) DeleteAll() int64 { return size } -func (fs *memFilesystem) List(pattern string) []FileInfo { +func (fs *memFilesystem) List(path, pattern string) []FileInfo { + path = fs.cleanPath(path) files := []FileInfo{} fs.filesLock.RLock() defer fs.filesLock.RUnlock() for _, file := range fs.files { + if !strings.HasPrefix(file.name, path) { + continue + } + if len(pattern) != 0 { if ok, _ := glob.Match(pattern, file.name, '/'); !ok { continue } } + if file.dir { + continue + } + files = append(files, &memFileInfo{ name: file.name, size: file.size, @@ -442,3 +696,44 @@ func (fs *memFilesystem) List(pattern string) []FileInfo { return files } + +func (fs *memFilesystem) LookPath(file string) (string, error) { + if strings.Contains(file, "/") { + file = fs.cleanPath(file) + info, err := fs.Stat(file) + if err == nil { + if !info.Mode().IsRegular() { + return file, os.ErrNotExist + } + return file, nil + } + return "", os.ErrNotExist + } + path := os.Getenv("PATH") + for _, dir := range filepath.SplitList(path) { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." + } + path := filepath.Join(dir, file) + path = fs.cleanPath(path) + if info, err := fs.Stat(path); err == nil { + if !filepath.IsAbs(path) { + return path, os.ErrNotExist + } + if !info.Mode().IsRegular() { + return path, os.ErrNotExist + } + return path, nil + } + } + return "", os.ErrNotExist +} + +func (fs *memFilesystem) cleanPath(path string) string { + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + + return filepath.Join("/", filepath.Clean(path)) +} diff --git a/io/fs/mem_test.go b/io/fs/mem_test.go index 64794c10..d28a0d92 100644 --- a/io/fs/mem_test.go +++ b/io/fs/mem_test.go @@ -1,406 +1,30 @@ package fs import ( - "strings" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestNew(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - cur, max := mem.Size() - - assert.Equal(t, int64(0), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(0), cur) -} - -func TestSimplePutNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestSimpleDelete(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - size := mem.Delete("/foobar") - - assert.Equal(t, int64(-1), size) - - data := strings.NewReader("xxxxx") - - mem.Store("/foobar", data) - - size = mem.Delete("/foobar") - - assert.Equal(t, int64(5), size) - - cur, max := mem.Size() - - assert.Equal(t, int64(0), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(0), cur) -} - -func TestReplaceNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - 
Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) - - data = strings.NewReader("yyy") - - size, created, err = mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(3), size) - assert.Equal(t, false, created) - - cur, max = mem.Size() - - assert.Equal(t, int64(3), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestReplacePurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxx") - data2 := strings.NewReader("yyy") - data3 := strings.NewReader("zzz") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - - cur, max := mem.Size() - - assert.Equal(t, int64(9), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(3), cur) - - data4 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar1", data4) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(8), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestReplaceUnlimited(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 0, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(0), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) - - data = strings.NewReader("yyy") - - size, created, err = mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(3), size) - assert.Equal(t, false, created) - - cur, max = mem.Size() - - assert.Equal(t, int64(3), cur) - assert.Equal(t, int64(0), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestTooBigNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxxyyyyyz") - - size, _, _ := mem.Store("/foobar", data) - - assert.Equal(t, int64(-1), size) -} - -func TestTooBigPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - data := strings.NewReader("xxxxxyyyyyz") - - size, _, _ := mem.Store("/foobar", data) - - assert.Equal(t, int64(-1), size) -} - -func TestFullSpaceNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) - - data3 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar3", data3) - - assert.Equal(t, int64(-1), size) -} - -func TestFullSpacePurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := 
strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) - - data3 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar3", data3) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestFullSpacePurgeMulti(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxx") - data2 := strings.NewReader("yyy") - data3 := strings.NewReader("zzz") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - - cur, max := mem.Size() - - assert.Equal(t, int64(9), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(3), cur) - - data4 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar4", data4) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(8), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestPurgeOrder(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - data3 := strings.NewReader("zzzzz") - - mem.Store("/foobar1", data1) - time.Sleep(1 * time.Second) - mem.Store("/foobar2", data2) - time.Sleep(1 * time.Second) - mem.Store("/foobar3", data3) - - file := mem.Open("/foobar1") - - assert.Nil(t, file) -} - -func TestList(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data1 := strings.NewReader("a") - data2 := strings.NewReader("bb") - data3 := strings.NewReader("ccc") - data4 := strings.NewReader("dddd") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - mem.Store("/foobar4", data4) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(4), cur) - - files := mem.List("") - - assert.Equal(t, 4, len(files)) -} - -func TestData(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := "gduwotoxqb" - - data1 := strings.NewReader(data) - - mem.Store("/foobar", data1) - - file := mem.Open("/foobar") - - data2 := make([]byte, len(data)+1) - n, _ := file.Read(data2) - - assert.Equal(t, len(data), n) - assert.Equal(t, []byte(data), data2[:n]) +func TestMemFromDir(t *testing.T) { + mem, err := NewMemFilesystemFromDir(".", MemConfig{}) + require.NoError(t, err) + + names := []string{} + for _, f := range mem.List("/", "/*.go") { + names = append(names, f.Name()) + } + + require.ElementsMatch(t, []string{ + "/disk.go", + "/fs_test.go", + "/fs.go", + "/mem_test.go", + "/mem.go", + "/readonly_test.go", + "/readonly.go", + "/s3.go", + "/sized_test.go", + "/sized.go", + }, names) } diff --git a/io/fs/readonly.go b/io/fs/readonly.go new file mode 100644 index 00000000..889672a4 --- /dev/null +++ b/io/fs/readonly.go @@ -0,0 +1,54 @@ +package fs + +import ( + "io" + "os" +) + +type readOnlyFilesystem struct { + Filesystem +} + +func NewReadOnlyFilesystem(fs Filesystem) (Filesystem, error) { + r := &readOnlyFilesystem{ + Filesystem: fs, + } + + return r, nil +} + +func (r *readOnlyFilesystem) Symlink(oldname, 
newname string) error {
+	return os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) MkdirAll(path string, perm os.FileMode) error {
+	return os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) Remove(path string) int64 {
+	return -1
+}
+
+func (r *readOnlyFilesystem) RemoveAll() int64 {
+	return 0
+}
+
+func (r *readOnlyFilesystem) Purge(size int64) int64 {
+	return 0
+}
+
+func (r *readOnlyFilesystem) Resize(size int64) error {
+	return os.ErrPermission
+}
diff --git a/io/fs/readonly_test.go b/io/fs/readonly_test.go
new file mode 100644
index 00000000..13360b47
--- /dev/null
+++ b/io/fs/readonly_test.go
@@ -0,0 +1,50 @@
+package fs
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestReadOnly(t *testing.T) {
+	mem, err := NewMemFilesystemFromDir(".", MemConfig{})
+	require.NoError(t, err)
+
+	ro, err := NewReadOnlyFilesystem(mem)
+	require.NoError(t, err)
+
+	err = ro.Symlink("/readonly.go", "/foobar.go")
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFile("/readonly.go", []byte("foobar"))
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar"))
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFileSafe("/readonly.go", []byte("foobar"))
+	require.Error(t, err)
+
+	err = ro.MkdirAll("/foobar/baz", 0700)
+	require.Error(t, err)
+
+	res := ro.Remove("/readonly.go")
+	require.Equal(t, int64(-1), res)
+
+	res = ro.RemoveAll()
+	require.Equal(t, int64(0), res)
+
+	rop, ok := ro.(PurgeFilesystem)
+	require.True(t, ok, "must implement PurgeFilesystem")
+
+	size, _ := ro.Size()
+	res = rop.Purge(size)
+	require.Equal(t, int64(0), res)
+
+	ros, ok := ro.(SizedFilesystem)
+	require.True(t, ok, "must implement SizedFilesystem")
+
+	err = ros.Resize(100)
+	require.Error(t, err)
+}
diff --git a/io/fs/s3.go b/io/fs/s3.go
new file mode 100644
index 00000000..22c66d05
--- /dev/null
+++ b/io/fs/s3.go
@@ -0,0 +1,649 @@
+package fs
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/datarhei/core/v16/glob"
+	"github.com/datarhei/core/v16/log"
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+type S3Config struct {
+	// Name is the name of the filesystem
+	Name            string
+	Endpoint        string
+	AccessKeyID     string
+	SecretAccessKey string
+	Region          string
+	Bucket          string
+	UseSSL          bool
+
+	Logger log.Logger
+}
+
+type s3Filesystem struct {
+	metadata map[string]string
+	metaLock sync.RWMutex
+
+	name string
+
+	endpoint        string
+	accessKeyID     string
+	secretAccessKey string
+	region          string
+	bucket          string
+	useSSL          bool
+
+	client *minio.Client
+
+	logger log.Logger
+}
+
+var fakeDirEntry = "..."
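A note on fakeDirEntry, since the whole file leans on it: S3 has no real directories, so MkdirAll below materializes a directory as an empty marker object named <dir>/..., and Files, List, and cleanPath filter that marker back out. The convention, spelled out (illustrative keys only, not part of the patch):

// MkdirAll("/foo/bar")        -> stores the empty object "foo/bar/..."
// Files(), List("/foo", "")   -> skip every key ending in "/..."
// cleanPath("/foo/bar/...")   -> trims the marker and yields "foo/bar"
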
+
+func NewS3Filesystem(config S3Config) (Filesystem, error) {
+	fs := &s3Filesystem{
+		metadata:        make(map[string]string),
+		name:            config.Name,
+		endpoint:        config.Endpoint,
+		accessKeyID:     config.AccessKeyID,
+		secretAccessKey: config.SecretAccessKey,
+		region:          config.Region,
+		bucket:          config.Bucket,
+		useSSL:          config.UseSSL,
+		logger:          config.Logger,
+	}
+
+	if fs.logger == nil {
+		fs.logger = log.New("")
+	}
+
+	client, err := minio.New(fs.endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""),
+		Region: fs.region,
+		Secure: fs.useSSL,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err)
+	}
+
+	fs.logger = fs.logger.WithFields(log.Fields{
+		"name":     fs.name,
+		"type":     "s3",
+		"bucket":   fs.bucket,
+		"region":   fs.region,
+		"endpoint": fs.endpoint,
+	})
+
+	fs.logger.Debug().Log("Connected")
+
+	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
+	defer cancel()
+
+	exists, err := client.BucketExists(ctx, fs.bucket)
+	if err != nil {
+		fs.logger.WithError(err).Log("Can't access bucket")
+		return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err)
+	}
+
+	if exists {
+		fs.logger.Debug().Log("Bucket already exists")
+	} else {
+		fs.logger.Debug().Log("Bucket doesn't exist")
+		err = client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region})
+		if err != nil {
+			fs.logger.WithError(err).Log("Can't create bucket")
+			return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err)
+		} else {
+			fs.logger.Debug().Log("Bucket created")
+		}
+	}
+
+	fs.client = client
+
+	return fs, nil
+}
+
+func (fs *s3Filesystem) Name() string {
+	return fs.name
+}
+
+func (fs *s3Filesystem) Type() string {
+	return "s3"
+}
+
+func (fs *s3Filesystem) Metadata(key string) string {
+	fs.metaLock.RLock()
+	defer fs.metaLock.RUnlock()
+
+	return fs.metadata[key]
+}
+
+func (fs *s3Filesystem) SetMetadata(key, data string) {
+	fs.metaLock.Lock()
+	defer fs.metaLock.Unlock()
+
+	fs.metadata[key] = data
+}
+
+func (fs *s3Filesystem) Size() (int64, int64) {
+	size := int64(0)
+
+	files := fs.List("/", "")
+
+	for _, file := range files {
+		size += file.Size()
+	}
+
+	return size, -1
+}
+
+func (fs *s3Filesystem) Files() int64 {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
+		WithVersions: false,
+		WithMetadata: false,
+		Prefix:       "",
+		Recursive:    true,
+		MaxKeys:      0,
+		StartAfter:   "",
+		UseV1:        false,
+	})
+
+	nfiles := int64(0)
+
+	for object := range ch {
+		if object.Err != nil {
+			fs.logger.WithError(object.Err).Log("Listing object failed")
+		}
+
+		if strings.HasSuffix("/"+object.Key, "/"+fakeDirEntry) {
+			// Skip fake entries (see MkdirAll)
+			continue
+		}
+
+		nfiles++
+	}
+
+	return nfiles
+}
+
+func (fs *s3Filesystem) Symlink(oldname, newname string) error {
+	return fmt.Errorf("not implemented")
+}
+
+func (fs *s3Filesystem) Stat(path string) (FileInfo, error) {
+	path = fs.cleanPath(path)
+
+	if len(path) == 0 {
+		return &s3FileInfo{
+			name:         "/",
+			size:         0,
+			dir:          true,
+			lastModified: time.Now(),
+		}, nil
+	}
+
+	ctx := context.Background()
+
+	object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
+	if err != nil {
+		if fs.isDir(path) {
+			return &s3FileInfo{
+				name:         "/" + path,
+				size:         0,
+				dir:          true,
+				lastModified: time.Now(),
+			}, nil
+		}
+
+		fs.logger.Debug().WithField("key", path).WithError(err).Log("Not found")
+		return
nil, err + } + + defer object.Close() + + stat, err := object.Stat() + if err != nil { + if fs.isDir(path) { + return &s3FileInfo{ + name: "/" + path, + size: 0, + dir: true, + lastModified: time.Now(), + }, nil + } + + fs.logger.Debug().WithField("key", path).WithError(err).Log("Stat failed") + return nil, err + } + + return &s3FileInfo{ + name: "/" + stat.Key, + size: stat.Size, + lastModified: stat.LastModified, + }, nil +} + +func (fs *s3Filesystem) Open(path string) File { + path = fs.cleanPath(path) + ctx := context.Background() + + object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{}) + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Not found") + return nil + } + + stat, err := object.Stat() + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Stat failed") + return nil + } + + file := &s3File{ + data: object, + name: "/" + stat.Key, + size: stat.Size, + lastModified: stat.LastModified, + } + + fs.logger.Debug().WithField("key", stat.Key).Log("Opened") + + return file +} + +func (fs *s3Filesystem) ReadFile(path string) ([]byte, error) { + path = fs.cleanPath(path) + file := fs.Open(path) + if file == nil { + return nil, os.ErrNotExist + } + + defer file.Close() + + buf := &bytes.Buffer{} + + _, err := buf.ReadFrom(file) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (fs *s3Filesystem) write(path string, r io.Reader) (int64, bool, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + overwrite := false + + _, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{}) + if err == nil { + overwrite = true + } + + info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{ + UserMetadata: map[string]string{}, + UserTags: map[string]string{}, + Progress: nil, + ContentType: "", + ContentEncoding: "", + ContentDisposition: "", + ContentLanguage: "", + CacheControl: "", + Mode: "", + RetainUntilDate: time.Time{}, + ServerSideEncryption: nil, + NumThreads: 0, + StorageClass: "", + WebsiteRedirectLocation: "", + PartSize: 0, + LegalHold: "", + SendContentMd5: false, + DisableContentSha256: false, + DisableMultipart: false, + Internal: minio.AdvancedPutOptions{}, + }) + if err != nil { + fs.logger.WithError(err).WithField("key", path).Log("Failed to store file") + return -1, false, err + } + + fs.logger.Debug().WithFields(log.Fields{ + "key": path, + "overwrite": overwrite, + }).Log("Stored") + + return info.Size, !overwrite, nil +} + +func (fs *s3Filesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { + path = fs.cleanPath(path) + return fs.write(path, r) +} + +func (fs *s3Filesystem) WriteFile(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *s3Filesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *s3Filesystem) Rename(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + err := fs.Copy(src, dst) + if err != nil { + return err + } + + res := fs.Remove(src) + if res == -1 { + return fmt.Errorf("failed to remove source file: %s", src) + } + + return nil +} + +func (fs *s3Filesystem) Copy(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := fs.client.CopyObject(ctx, minio.CopyDestOptions{ + Bucket: 
fs.bucket, + Object: dst, + }, minio.CopySrcOptions{ + Bucket: fs.bucket, + Object: src, + }) + + return err +} + +func (fs *s3Filesystem) MkdirAll(path string, perm os.FileMode) error { + if path == "/" { + return nil + } + + info, err := fs.Stat(path) + if err == nil { + if !info.IsDir() { + return os.ErrExist + } + + return nil + } + + path = filepath.Join(path, fakeDirEntry) + + _, _, err = fs.write(path, strings.NewReader("")) + if err != nil { + return fmt.Errorf("can't create directory") + } + + return nil +} + +func (fs *s3Filesystem) Remove(path string) int64 { + path = fs.cleanPath(path) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{}) + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Not found") + return -1 + } + + err = fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{ + GovernanceBypass: true, + }) + if err != nil { + fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file") + return -1 + } + + fs.logger.Debug().WithField("key", stat.Key).Log("Deleted") + + return stat.Size +} + +func (fs *s3Filesystem) RemoveAll() int64 { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + totalSize := int64(0) + + objectsCh := make(chan minio.ObjectInfo) + + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh) + + for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{ + Recursive: true, + }) { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + continue + } + totalSize += object.Size + objectsCh <- object + } + }() + + for err := range fs.client.RemoveObjects(context.Background(), fs.bucket, objectsCh, minio.RemoveObjectsOptions{ + GovernanceBypass: true, + }) { + fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed") + } + + fs.logger.Debug().Log("Deleted all files") + + return totalSize +} + +func (fs *s3Filesystem) List(path, pattern string) []FileInfo { + path = fs.cleanPath(path) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{ + WithVersions: false, + WithMetadata: false, + Prefix: path, + Recursive: true, + MaxKeys: 0, + StartAfter: "", + UseV1: false, + }) + + files := []FileInfo{} + + for object := range ch { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + continue + } + + key := "/" + object.Key + if strings.HasSuffix(key, "/"+fakeDirEntry) { + // filter out fake directory entries (see MkdirAll) + continue + } + + if len(pattern) != 0 { + if ok, _ := glob.Match(pattern, key, '/'); !ok { + continue + } + } + + f := &s3FileInfo{ + name: key, + size: object.Size, + lastModified: object.LastModified, + } + + files = append(files, f) + } + + return files +} + +func (fs *s3Filesystem) LookPath(file string) (string, error) { + if strings.Contains(file, "/") { + file = fs.cleanPath(file) + info, err := fs.Stat(file) + if err == nil { + if !info.Mode().IsRegular() { + return file, os.ErrNotExist + } + return file, nil + } + return "", os.ErrNotExist + } + path := os.Getenv("PATH") + for _, dir := range filepath.SplitList(path) { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." 
+ } + path := filepath.Join(dir, file) + path = fs.cleanPath(path) + if info, err := fs.Stat(path); err == nil { + if !filepath.IsAbs(path) { + return path, os.ErrNotExist + } + if !info.Mode().IsRegular() { + return path, os.ErrNotExist + } + return path, nil + } + } + return "", os.ErrNotExist +} + +func (fs *s3Filesystem) isDir(path string) bool { + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + if path == "/" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{ + WithVersions: false, + WithMetadata: false, + Prefix: path, + Recursive: true, + MaxKeys: 1, + StartAfter: "", + UseV1: false, + }) + + files := uint64(0) + + for object := range ch { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + continue + } + + files++ + } + + return files > 0 +} + +func (fs *s3Filesystem) cleanPath(path string) string { + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + + path = strings.TrimSuffix(path, "/"+fakeDirEntry) + + return filepath.Join("/", filepath.Clean(path))[1:] +} + +type s3FileInfo struct { + name string + size int64 + dir bool + lastModified time.Time +} + +func (f *s3FileInfo) Name() string { + return f.name +} + +func (f *s3FileInfo) Size() int64 { + return f.size +} + +func (f *s3FileInfo) Mode() os.FileMode { + return fs.FileMode(fs.ModePerm) +} + +func (f *s3FileInfo) ModTime() time.Time { + return f.lastModified +} + +func (f *s3FileInfo) IsLink() (string, bool) { + return "", false +} + +func (f *s3FileInfo) IsDir() bool { + return f.dir +} + +type s3File struct { + data io.ReadCloser + name string + size int64 + lastModified time.Time +} + +func (f *s3File) Read(p []byte) (int, error) { + return f.data.Read(p) +} + +func (f *s3File) Close() error { + return f.data.Close() +} + +func (f *s3File) Name() string { + return f.name +} + +func (f *s3File) Stat() (FileInfo, error) { + return &s3FileInfo{ + name: f.name, + size: f.size, + lastModified: f.lastModified, + }, nil +} diff --git a/io/fs/sized.go b/io/fs/sized.go new file mode 100644 index 00000000..366ef6f5 --- /dev/null +++ b/io/fs/sized.go @@ -0,0 +1,168 @@ +package fs + +import ( + "bytes" + "fmt" + "io" +) + +type SizedFilesystem interface { + Filesystem + + // Resize resizes the filesystem to the new size. Files may need to be deleted. + Resize(size int64) error +} + +type PurgeFilesystem interface { + // Purge will free up at least size number of bytes and returns the actual + // freed space in bytes. + Purge(size int64) int64 +} + +type sizedFilesystem struct { + Filesystem + + // Size is the capacity of the filesystem in bytes + maxSize int64 + + // Set true to automatically delete the oldest files until there's + // enough space to store a new file + purge bool +} + +var _ PurgeFilesystem = &sizedFilesystem{} + +func NewSizedFilesystem(fs Filesystem, maxSize int64, purge bool) (SizedFilesystem, error) { + r := &sizedFilesystem{ + Filesystem: fs, + maxSize: maxSize, + purge: purge, + } + + return r, nil +} + +func (r *sizedFilesystem) Size() (int64, int64) { + currentSize, _ := r.Filesystem.Size() + + return currentSize, r.maxSize +} + +func (r *sizedFilesystem) Resize(size int64) error { + currentSize, _ := r.Size() + if size >= currentSize { + // If the new size is the same or larger than the current size, + // nothing to do. 
+		r.maxSize = size
+		return nil
+	}
+
+	// If the new size is less than the current size, purge some files.
+	r.Purge(currentSize - size)
+
+	r.maxSize = size
+
+	return nil
+}
+
+func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
+	currentSize, maxSize := r.Size()
+	if maxSize <= 0 {
+		return r.Filesystem.WriteFileReader(path, rd)
+	}
+
+	data := bytes.Buffer{}
+	size, err := data.ReadFrom(rd)
+	if err != nil {
+		return -1, false, err
+	}
+
+	// reject if the new file is larger than the available space
+	if size > maxSize {
+		return -1, false, fmt.Errorf("File is too big")
+	}
+
+	// Calculate the new size of the filesystem
+	newSize := currentSize + size
+
+	// If the new size is larger than the allowed size, we have to free
+	// some space.
+	if newSize > maxSize {
+		if !r.purge {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+
+		if r.Purge(size) < size {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+	}
+
+	return r.Filesystem.WriteFileReader(path, &data)
+}
+
+func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
+	return r.WriteFileReader(path, bytes.NewBuffer(data))
+}
+
+func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
+	currentSize, maxSize := r.Size()
+	if maxSize <= 0 {
+		return r.Filesystem.WriteFile(path, data)
+	}
+
+	size := int64(len(data))
+
+	// reject if the new file is larger than the available space
+	if size > maxSize {
+		return -1, false, fmt.Errorf("File is too big")
+	}
+
+	// Calculate the new size of the filesystem
+	newSize := currentSize + size
+
+	// If the new size is larger than the allowed size, we have to free
+	// some space.
+	if newSize > maxSize {
+		if !r.purge {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+
+		if r.Purge(size) < size {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+	}
+
+	return r.Filesystem.WriteFileSafe(path, data)
+}
+
+func (r *sizedFilesystem) Purge(size int64) int64 {
+	if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
+		return purger.Purge(size)
+	}
+
+	return 0
+	/*
+		files := r.Filesystem.List("/", "")
+
+		sort.Slice(files, func(i, j int) bool {
+			return files[i].ModTime().Before(files[j].ModTime())
+		})
+
+		var freed int64 = 0
+
+		for _, f := range files {
+			r.Filesystem.Remove(f.Name())
+			size -= f.Size()
+			freed += f.Size()
+			r.currentSize -= f.Size()
+
+			if size <= 0 {
+				break
+			}
+		}
+
+		files = nil
+
+		return freed
+	*/
+}
diff --git a/io/fs/sized_test.go b/io/fs/sized_test.go
new file mode 100644
index 00000000..e158c422
--- /dev/null
+++ b/io/fs/sized_test.go
@@ -0,0 +1,350 @@
+package fs
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func newMemFS() Filesystem {
+	mem, _ := NewMemFilesystem(MemConfig{})
+
+	return mem
+}
+
+func TestNewSized(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(0), cur)
+}
+
+func TestSizedResize(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	err := fs.Resize(20)
+	require.NoError(t, err)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(20), max)
+}
+
+func TestSizedResizePurge(t *testing.T) {
+	fs, _ :=
NewSizedFilesystem(newMemFS(), 10, false) + + cur, max := fs.Size() + + require.Equal(t, int64(0), cur) + require.Equal(t, int64(10), max) + + fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx")) + + cur, max = fs.Size() + + require.Equal(t, int64(10), cur) + require.Equal(t, int64(10), max) + + err := fs.Resize(5) + require.NoError(t, err) + + cur, max = fs.Size() + + require.Equal(t, int64(0), cur) + require.Equal(t, int64(5), max) +} + +func TestSizedWrite(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, false) + + cur, max := fs.Size() + + require.Equal(t, int64(0), cur) + require.Equal(t, int64(10), max) + + size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx")) + require.NoError(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max = fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(10), max) + + _, _, err = fs.WriteFile("/foobaz", []byte("xxxxxx")) + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx")) + require.Error(t, err) + + _, _, err = fs.WriteFileSafe("/foobaz", []byte("xxxxxx")) + require.Error(t, err) +} + +func TestSizedReplaceNoPurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, false) + + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) + + data = strings.NewReader("yyy") + + size, created, err = fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(3), size) + require.Equal(t, false, created) + + cur, max = fs.Size() + + require.Equal(t, int64(3), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func TestSizedReplacePurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, true) + + data1 := strings.NewReader("xxx") + data2 := strings.NewReader("yyy") + data3 := strings.NewReader("zzz") + + fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar3", data3) + + cur, max := fs.Size() + + require.Equal(t, int64(9), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(3), cur) + + data4 := strings.NewReader("zzzzz") + + size, _, _ := fs.WriteFileReader("/foobar1", data4) + + require.Equal(t, int64(5), size) + + cur, max = fs.Size() + + require.Equal(t, int64(8), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(2), cur) +} + +func TestSizedReplaceUnlimited(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), -1, false) + + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) + + data = strings.NewReader("yyy") + + size, created, err = fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(3), size) + require.Equal(t, false, created) + + cur, max = fs.Size() + + require.Equal(t, int64(3), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + 
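These tests pin down the new composition: the Size/Purge knobs that the old MemConfig carried are now supplied by wrapping any Filesystem in the sized decorator. A sketch of what replaces the former NewMemFilesystem(MemConfig{Size: 10, Purge: true}), using only constructors from this patch (error handling trimmed for brevity):

mem, _ := NewMemFilesystem(MemConfig{})       // unbounded in-memory backing store
sized, _ := NewSizedFilesystem(mem, 10, true) // 10-byte cap, purge oldest files on overflow

sized.WriteFileReader("/a", strings.NewReader("xxxxx"))
sized.WriteFileReader("/b", strings.NewReader("yyyyy"))
sized.WriteFileReader("/c", strings.NewReader("zzzzz")) // evicts /a, the oldest file
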
+func TestSizedTooBigNoPurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, false) + + data := strings.NewReader("xxxxxyyyyyz") + + size, _, err := fs.WriteFileReader("/foobar", data) + require.Error(t, err) + require.Equal(t, int64(-1), size) +} + +func TestSizedTooBigPurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, true) + + data1 := strings.NewReader("xxxxx") + data2 := strings.NewReader("yyyyy") + + fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar2", data2) + + data := strings.NewReader("xxxxxyyyyyz") + + size, _, err := fs.WriteFileReader("/foobar", data) + require.Error(t, err) + require.Equal(t, int64(-1), size) + + require.Equal(t, int64(2), fs.Files()) +} + +func TestSizedFullSpaceNoPurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, false) + + data1 := strings.NewReader("xxxxx") + data2 := strings.NewReader("yyyyy") + + fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar2", data2) + + cur, max := fs.Size() + + require.Equal(t, int64(10), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(2), cur) + + data3 := strings.NewReader("zzzzz") + + size, _, err := fs.WriteFileReader("/foobar3", data3) + require.Error(t, err) + require.Equal(t, int64(-1), size) +} + +func TestSizedFullSpacePurge(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, true) + + data1 := strings.NewReader("xxxxx") + data2 := strings.NewReader("yyyyy") + + fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar2", data2) + + cur, max := fs.Size() + + require.Equal(t, int64(10), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(2), cur) + + data3 := strings.NewReader("zzzzz") + + size, _, _ := fs.WriteFileReader("/foobar3", data3) + + require.Equal(t, int64(5), size) + + cur, max = fs.Size() + + require.Equal(t, int64(10), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(2), cur) +} + +func TestSizedFullSpacePurgeMulti(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, true) + + data1 := strings.NewReader("xxx") + data2 := strings.NewReader("yyy") + data3 := strings.NewReader("zzz") + + fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar3", data3) + + cur, max := fs.Size() + + require.Equal(t, int64(9), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(3), cur) + + data4 := strings.NewReader("zzzzz") + + size, _, _ := fs.WriteFileReader("/foobar4", data4) + + require.Equal(t, int64(5), size) + + cur, max = fs.Size() + + require.Equal(t, int64(8), cur) + require.Equal(t, int64(10), max) + + cur = fs.Files() + + require.Equal(t, int64(2), cur) +} + +func TestSizedPurgeOrder(t *testing.T) { + fs, _ := NewSizedFilesystem(newMemFS(), 10, true) + + data1 := strings.NewReader("xxxxx") + data2 := strings.NewReader("yyyyy") + data3 := strings.NewReader("zzzzz") + + fs.WriteFileReader("/foobar1", data1) + time.Sleep(1 * time.Second) + fs.WriteFileReader("/foobar2", data2) + time.Sleep(1 * time.Second) + fs.WriteFileReader("/foobar3", data3) + + file := fs.Open("/foobar1") + + require.Nil(t, file) +} diff --git a/log/log.go b/log/log.go index be226028..14a78e2c 100644 --- a/log/log.go +++ b/log/log.go @@ -103,7 +103,6 @@ type Logger interface { type logger struct { output Writer component string - topics map[string]struct{} } // New returns an implementation of the Logger interface. 
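For context on the log.go hunks that follow: the per-logger topics map is removed; topic filtering now composes at the writer level instead of being copied into every logger clone. A wiring sketch, with signatures taken from log/writer_test.go further down (the buffer writer stands in for any other Writer):

buf := log.NewBufferWriter(log.Linfo, 10)             // buffer writer as used in the tests below
topics := log.NewTopicWriter(buf, []string{"foobar"}) // passes only events whose component matches a listed topic
logger := log.New("foobar").WithOutput(topics)

logger.Info().Log("reaches the buffer")                    // component "foobar" is a topic
log.New("other").WithOutput(topics).Info().Log("dropped") // component "other" is not
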
@@ -121,14 +120,6 @@ func (l *logger) clone() *logger { component: l.component, } - if len(l.topics) != 0 { - clone.topics = make(map[string]struct{}) - - for topic := range l.topics { - clone.topics[topic] = struct{}{} - } - } - return clone } diff --git a/log/log_test.go b/log/log_test.go index 1a04a1f0..3ed0910c 100644 --- a/log/log_test.go +++ b/log/log_test.go @@ -5,15 +5,15 @@ import ( "bytes" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLoglevelNames(t *testing.T) { - assert.Equal(t, "DEBUG", Ldebug.String()) - assert.Equal(t, "ERROR", Lerror.String()) - assert.Equal(t, "WARN", Lwarn.String()) - assert.Equal(t, "INFO", Linfo.String()) - assert.Equal(t, `SILENT`, Lsilent.String()) + require.Equal(t, "DEBUG", Ldebug.String()) + require.Equal(t, "ERROR", Lerror.String()) + require.Equal(t, "WARN", Lwarn.String()) + require.Equal(t, "INFO", Linfo.String()) + require.Equal(t, `SILENT`, Lsilent.String()) } func TestLogColorToNotTTY(t *testing.T) { @@ -23,7 +23,7 @@ func TestLogColorToNotTTY(t *testing.T) { w := NewConsoleWriter(writer, Linfo, true).(*syncWriter) formatter := w.writer.(*consoleWriter).formatter.(*consoleFormatter) - assert.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger") + require.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger") } func TestLogContext(t *testing.T) { @@ -53,7 +53,7 @@ func TestLogContext(t *testing.T) { lenWithoutCtx := buffer.Len() buffer.Reset() - assert.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context") + require.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context") } func TestLogClone(t *testing.T) { @@ -65,7 +65,7 @@ func TestLogClone(t *testing.T) { logger.Info().Log("info") writer.Flush() - assert.Contains(t, buffer.String(), `component="test"`) + require.Contains(t, buffer.String(), `component="test"`) buffer.Reset() @@ -74,7 +74,7 @@ func TestLogClone(t *testing.T) { logger2.Info().Log("info") writer.Flush() - assert.Contains(t, buffer.String(), `component="tset"`) + require.Contains(t, buffer.String(), `component="tset"`) } func TestLogSilent(t *testing.T) { @@ -85,22 +85,22 @@ func TestLogSilent(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() } @@ -112,22 +112,22 @@ func TestLogDebug(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, 
buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -139,22 +139,22 @@ func TestLogInfo(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -166,22 +166,22 @@ func TestLogWarn(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -193,21 +193,43 @@ func TestLogError(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } + +func TestLogWithField(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + + logger := New("test").WithOutput(bufwriter) + logger = logger.WithField("foo", "bar") + logger.Info().Log("hello") + + events := bufwriter.Events() + + require.Equal(t, 1, len(events)) + require.Empty(t, events[0].err) + require.Equal(t, "bar", events[0].Data["foo"]) + + logger = logger.WithField("func", func() bool { return true }) + logger.Info().Log("hello") + + events = bufwriter.Events() + require.Equal(t, 2, len(events)) + require.NotEmpty(t, events[1].err) + require.Equal(t, "bar", events[0].Data["foo"]) +} diff --git a/log/writer_test.go b/log/writer_test.go new file mode 100644 index 00000000..7951cf29 --- /dev/null +++ b/log/writer_test.go @@ -0,0 +1,181 @@ +package log + +import ( + "bytes" + "testing" + 
"time" + + "github.com/stretchr/testify/require" +) + +func TestJSONWriter(t *testing.T) { + buffer := bytes.Buffer{} + + writer := NewJSONWriter(&buffer, Linfo) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, `{"Time":"2009-11-10T23:00:00Z","Level":"INFO","Component":"test","Caller":"me","Message":"hello world","Data":{"caller":"me","component":"test","foo":"bar","message":"hello world","ts":"2009-11-10T23:00:00Z"}}`, buffer.String()) +} + +func TestConsoleWriter(t *testing.T) { + buffer := bytes.Buffer{} + + writer := NewConsoleWriter(&buffer, Linfo, false) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, `ts=2009-11-10T23:00:00Z level=INFO component="test" msg="hello world" foo="bar"`+"\n", buffer.String()) +} + +func TestTopicWriter(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + writer1 := NewTopicWriter(bufwriter, []string{}) + writer2 := NewTopicWriter(bufwriter, []string{"foobar"}) + + writer1.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + writer2.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 1, len(bufwriter.Events())) + + writer1.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + writer2.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 3, len(bufwriter.Events())) +} + +func TestMultiwriter(t *testing.T) { + bufwriter1 := NewBufferWriter(Linfo, 10) + bufwriter2 := NewBufferWriter(Linfo, 10) + + writer := NewMultiWriter(bufwriter1, bufwriter2) + + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 1, len(bufwriter1.Events())) + require.Equal(t, 1, len(bufwriter2.Events())) +} + +func TestLevelRewriter(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + + rule := LevelRewriteRule{ + Level: Lwarn, + Component: "foobar", + Match: map[string]string{ + "foo": "bar", + }, + } + + writer := NewLevelRewriter(bufwriter, []LevelRewriteRule{rule}) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + 
+	events := bufwriter.Events()
+
+	require.Equal(t, 1, len(events))
+	require.Equal(t, Lwarn, events[0].Level)
+
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"bar": "foo"},
+	})
+
+	events = bufwriter.Events()
+
+	require.Equal(t, 2, len(events))
+	require.Equal(t, Linfo, events[1].Level)
+
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	events = bufwriter.Events()
+
+	require.Equal(t, 3, len(events))
+	require.Equal(t, Linfo, events[2].Level)
+}
diff --git a/main.go b/main.go
index 4606f67d..377af7e5 100644
--- a/main.go
+++ b/main.go
@@ -5,6 +5,7 @@ import (
 	"os/signal"
 
 	"github.com/datarhei/core/v16/app/api"
+	"github.com/datarhei/core/v16/config/store"
 	"github.com/datarhei/core/v16/log"
 
 	_ "github.com/joho/godotenv/autoload"
@@ -13,7 +14,9 @@ import (
 func main() {
 	logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, true))
 
-	app, err := api.New(os.Getenv("CORE_CONFIGFILE"), os.Stderr)
+	configfile := store.Location(os.Getenv("CORE_CONFIGFILE"))
+
+	app, err := api.New(configfile, os.Stderr)
 	if err != nil {
 		logger.Error().WithError(err).Log("Failed to create new API")
 		os.Exit(1)
diff --git a/monitor/metric/metric.go b/monitor/metric/metric.go
index a327c6d0..f2e88e42 100644
--- a/monitor/metric/metric.go
+++ b/monitor/metric/metric.go
@@ -12,7 +12,7 @@ type Pattern interface {
 	Name() string
 
 	// Match returns whether a map of labels with its label values
-	// match this pattern.
+	// match this pattern. All labels have to be present and need to match.
 	Match(labels map[string]string) bool
 
 	// IsValid returns whether the pattern is valid.
@@ -26,7 +26,7 @@ type pattern struct {
 }
 
 // NewPattern creates a new pattern with the given prefix and group name. There
-// has to be an even number of parameter, which is ("label", "labelvalue", "label",
+// has to be an even number of labels, which is ("label", "labelvalue", "label",
 // "labelvalue" ...). The label value will be interpreted as regular expression.
 func NewPattern(name string, labels ...string) Pattern {
 	p := &pattern{
@@ -38,7 +38,6 @@ func NewPattern(name string, labels ...string) Pattern {
 	for i := 0; i < len(labels); i += 2 {
 		exp, err := regexp.Compile(labels[i+1])
 		if err != nil {
-			fmt.Printf("error: %s\n", err)
 			continue
 		}
 
@@ -84,19 +83,35 @@ func (p *pattern) IsValid() bool {
 	return p.valid
 }
 
+// Metrics is a collection of values.
 type Metrics interface {
+	// Value returns the first value that matches the name and the labels. The labels
+	// are used to create a pattern and therefore must obey the rules of NewPattern.
 	Value(name string, labels ...string) Value
+
+	// Values returns all values that match the name and the labels. The labels
+	// are used to create a pattern and therefore must obey the rules of NewPattern.
 	Values(name string, labels ...string) []Value
+
+	// Labels returns a list of all values for a label.
 	Labels(name string, label string) []string
+
+	// All returns all values currently stored in the collection.
 	All() []Value
+
+	// Add adds a value to the collection.
 	Add(v Value)
+
+	// String returns a string representation of all collected values.
String() string } +// metrics is an implementation of the Metrics interface. type metrics struct { values []Value } +// NewMetrics returns a new metrics instance. func NewMetrics() *metrics { return &metrics{} } @@ -231,8 +246,15 @@ func (v *value) Hash() string { func (v *value) String() string { s := fmt.Sprintf("%s: %f {", v.name, v.value) - for k, v := range v.labels { - s += k + "=" + v + " " + keys := []string{} + for k := range v.labels { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, k := range keys { + s += k + "=" + v.labels[k] + " " } s += "}" diff --git a/monitor/metric/metric_test.go b/monitor/metric/metric_test.go index 743739a7..615ce7cb 100644 --- a/monitor/metric/metric_test.go +++ b/monitor/metric/metric_test.go @@ -2,25 +2,154 @@ package metric import ( "testing" + + "github.com/stretchr/testify/require" ) +func TestPattern(t *testing.T) { + p := NewPattern("bla", "label1", "value1", "label2") + require.Equal(t, false, p.IsValid()) + + p = NewPattern("bla", "label1", "value1", "label2", "valu(e2") + require.Equal(t, false, p.IsValid()) + + p = NewPattern("bla") + require.Equal(t, true, p.IsValid()) + require.Equal(t, "bla", p.Name()) + + p = NewPattern("bla", "label1", "value1", "label2", "value2") + require.Equal(t, true, p.IsValid()) +} + +func TestPatternMatch(t *testing.T) { + p := NewPattern("bla", "label1", "value1", "label2") + require.Equal(t, false, p.IsValid()) + require.Equal(t, false, p.Match(map[string]string{"label1": "value1"})) + + p0 := NewPattern("bla") + require.Equal(t, true, p0.IsValid()) + require.Equal(t, true, p0.Match(map[string]string{})) + require.Equal(t, true, p0.Match(map[string]string{"labelX": "foobar"})) + + p = NewPattern("bla", "label1", "value.", "label2", "val?ue2") + require.Equal(t, true, p.IsValid()) + require.Equal(t, false, p.Match(map[string]string{})) + require.Equal(t, false, p.Match(map[string]string{"label1": "value1"})) + require.Equal(t, true, p.Match(map[string]string{"label1": "value1", "label2": "value2"})) + require.Equal(t, true, p.Match(map[string]string{"label1": "value5", "label2": "vaue2"})) +} + func TestValue(t *testing.T) { - d := NewDesc("group", "", []string{"name"}) + d := NewDesc("group", "", []string{"label1", "label2"}) v := NewValue(d, 42, "foobar") + require.Nil(t, v) + + v = NewValue(d, 42, "foobar", "foobaz") + require.NotNil(t, v) + require.Equal(t, float64(42), v.Val()) + + require.Equal(t, "", v.L("labelX")) + require.Equal(t, "foobar", v.L("label1")) + require.Equal(t, "foobaz", v.L("label2")) + require.Equal(t, "group", v.Name()) + require.Equal(t, "group:label1=foobar label2=foobaz ", v.Hash()) + require.Equal(t, "group: 42.000000 {label1=foobar label2=foobaz }", v.String()) - if v.L("name") != "foobar" { - t.Fatalf("label name doesn't have the expected value") - } + require.Equal(t, map[string]string{"label1": "foobar", "label2": "foobaz"}, v.Labels()) +} + +func TestValuePattern(t *testing.T) { + d := NewDesc("group", "", []string{"label1", "label2"}) + v := NewValue(d, 42, "foobar", "foobaz") p1 := NewPattern("group") + p2 := NewPattern("group", "label1", "foobar") + p3 := NewPattern("group", "label2", "foobaz") + p4 := NewPattern("group", "label2", "foobaz", "label1", "foobar") + + require.Equal(t, true, v.Match(nil)) + require.Equal(t, true, v.Match([]Pattern{p1})) + require.Equal(t, true, v.Match([]Pattern{p2})) + require.Equal(t, true, v.Match([]Pattern{p3})) + require.Equal(t, true, v.Match([]Pattern{p4})) + require.Equal(t, true, v.Match([]Pattern{p1, p2, p3, p4})) + + 
p5 := NewPattern("group", "label1", "foobaz") + + require.Equal(t, false, v.Match([]Pattern{p5})) + + require.Equal(t, true, v.Match([]Pattern{p4, p5})) + require.Equal(t, true, v.Match([]Pattern{p5, p4})) +} + +func TestDescription(t *testing.T) { + d := NewDesc("name", "blabla", []string{"label"}) + + require.Equal(t, "name", d.Name()) + require.Equal(t, "blabla", d.Description()) + require.ElementsMatch(t, []string{"label"}, d.Labels()) + require.Equal(t, "name: blabla (label)", d.String()) +} + +func TestMetrics(t *testing.T) { + m := NewMetrics() + + require.Equal(t, "", m.String()) + require.Equal(t, 0, len(m.All())) + + d := NewDesc("group", "", []string{"label1", "label2"}) + v1 := NewValue(d, 42, "foobar", "foobaz") + require.NotNil(t, v1) + + m.Add(v1) + + require.Equal(t, v1.String(), m.String()) + require.Equal(t, 1, len(m.All())) + + l := m.Labels("group", "label2") + + require.ElementsMatch(t, []string{"foobaz"}, l) + + v2 := NewValue(d, 77, "barfoo", "bazfoo") + + m.Add(v2) + + require.Equal(t, v1.String()+v2.String(), m.String()) + require.Equal(t, 2, len(m.All())) + + l = m.Labels("group", "label2") + + require.ElementsMatch(t, []string{"foobaz", "bazfoo"}, l) + + v := m.Value("bla", "label1", "foo*") + + require.Equal(t, nullValue, v) + + v = m.Value("group") + + require.NotEqual(t, nullValue, v) + + v = m.Value("group", "label1", "foo*") + + require.NotEqual(t, nullValue, v) + + v = m.Value("group", "label2", "baz") + + require.NotEqual(t, nullValue, v) + + vs := m.Values("group") + + require.Equal(t, 2, len(vs)) + + vs = m.Values("group", "label1", "foo*") + + require.Equal(t, 2, len(vs)) + + vs = m.Values("group", "label2", "*baz*") - if v.Match([]Pattern{p1}) == false { - t.Fatalf("pattern p1 should have matched") - } + require.NotEqual(t, 2, len(vs)) - p2 := NewPattern("group", "name", "foobar") + vs = m.Values("group", "label1") - if v.Match([]Pattern{p2}) == false { - t.Fatalf("pattern p2 should have matched") - } + require.Equal(t, 0, len(vs)) } diff --git a/net/ip_test.go b/net/ip_test.go index eaca6bc3..bd9bd575 100644 --- a/net/ip_test.go +++ b/net/ip_test.go @@ -3,18 +3,27 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAnonymizeIPString(t *testing.T) { + _, err := AnonymizeIPString("127.987.475.21") + require.Error(t, err) + + _, err = AnonymizeIPString("bbd1:xxxx") + require.Error(t, err) + + _, err = AnonymizeIPString("hello-world") + require.Error(t, err) + ipv4 := "192.168.1.42" ipv6 := "bbd1:e95a:adbb:b29a:e38b:577f:6f9a:1fa7" anonymizedIPv4, err := AnonymizeIPString(ipv4) - assert.Nil(t, err) - assert.Equal(t, "192.168.1.0", anonymizedIPv4) + require.NoError(t, err) + require.Equal(t, "192.168.1.0", anonymizedIPv4) anonymizedIPv6, err := AnonymizeIPString(ipv6) - assert.Nil(t, err) - assert.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6) + require.NoError(t, err) + require.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6) } diff --git a/net/iplimit_test.go b/net/iplimit_test.go index 7016cc12..3e6a2d1c 100644 --- a/net/iplimit_test.go +++ b/net/iplimit_test.go @@ -3,57 +3,63 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIPLimiterNew(t *testing.T) { var err error _, err = NewIPLimiter([]string{}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{"::1/128", "127.0.0.1/32", ""}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{},
[]string{"::1/128", "127.0.0.1/32", ""}) - assert.Nil(t, err) + require.Nil(t, err) } func TestIPLimiterError(t *testing.T) { var err error _, err = NewIPLimiter([]string{}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{"::1"}, []string{}) - assert.NotNil(t, err, "Should not accept invalid IP") + require.NotNil(t, err, "Should not accept invalid IP") _, err = NewIPLimiter([]string{}, []string{"::1"}) - assert.NotNil(t, err, "Should not accept invalid IP") + require.NotNil(t, err, "Should not accept invalid IP") } func TestIPLimiterInvalidIPs(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{}) - assert.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed") + require.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed") } func TestIPLimiterNoIPs(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{}) - assert.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed") + require.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed") } func TestIPLimiterAllowlist(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{"::1/128"}) - assert.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed") - assert.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed") + require.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed") + require.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed") } func TestIPLimiterBlocklist(t *testing.T) { limiter, _ := NewIPLimiter([]string{"::1/128"}, []string{}) - assert.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed") - assert.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed") + require.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed") + require.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed") +} + +func TestNullIPLimiter(t *testing.T) { + limiter := NewNullIPLimiter() + + require.True(t, limiter.IsAllowed("foobar")) } diff --git a/net/port_test.go b/net/port_test.go index 019afcf0..dec2d5b9 100644 --- a/net/port_test.go +++ b/net/port_test.go @@ -3,19 +3,30 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewPortrange(t *testing.T) { _, err := NewPortrange(1000, 1999) - assert.Nil(t, err, "Valid port range not accepted: %s", err) + require.Nil(t, err, "Valid port range not accepted: %s", err) } func TestInvalidPortrange(t *testing.T) { _, err := NewPortrange(1999, 1000) - assert.NotNil(t, err, "Invalid port range accepted") + require.NotNil(t, err, "Invalid port range accepted") +} + +func TestOutOfRangePortrange(t *testing.T) { + p, err := NewPortrange(-1, 70000) + + require.NoError(t, err) + + portrange := p.(*portrange) + + require.Equal(t, 1, portrange.min) + require.Equal(t, 65535, len(portrange.ports)) } func TestGetPort(t *testing.T) { @@ -23,26 +34,26 @@ func TestGetPort(t *testing.T) { port, err := portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) } func TestGetPutPort(t *testing.T) { portrange, _ := NewPortrange(1000, 1999) port, err := portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) port, err = portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1001, port) + require.Nil(t, err) + require.Equal(t, 1001, port) portrange.Put(1000) port, err = portrange.Get() - assert.Nil(t, err) 
- assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) } func TestPortUnavailable(t *testing.T) { @@ -50,12 +61,12 @@ func TestPortUnavailable(t *testing.T) { for i := 0; i < 1000; i++ { port, _ := portrange.Get() - assert.Equal(t, 1000+i, port, "at index %d", i) + require.Equal(t, 1000+i, port, "at index %d", i) } port, err := portrange.Get() - assert.NotNil(t, err) - assert.Less(t, port, 0) + require.NotNil(t, err) + require.Less(t, port, 0) } func TestPutPort(t *testing.T) { @@ -73,16 +84,27 @@ func TestClampRange(t *testing.T) { port, _ := portrange.Get() - assert.Equal(t, 65000, port) + require.Equal(t, 65000, port) portrange.Put(65000) for i := 65000; i <= 65535; i++ { port, _ := portrange.Get() - assert.Equal(t, i, port, "at index %d", i) + require.Equal(t, i, port, "at index %d", i) } port, _ = portrange.Get() - assert.Less(t, port, 0) + require.Less(t, port, 0) +} + +func TestDummyPortranger(t *testing.T) { + portrange := NewDummyPortrange() + + port, err := portrange.Get() + + require.Error(t, err) + require.Equal(t, 0, port) + + portrange.Put(42) } diff --git a/net/url/url_test.go b/net/url/url_test.go index 977a5123..460663e7 100644 --- a/net/url/url_test.go +++ b/net/url/url_test.go @@ -7,9 +7,20 @@ import ( ) func TestLookup(t *testing.T) { - _, err := Lookup("https://www.google.com") + ip, err := Lookup("/localhost:8080/foobar") require.NoError(t, err) + require.Equal(t, "", ip) + + ip, err = Lookup("http://") + + require.NoError(t, err) + require.Equal(t, "", ip) + + ip, err = Lookup("https://www.google.com") + + require.NoError(t, err) + require.NotEmpty(t, ip) } func TestLocalhost(t *testing.T) { @@ -18,3 +29,22 @@ func TestLocalhost(t *testing.T) { require.NoError(t, err) require.Subset(t, []string{"127.0.0.1", "::1"}, []string{ip}) } + +func TestValidate(t *testing.T) { + err := Validate("http://localhost/foobar") + require.NoError(t, err) + + err = Validate("foobar") + require.NoError(t, err) +} + +func TestScheme(t *testing.T) { + r := HasScheme("http://localhost/foobar") + require.True(t, r) + + r = HasScheme("iueriherfd://localhost/foobar") + require.True(t, r) + + r = HasScheme("//localhost/foobar") + require.False(t, r) +} diff --git a/process/process.go b/process/process.go index 3c927e2b..4bfcb4b4 100644 --- a/process/process.go +++ b/process/process.go @@ -192,6 +192,7 @@ type process struct { onStart func() onExit func() onStateChange func(from, to string) + lock sync.Mutex } limits Limiter } @@ -588,6 +589,7 @@ func (p *process) stop(wait bool) error { if wait { wg.Add(1) + p.callbacks.lock.Lock() if p.callbacks.onExit == nil { p.callbacks.onExit = func() { wg.Done() @@ -601,6 +603,7 @@ func (p *process) stop(wait bool) error { p.callbacks.onExit = cb } } + p.callbacks.lock.Unlock() } var err error @@ -829,10 +832,12 @@ func (p *process) waiter() { // Reset the parser stats p.parser.ResetStats() - // Call the onStop callback + // Call the onExit callback + p.callbacks.lock.Lock() if p.callbacks.onExit != nil { go p.callbacks.onExit() } + p.callbacks.lock.Unlock() p.order.lock.Lock() defer p.order.lock.Unlock() diff --git a/psutil/process.go b/psutil/process.go index 187485d4..1b1ab3bd 100644 --- a/psutil/process.go +++ b/psutil/process.go @@ -98,7 +98,7 @@ func (p *process) cpuTimes() (*cpuTimesStat, error) { } s := &cpuTimesStat{ - total: times.Total(), + total: cpuTotal(times), system: times.System, user: times.User, } diff --git a/psutil/psutil.go b/psutil/psutil.go index dc75d0b9..b2a33a4f 100644 --- a/psutil/psutil.go +++ 
b/psutil/psutil.go @@ -285,7 +285,7 @@ func (u *util) cpuTimes() (*cpuTimesStat, error) { } s := &cpuTimesStat{ - total: times[0].Total(), + total: cpuTotal(&times[0]), system: times[0].System, user: times[0].User, idle: times[0].Idle, @@ -496,3 +496,8 @@ func (u *util) readFile(path string) ([]string, error) { return lines, nil } + +func cpuTotal(c *cpu.TimesStat) float64 { + return c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq + + c.Softirq + c.Steal + c.Guest + c.GuestNice +} diff --git a/restream/app/process.go b/restream/app/process.go index 1d62220b..4ec6036a 100644 --- a/restream/app/process.go +++ b/restream/app/process.go @@ -2,7 +2,6 @@ package app import ( "github.com/datarhei/core/v16/process" - "github.com/datarhei/core/v16/restream/replace" ) type ConfigIOCleanup struct { @@ -80,79 +79,6 @@ func (config *Config) Clone() *Config { return clone } -// ReplacePlaceholders replaces all placeholders in the config. The config -// will be modified in place. -func (config *Config) ResolvePlaceholders(r replace.Replacer) { - for i, option := range config.Options { - // Replace any known placeholders - option = r.Replace(option, "diskfs", "") - - config.Options[i] = option - } - - // Resolving the given inputs - for i, input := range config.Input { - // Replace any known placeholders - input.ID = r.Replace(input.ID, "processid", config.ID) - input.ID = r.Replace(input.ID, "reference", config.Reference) - input.Address = r.Replace(input.Address, "inputid", input.ID) - input.Address = r.Replace(input.Address, "processid", config.ID) - input.Address = r.Replace(input.Address, "reference", config.Reference) - input.Address = r.Replace(input.Address, "diskfs", "") - input.Address = r.Replace(input.Address, "memfs", "") - input.Address = r.Replace(input.Address, "rtmp", "") - input.Address = r.Replace(input.Address, "srt", "") - - for j, option := range input.Options { - // Replace any known placeholders - option = r.Replace(option, "inputid", input.ID) - option = r.Replace(option, "processid", config.ID) - option = r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - input.Options[j] = option - } - - config.Input[i] = input - } - - // Resolving the given outputs - for i, output := range config.Output { - // Replace any known placeholders - output.ID = r.Replace(output.ID, "processid", config.ID) - output.Address = r.Replace(output.Address, "outputid", output.ID) - output.Address = r.Replace(output.Address, "processid", config.ID) - output.Address = r.Replace(output.Address, "reference", config.Reference) - output.Address = r.Replace(output.Address, "diskfs", "") - output.Address = r.Replace(output.Address, "memfs", "") - output.Address = r.Replace(output.Address, "rtmp", "") - output.Address = r.Replace(output.Address, "srt", "") - - for j, option := range output.Options { - // Replace any known placeholders - option = r.Replace(option, "outputid", output.ID) - option = r.Replace(option, "processid", config.ID) - option = r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - output.Options[j] = option - } - - for j, cleanup := range output.Cleanup { - // Replace any known placeholders - cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference) - - output.Cleanup[j]
= cleanup - } - - config.Output[i] = output - } -} - // CreateCommand creates the FFmpeg command from this config. func (config *Config) CreateCommand() []string { var command []string diff --git a/restream/fs/fs.go b/restream/fs/fs.go index 29216aa9..0e676da9 100644 --- a/restream/fs/fs.go +++ b/restream/fs/fs.go @@ -62,6 +62,11 @@ func New(config Config) Filesystem { rfs.logger = log.New("") } + rfs.logger = rfs.logger.WithFields(log.Fields{ + "name": config.FS.Name(), + "type": config.FS.Type(), + }) + rfs.cleanupPatterns = make(map[string][]Pattern) // already drain the stop @@ -130,7 +135,7 @@ func (rfs *filesystem) cleanup() { for _, patterns := range rfs.cleanupPatterns { for _, pattern := range patterns { - filesAndDirs := rfs.Filesystem.List(pattern.Pattern) + filesAndDirs := rfs.Filesystem.List("/", pattern.Pattern) files := []fs.FileInfo{} for _, f := range filesAndDirs { @@ -146,7 +151,7 @@ if pattern.MaxFiles > 0 && uint(len(files)) > pattern.MaxFiles { for i := uint(0); i < uint(len(files))-pattern.MaxFiles; i++ { rfs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded") - rfs.Filesystem.Delete(files[i].Name()) + rfs.Filesystem.Remove(files[i].Name()) } } @@ -156,7 +161,7 @@ for _, f := range files { if f.ModTime().Before(bestBefore) { rfs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded") - rfs.Filesystem.Delete(f.Name()) + rfs.Filesystem.Remove(f.Name()) } } } @@ -170,11 +175,11 @@ func (rfs *filesystem) purge(patterns []Pattern) (nfiles uint64) { continue } - files := rfs.Filesystem.List(pattern.Pattern) + files := rfs.Filesystem.List("/", pattern.Pattern) sort.Slice(files, func(i, j int) bool { return len(files[i].Name()) > len(files[j].Name()) }) for _, f := range files { rfs.logger.Debug().WithField("path", f.Name()).Log("Purging file") - rfs.Filesystem.Delete(f.Name()) + rfs.Filesystem.Remove(f.Name()) nfiles++ } } diff --git a/restream/fs/fs_test.go b/restream/fs/fs_test.go index ace6e9fa..0162be1d 100644 --- a/restream/fs/fs_test.go +++ b/restream/fs/fs_test.go @@ -10,11 +10,7 @@ import ( ) func TestMaxFiles(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -30,15 +26,15 @@ func TestMaxFiles(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -47,7 +43,7 @@ names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -60,11 +56,7 @@ } func TestMaxAge(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ :=
fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -80,15 +72,15 @@ func TestMaxAge(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 0 }, 5*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 1 { @@ -97,7 +89,7 @@ names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -110,11 +102,7 @@ } func TestUnsetCleanup(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -130,15 +118,15 @@ func TestUnsetCleanup(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -147,7 +135,7 @@ names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -158,7 +146,7 @@ cleanfs.UnsetCleanup("foobar") - cleanfs.Store("/chunk_4.ts", strings.NewReader("chunk_4")) + cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4")) require.Eventually(t, func() bool { if cleanfs.Files() != 4 { @@ -167,7 +155,7 @@ names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } diff --git a/restream/replace/replace.go b/restream/replace/replace.go index 47885a38..e9b45adc 100644 --- a/restream/replace/replace.go +++ b/restream/replace/replace.go @@ -4,17 +4,23 @@ import ( "net/url" "regexp" "strings" + + "github.com/datarhei/core/v16/glob" + "github.com/datarhei/core/v16/restream/app" ) +type TemplateFn func(config *app.Config, section string) string + type Replacer interface { // RegisterTemplate registers a template for a specific placeholder. Template // may contain placeholders as well of the form {name}. They will be replaced - // by the parameters of the placeholder (see Replace). - RegisterTemplate(placeholder, template string) + // by the parameters of the placeholder (see Replace). If a parameter of + // a template is not present, default values can be provided.
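+ // For example (values taken from the replace tests further down, not part of the original comment): a template "Hello {who}!" registered with defaults {"who": "someone"} renders as "Hello someone!" when the placeholder parameters do not set "who".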
+ RegisterTemplate(placeholder, template string, defaults map[string]string) // RegisterTemplateFunc does the same as RegisterTemplate, but the template // is returned by the template function. - RegisterTemplateFunc(placeholder string, template func() string) + RegisterTemplateFunc(placeholder string, template TemplateFn, defaults map[string]string) // Replace replaces all occurrences of placeholder in str with value. The placeholder is of the // form {placeholder}. It is possible to escape a character in value with \\ by appending a ^ @@ -24,12 +30,18 @@ type Replacer interface { // the value of the corresponding key in the parameters. // If the value is an empty string, the registered templates will be searched for that // placeholder. If no template is found, the placeholder will be replaced by the empty string. - // A placeholder name may consist on of the letters a-z. - Replace(str, placeholder, value string) string + // A placeholder name may consist of the letters a-z and ':'. The placeholder may contain + // a glob pattern to find the appropriate template. + Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string +} + +type template struct { + fn TemplateFn + defaults map[string]string } type replacer struct { - templates map[string]func() string + templates map[string]template re *regexp.Regexp templateRe *regexp.Regexp @@ -38,41 +50,51 @@ type replacer struct { } // New returns a Replacer func New() Replacer { r := &replacer{ - templates: make(map[string]func() string), - re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`), - templateRe: regexp.MustCompile(`{([a-z]+)}`), + templates: make(map[string]template), + re: regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`), + templateRe: regexp.MustCompile(`{([a-z:]+)}`), } return r } -func (r *replacer) RegisterTemplate(placeholder, template string) { - r.templates[placeholder] = func() string { return template } +func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) { + r.RegisterTemplateFunc(placeholder, func(*app.Config, string) string { return tmpl }, defaults) } -func (r *replacer) RegisterTemplateFunc(placeholder string, template func() string) { - r.templates[placeholder] = template +func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateFn, defaults map[string]string) { + r.templates[placeholder] = template{ + fn: templateFn, + defaults: defaults, + } } -func (r *replacer) Replace(str, placeholder, value string) string { +func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string { str = r.re.ReplaceAllStringFunc(str, func(match string) string { matches := r.re.FindStringSubmatch(match) - if matches[1] != placeholder { + + if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok { return match } + placeholder := matches[1] + // We need a copy from the value v := value + var tmpl template = template{ + fn: func(*app.Config, string) string { return v }, + } // Check for a registered template if len(v) == 0 { - tmplFunc, ok := r.templates[placeholder] + t, ok := r.templates[placeholder] if ok { - v = tmplFunc() + tmpl = t } } - v = r.compileTemplate(v, matches[3]) + v = tmpl.fn(config, section) + v = r.compileTemplate(v, matches[3], vars, tmpl.defaults) if len(matches[2]) != 0 { // If there's a character to escape, we also have to escape the @@ -97,13 +119,18 @@ func (r *replacer) Replace(str, placeholder, value string) string { // placeholder
name and will be replaced with the value. The resulting string is "Hello World!". // If a placeholder name is not present in the params string, it will not be replaced. The key // and values can be escaped as in net/url.QueryEscape. -func (r *replacer) compileTemplate(str, params string) string { - if len(params) == 0 { +func (r *replacer) compileTemplate(str, params string, vars map[string]string, defaults map[string]string) string { + if len(params) == 0 && len(defaults) == 0 { return str } p := make(map[string]string) + // Copy the defaults + for key, value := range defaults { + p[key] = value + } + // taken from net/url.ParseQuery for params != "" { var key string @@ -111,15 +138,22 @@ func (r *replacer) compileTemplate(str, params string) string { if key == "" { continue } + key, value, _ := strings.Cut(key, "=") key, err := url.QueryUnescape(key) if err != nil { continue } + value, err = url.QueryUnescape(value) if err != nil { continue } + + for name, v := range vars { + value = strings.ReplaceAll(value, "$"+name, v) + } + p[key] = value } diff --git a/restream/replace/replace_test.go b/restream/replace/replace_test.go index 7474775d..1d9ccfe0 100644 --- a/restream/replace/replace_test.go +++ b/restream/replace/replace_test.go @@ -3,6 +3,7 @@ package replace import ( "testing" + "github.com/datarhei/core/v16/restream/app" "github.com/stretchr/testify/require" ) @@ -24,28 +25,56 @@ func TestReplace(t *testing.T) { r := New() for _, e := range samples { - replaced := r.Replace(e[0], "foobar", foobar) + replaced := r.Replace(e[0], "foobar", foobar, nil, nil, "") require.Equal(t, e[1], replaced, e[0]) } - replaced := r.Replace("{foobar}", "foobar", "") + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") require.Equal(t, "", replaced) } func TestReplaceTemplate(t *testing.T) { r := New() - r.RegisterTemplate("foobar", "Hello {who}! {what}?") + r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", nil) - replaced := r.Replace("{foobar,who=World}", "foobar", "") + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! {what}?", replaced) - replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "") + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc^2?", replaced) - replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "") + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) } +func TestReplaceTemplateFunc(t *testing.T) { + r := New() + r.RegisterTemplateFunc("foo:bar", func(config *app.Config, kind string) string { return "Hello {who}! {what}?" }, nil) + + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! {what}?", replaced) + + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc^2?", replaced) + + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) +} + +func TestReplaceTemplateDefaults(t *testing.T) { + r := New() + r.RegisterTemplate("foobar", "Hello {who}! {what}?", map[string]string{ + "who": "someone", + "what": "something", + }) + + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") + require.Equal(t, "Hello someone! 
something?", replaced) + + replaced = r.Replace("{foobar,who=World}", "foobar", "", nil, nil, "") + require.Equal(t, "Hello World! something?", replaced) +} + func TestReplaceCompileTemplate(t *testing.T) { samples := [][3]string{ {"Hello {who}!", "who=World", "Hello World!"}, @@ -58,7 +87,58 @@ func TestReplaceCompileTemplate(t *testing.T) { r := New().(*replacer) for _, e := range samples { - replaced := r.compileTemplate(e[0], e[1]) + replaced := r.compileTemplate(e[0], e[1], nil, nil) require.Equal(t, e[2], replaced, e[0]) } } + +func TestReplaceCompileTemplateDefaults(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "", "Hello someone!"}, + {"Hello {who}!", "who=World", "Hello World!"}, + {"Hello {who}! {what}?", "who=World", "Hello World! something?"}, + {"Hello {who}! {what}?", "who=World,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=World,what=", "Hello World! ?"}, + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], nil, map[string]string{ + "who": "someone", + "what": "something", + }) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceCompileTemplateWithVars(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "who=$processid", "Hello 123456789!"}, + {"Hello {who}! {what}?", "who=$location", "Hello World! {what}?"}, + {"Hello {who}! {what}?", "who=$location,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=$location,what=$processid", "Hello World! 123456789?"}, + {"Hello {who}!", "who=$processidxxx", "Hello 123456789xxx!"}, + } + + vars := map[string]string{ + "processid": "123456789", + "location": "World", + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], vars, nil) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceGlob(t *testing.T) { + r := New() + r.RegisterTemplate("foo:bar", "Hello foobar", nil) + r.RegisterTemplate("foo:baz", "Hello foobaz", nil) + + replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "", nil, nil, "") + require.Equal(t, "Hello foobaz, Hello foobar", replaced) +} diff --git a/restream/restream.go b/restream/restream.go index 4c5f0b28..fcf38999 100644 --- a/restream/restream.go +++ b/restream/restream.go @@ -30,30 +30,31 @@ import ( // The Restreamer interface type Restreamer interface { - ID() string // ID of this instance - Name() string // Arbitrary name of this instance - CreatedAt() time.Time // Time of when this instance has been created - Start() // Start all processes that have a "start" order - Stop() // Stop all running process but keep their "start" order - AddProcess(config *app.Config) error // Add a new process - GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference - DeleteProcess(id string) error // Delete a process - UpdateProcess(id string, config *app.Config) error // Update a process - StartProcess(id string) error // Start a process - StopProcess(id string) error // Stop a process - RestartProcess(id string) error // Restart a process - ReloadProcess(id string) error // Reload a process - GetProcess(id string) (*app.Process, error) // Get a process - GetProcessState(id string) (*app.State, error) // Get the state of a process - GetProcessLog(id string) (*app.Log, error) // Get the logs of a process - GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process - Probe(id string) app.Probe // Probe a process - Skills() skills.Skills // 
Get the ffmpeg skills - ReloadSkills() error // Reload the ffmpeg skills - SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process - GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process - SetMetadata(key string, data interface{}) error // Set general metadata - GetMetadata(key string) (interface{}, error) // Get previously set general metadata + ID() string // ID of this instance + Name() string // Arbitrary name of this instance + CreatedAt() time.Time // Time when this instance has been created + Start() // Start all processes that have a "start" order + Stop() // Stop all running processes but keep their "start" order + AddProcess(config *app.Config) error // Add a new process + GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference + DeleteProcess(id string) error // Delete a process + UpdateProcess(id string, config *app.Config) error // Update a process + StartProcess(id string) error // Start a process + StopProcess(id string) error // Stop a process + RestartProcess(id string) error // Restart a process + ReloadProcess(id string) error // Reload a process + GetProcess(id string) (*app.Process, error) // Get a process + GetProcessState(id string) (*app.State, error) // Get the state of a process + GetProcessLog(id string) (*app.Log, error) // Get the logs of a process + GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process + Probe(id string) app.Probe // Probe a process + ProbeWithTimeout(id string, timeout time.Duration) app.Probe // Probe a process with a specific timeout + Skills() skills.Skills // Get the ffmpeg skills + ReloadSkills() error // Reload the ffmpeg skills + SetProcessMetadata(id, key string, data interface{}) error // Set metadata to a process + GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process + SetMetadata(key string, data interface{}) error // Set general metadata + GetMetadata(key string) (interface{}, error) // Get previously set general metadata } // Config is the required configuration for a new restreamer instance.
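For orientation, the interface above now exposes ProbeWithTimeout next to Probe; as a hunk further down shows, Probe delegates to it with a 20-second default. A minimal usage sketch, assuming a restreamer value obtained elsewhere (the wrapper function and the process ID "myprocess" are illustrative assumptions, not part of the patch):

```go
package example

import (
	"fmt"
	"time"

	"github.com/datarhei/core/v16/restream"
)

// probeQuickly bounds a probe at 5 seconds instead of relying on the
// 20-second default that Probe() uses internally. The process ID
// "myprocess" is a hypothetical placeholder.
func probeQuickly(rs restream.Restreamer) {
	probe := rs.ProbeWithTimeout("myprocess", 5*time.Second)

	// app.Probe exposes the streams detected during the probe run.
	fmt.Printf("detected %d streams\n", len(probe.Streams))
}
```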
@@ -61,8 +62,7 @@ type Config struct { ID string Name string Store store.Store - DiskFS fs.Filesystem - MemFS fs.Filesystem + Filesystems []fs.Filesystem Replace replace.Replacer FFmpeg ffmpeg.FFmpeg MaxProcesses int64 @@ -93,8 +93,8 @@ type restream struct { maxProc int64 nProc int64 fs struct { - diskfs rfs.Filesystem - memfs rfs.Filesystem + list []rfs.Filesystem + diskfs []rfs.Filesystem stopObserver context.CancelFunc } replace replace.Replacer @@ -124,29 +124,28 @@ func New(config Config) (Restreamer, error) { } if r.store == nil { - r.store = store.NewDummyStore(store.DummyConfig{}) - } - - if config.DiskFS != nil { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: config.DiskFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"), - }) - } else { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), + dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + s, err := store.NewJSON(store.JSONConfig{ + Filesystem: dummyfs, }) + if err != nil { + return nil, err + } + r.store = s } - if config.MemFS != nil { - r.fs.memfs = rfs.New(rfs.Config{ - FS: config.MemFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"), - }) - } else { - r.fs.memfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), + for _, fs := range config.Filesystems { + fs := rfs.New(rfs.Config{ + FS: fs, + Logger: r.logger.WithComponent("Cleanup"), }) + + r.fs.list = append(r.fs.list, fs) + + // Add the diskfs filesystems also to a separate array. We need it later for input and output validation + if fs.Type() == "disk" { + r.fs.diskfs = append(r.fs.diskfs, fs) + } } if r.replace == nil { @@ -185,12 +184,16 @@ func (r *restream) Start() { r.setCleanup(id, t.config) } - r.fs.diskfs.Start() - r.fs.memfs.Start() - ctx, cancel := context.WithCancel(context.Background()) r.fs.stopObserver = cancel - go r.observe(ctx, 10*time.Second) + + for _, fs := range r.fs.list { + fs.Start() + + if fs.Type() == "disk" { + go r.observe(ctx, fs, 10*time.Second) + } + } r.stopOnce = sync.Once{} }) @@ -214,14 +217,16 @@ func (r *restream) Stop() { r.fs.stopObserver() - r.fs.diskfs.Stop() - r.fs.memfs.Stop() + // Stop the cleanup jobs + for _, fs := range r.fs.list { + fs.Stop() + } r.startOnce = sync.Once{} }) } -func (r *restream) observe(ctx context.Context, interval time.Duration) { +func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() @@ -230,14 +235,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { case <-ctx.Done(): return case <-ticker.C: - size, limit := r.fs.diskfs.Size() + size, limit := fs.Size() isFull := false if limit > 0 && size >= limit { isFull = true } if isFull { - // Stop all tasks that write to disk + // Stop all tasks that write to this filesystem r.lock.Lock() for id, t := range r.tasks { if !t.valid { @@ -252,7 +257,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { continue } - r.logger.Warn().Log("Shutting down because disk is full") + r.logger.Warn().Log("Shutting down because filesystem is full") r.stopProcess(id) } r.lock.Unlock() @@ -290,7 +295,7 @@ func (r *restream) load() error { } // Replace all placeholders in the config - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) tasks[id] = t } @@ -463,7 +468,7 @@ func (r *restream) createTask(config *app.Config) (*task, error) { logger: r.logger.WithField("id", process.ID), } - t.config.ResolvePlaceholders(r.replace) + 
resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -502,34 +507,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) { } func (r *restream) setCleanup(id string, config *app.Config) { + rePrefix := regexp.MustCompile(`^([a-z]+):`) + for _, output := range config.Output { for _, c := range output.Cleanup { - if strings.HasPrefix(c.Pattern, "memfs:") { - r.fs.memfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "memfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, - }) - } else if strings.HasPrefix(c.Pattern, "diskfs:") { - r.fs.diskfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "diskfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, + matches := rePrefix.FindStringSubmatch(c.Pattern) + if matches == nil { + continue + } + + name := matches[1] + + // Support legacy names + if name == "diskfs" { + name = "disk" + } else if name == "memfs" { + name = "mem" + } + + for _, fs := range r.fs.list { + if fs.Name() != name { + continue + } + + pattern := rfs.Pattern{ + Pattern: rePrefix.ReplaceAllString(c.Pattern, ""), + MaxFiles: c.MaxFiles, + MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, + PurgeOnDelete: c.PurgeOnDelete, + } + + fs.SetCleanup(id, []rfs.Pattern{ + pattern, }) + + break } } } } func (r *restream) unsetCleanup(id string) { - r.fs.diskfs.UnsetCleanup(id) - r.fs.memfs.UnsetCleanup(id) + for _, fs := range r.fs.list { + fs.UnsetCleanup(id) + } } func (r *restream) setPlayoutPorts(t *task) error { @@ -618,9 +639,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID) } - io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + io.Address, err = r.validateInputAddress(io.Address, fs.Metadata("base")) + if err != nil { + maxFails++ + } + } + + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + } + } else { + io.Address, err = r.validateInputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + } } } @@ -650,15 +685,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID) } - isFile := false + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + isFile := false + io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Metadata("base")) + if err != nil { + maxFails++ + } - io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) - } + if isFile { + hasFiles = true + } + } - if isFile { - hasFiles = true + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + } else { + isFile := false + 
io.Address, isFile, err = r.validateOutputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + + if isFile { + hasFiles = true + } } } @@ -1089,7 +1142,7 @@ func (r *restream) reloadProcess(id string) error { t.config = t.process.Config.Clone() - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -1251,6 +1304,10 @@ func (r *restream) GetProcessLog(id string) (*app.Log, error) { } func (r *restream) Probe(id string) app.Probe { + return r.ProbeWithTimeout(id, 20*time.Second) +} + +func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe { r.lock.RLock() appprobe := app.Probe{} @@ -1288,7 +1345,7 @@ func (r *restream) Probe(id string) app.Probe { ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{ Reconnect: false, ReconnectDelay: 0, - StaleTimeout: 20 * time.Second, + StaleTimeout: timeout, Command: command, Parser: prober, Logger: task.logger, @@ -1437,3 +1494,97 @@ func (r *restream) GetMetadata(key string) (interface{}, error) { return data, nil } + +// resolvePlaceholders replaces all placeholders in the config. The config +// will be modified in place. +func resolvePlaceholders(config *app.Config, r replace.Replacer) { + vars := map[string]string{ + "processid": config.ID, + "reference": config.Reference, + } + + for i, option := range config.Options { + // Replace any known placeholders + option = r.Replace(option, "diskfs", "", vars, config, "global") + option = r.Replace(option, "fs:*", "", vars, config, "global") + + config.Options[i] = option + } + + // Resolving the given inputs + for i, input := range config.Input { + // Replace any known placeholders + input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input") + input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input") + + vars["inputid"] = input.ID + + input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input") + input.Address = r.Replace(input.Address, "diskfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "memfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "fs:*", "", vars, config, "input") + input.Address = r.Replace(input.Address, "rtmp", "", vars, config, "input") + input.Address = r.Replace(input.Address, "srt", "", vars, config, "input") + + for j, option := range input.Options { + // Replace any known placeholders + option = r.Replace(option, "inputid", input.ID, nil, nil, "input") + option = r.Replace(option, "processid", config.ID, nil, nil, "input") + option = r.Replace(option, "reference", config.Reference, nil, nil, "input") + option = r.Replace(option, "diskfs", "", vars, config, "input") + option = r.Replace(option, "memfs", "", vars, config, "input") + option = r.Replace(option, "fs:*", "", vars, config, "input") + + input.Options[j] = option + } + + delete(vars, "inputid") + + config.Input[i] = input + } + + // Resolving the given outputs + for i, output := range config.Output { + // Replace any known placeholders + output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output") + output.ID = r.Replace(output.ID, "reference", config.Reference, nil, nil, "output") + + vars["outputid"] = output.ID + + 
output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output") + output.Address = r.Replace(output.Address, "diskfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "memfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "fs:*", "", vars, config, "output") + output.Address = r.Replace(output.Address, "rtmp", "", vars, config, "output") + output.Address = r.Replace(output.Address, "srt", "", vars, config, "output") + + for j, option := range output.Options { + // Replace any known placeholders + option = r.Replace(option, "outputid", output.ID, nil, nil, "output") + option = r.Replace(option, "processid", config.ID, nil, nil, "output") + option = r.Replace(option, "reference", config.Reference, nil, nil, "output") + option = r.Replace(option, "diskfs", "", vars, config, "output") + option = r.Replace(option, "memfs", "", vars, config, "output") + option = r.Replace(option, "fs:*", "", vars, config, "output") + + output.Options[j] = option + } + + for j, cleanup := range output.Cleanup { + // Replace any known placeholders + cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference, nil, nil, "output") + + output.Cleanup[j] = cleanup + } + + delete(vars, "outputid") + + config.Output[i] = output + } +} diff --git a/restream/restream_test.go b/restream/restream_test.go index 18c53bf5..11b08240 100644 --- a/restream/restream_test.go +++ b/restream/restream_test.go @@ -9,11 +9,12 @@ import ( "github.com/datarhei/core/v16/internal/testhelper" "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/restream/app" + "github.com/datarhei/core/v16/restream/replace" "github.com/stretchr/testify/require" ) -func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator) (Restreamer, error) { +func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) { binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper") if err != nil { return nil, fmt.Errorf("failed to build helper program: %w", err) @@ -30,7 +31,8 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp } rs, err := New(Config{ - FFmpeg: ffmpeg, + FFmpeg: ffmpeg, + Replace: replacer, }) if err != nil { return nil, err @@ -77,7 +79,7 @@ func getDummyProcess() *app.Config { } func TestAddProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -97,7 +99,7 @@ func TestAddProcess(t *testing.T) { } func TestAutostartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -112,7 +114,7 @@ func TestAutostartProcess(t *testing.T) { } func TestAddInvalidProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) // Invalid process ID @@ -180,7 +182,7 @@ func TestAddInvalidProcess(t *testing.T) { } func 
TestRemoveProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -195,24 +197,98 @@ func TestRemoveProcess(t *testing.T) { require.NotEqual(t, nil, err, "Unset process found (%s)", process.ID) } +func TestUpdateProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process1 := getDummyProcess() + require.NotNil(t, process1) + process1.ID = "process1" + + process2 := getDummyProcess() + require.NotNil(t, process2) + process2.ID = "process2" + + err = rs.AddProcess(process1) + require.Equal(t, nil, err) + + err = rs.AddProcess(process2) + require.Equal(t, nil, err) + + process3 := getDummyProcess() + require.NotNil(t, process3) + process3.ID = "process2" + + err = rs.UpdateProcess("process1", process3) + require.Error(t, err) + + process3.ID = "process3" + err = rs.UpdateProcess("process1", process3) + require.NoError(t, err) + + _, err = rs.GetProcess(process1.ID) + require.Error(t, err) + + _, err = rs.GetProcess(process3.ID) + require.NoError(t, err) +} + func TestGetProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) - process := getDummyProcess() + process1 := getDummyProcess() + process1.ID = "foo_aaa_1" + process1.Reference = "foo_aaa_1" + process2 := getDummyProcess() + process2.ID = "bar_bbb_2" + process2.Reference = "bar_bbb_2" + process3 := getDummyProcess() + process3.ID = "foo_ccc_3" + process3.Reference = "foo_ccc_3" + process4 := getDummyProcess() + process4.ID = "bar_ddd_4" + process4.Reference = "bar_ddd_4" - rs.AddProcess(process) + rs.AddProcess(process1) + rs.AddProcess(process2) + rs.AddProcess(process3) + rs.AddProcess(process4) - _, err = rs.GetProcess(process.ID) - require.Equal(t, nil, err, "Process not found (%s)", process.ID) + _, err = rs.GetProcess(process1.ID) + require.Equal(t, nil, err) list := rs.GetProcessIDs("", "") - require.Len(t, list, 1, "expected 1 process") - require.Equal(t, process.ID, list[0], "expected same process ID") + require.Len(t, list, 4) + require.ElementsMatch(t, []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("foo_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list) + + list = rs.GetProcessIDs("bar_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("*_bbb_*", "") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) + + list = rs.GetProcessIDs("", "foo_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list) + + list = rs.GetProcessIDs("", "bar_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("", "*_bbb_*") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) } func TestStartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -238,7 +314,7 @@ func TestStartProcess(t *testing.T) { } func TestStopProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -263,7 +339,7 @@ func 
TestStopProcess(t *testing.T) { } func TestRestartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -288,7 +364,7 @@ func TestRestartProcess(t *testing.T) { } func TestReloadProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -318,8 +394,21 @@ func TestReloadProcess(t *testing.T) { rs.StopProcess(process.ID) } -func TestProcessData(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) +func TestProbeProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + rs.AddProcess(process) + + probe := rs.ProbeWithTimeout(process.ID, 5*time.Second) + + require.Equal(t, 3, len(probe.Streams)) +} + +func TestProcessMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -340,7 +429,7 @@ func TestProcessData(t *testing.T) { } func TestLog(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -373,7 +462,7 @@ func TestLog(t *testing.T) { } func TestPlayoutNoRange(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -396,7 +485,7 @@ func TestPlayoutRange(t *testing.T) { portrange, err := net.NewPortrange(3000, 3001) require.NoError(t, err) - rs, err := getDummyRestreamer(portrange, nil, nil) + rs, err := getDummyRestreamer(portrange, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -417,7 +506,7 @@ func TestPlayoutRange(t *testing.T) { } func TestAddressReference(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process1 := getDummyProcess() @@ -449,7 +538,7 @@ func TestAddressReference(t *testing.T) { } func TestConfigValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -496,7 +585,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil) require.NoError(t, err) - rsi, err := getDummyRestreamer(nil, valIn, valOut) + rsi, err := getDummyRestreamer(nil, valIn, valOut, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -522,7 +611,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { } func TestOutputAddressValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -561,3 +650,196 @@ func TestOutputAddressValidation(t *testing.T) { require.Equal(t, r.path, path) } } + +func TestMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + data, _ := rs.GetMetadata("foobar") + require.Equal(t, nil, data, "nothing should be stored under the key") + + rs.SetMetadata("foobar", process) + + data, _ = rs.GetMetadata("foobar") + require.NotEqual(t, nil, data, "there should be something stored under the key") + + p := data.(*app.Config) + + require.Equal(t, process.ID, p.ID, 
"failed to retrieve stored data") +} + +func TestReplacer(t *testing.T) { + replacer := replace.New() + + replacer.RegisterTemplateFunc("diskfs", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:disk", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("memfs", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:mem", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("rtmp", func(config *app.Config, section string) string { + return "rtmp://localhost/app/{name}?token=foobar" + }, nil) + + replacer.RegisterTemplateFunc("srt", func(config *app.Config, section string) string { + template := "srt://localhost:6000?mode=caller&transtype=live&latency={latency}&streamid={name}" + if section == "output" { + template += ",mode:publish" + } else { + template += ",mode:request" + } + template += ",token:abcfoobar&passphrase=secret" + + return template + }, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) + + rsi, err := getDummyRestreamer(nil, nil, nil, replacer) + require.NoError(t, err) + + process := &app.Config{ + ID: "314159265359", + Reference: "refref", + Input: []app.ConfigIO{ + { + ID: "in_{processid}_{reference}", + Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:{inputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$inputid.txt", + }, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out_{processid}_{reference}", + Address: "output:{outputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=$processid}_srt:{srt,name=$reference,latency=42}_rtmp:{rtmp,name=$outputid}", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:{outputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_{outputid}_{processid}_{reference}_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "{diskfs}/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "{fs:disk}/foobar_on_disk_aswell.txt", + "{fs:mem}/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + err = rsi.AddProcess(process) + require.NoError(t, err) + + rs := rsi.(*restream) + + process = &app.Config{ + ID: "314159265359", + Reference: "refref", + FFVersion: "^4.0.2", + Input: []app.ConfigIO{ + { + ID: "in_314159265359_refref", + Address: 
"input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:in_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + "memfs:http://localhost/mnt/memfs/mem.txt", + "fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$inputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{}, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out_314159265359_refref", + Address: "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:out_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + "memfs:http://localhost/mnt/memfs/mem.txt", + "fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_out_314159265359_refref_314159265359_refref_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "/mnt/diskfs/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "/mnt/diskfs/foobar_on_disk_aswell.txt", + "http://localhost/mnt/memfs/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + require.Equal(t, process, rs.tasks["314159265359"].config) +} diff --git a/restream/store/dummy.go b/restream/store/dummy.go deleted file mode 100644 index ea978e1f..00000000 --- a/restream/store/dummy.go +++ /dev/null @@ -1,37 +0,0 @@ -package store - -import ( - "github.com/datarhei/core/v16/log" -) - -type DummyConfig struct { - Logger log.Logger -} - -type dummyStore struct { - logger log.Logger -} - -func NewDummyStore(config DummyConfig) Store { - s := &dummyStore{ - logger: config.Logger, - } - - if s.logger == nil { - s.logger = log.New("") - } - - return s -} - -func (sb *dummyStore) Store(data StoreData) error { - sb.logger.Debug().Log("Data stored") - - return nil -} - -func (sb *dummyStore) Load() (StoreData, error) { - sb.logger.Debug().Log("Data loaded") - - return NewStoreData(), nil -} diff --git a/restream/store/json.go b/restream/store/json.go index b8edc834..36e5720e 100644 --- a/restream/store/json.go +++ b/restream/store/json.go @@ -4,24 +4,23 @@ import ( gojson "encoding/json" "fmt" "os" - "path" "sync" "github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" ) type JSONConfig struct { - Filepath string - FFVersion string - Logger log.Logger + Filesystem fs.Filesystem + Filepath string // Full path to the database file + Logger log.Logger } type jsonStore struct { 
- filepath string - ffversion string - logger log.Logger + fs fs.Filesystem + filepath string + logger log.Logger // Mutex to serialize access to the backend lock sync.RWMutex @@ -29,18 +28,26 @@ type jsonStore struct { var version uint64 = 4 -func NewJSONStore(config JSONConfig) Store { +func NewJSON(config JSONConfig) (Store, error) { s := &jsonStore{ - filepath: config.Filepath, - ffversion: config.FFVersion, - logger: config.Logger, + fs: config.Filesystem, + filepath: config.Filepath, + logger: config.Logger, + } + + if len(s.filepath) == 0 { + s.filepath = "/db.json" + } + + if s.fs == nil { + return nil, fmt.Errorf("no valid filesystem provided") } if s.logger == nil { s.logger = log.New("") } - return s + return s, nil } func (s *jsonStore) Load() (StoreData, error) { @@ -79,28 +86,11 @@ func (s *jsonStore) store(filepath string, data StoreData) error { return err } - dir := path.Dir(filepath) - name := path.Base(filepath) - - tmpfile, err := os.CreateTemp(dir, name) + _, _, err = s.fs.WriteFileSafe(filepath, jsondata) if err != nil { return err } - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return err - } - - if err := tmpfile.Close(); err != nil { - return err - } - - if err := file.Rename(tmpfile.Name(), filepath); err != nil { - return err - } - s.logger.WithField("file", filepath).Debug().Log("Stored data") return nil @@ -113,7 +103,7 @@ type storeVersion struct { func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) { r := NewStoreData() - _, err := os.Stat(filepath) + _, err := s.fs.Stat(filepath) if err != nil { if os.IsNotExist(err) { return r, nil @@ -122,7 +112,7 @@ func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) { return r, err } - jsondata, err := os.ReadFile(filepath) + jsondata, err := s.fs.ReadFile(filepath) if err != nil { return r, err } diff --git a/restream/store/json_test.go b/restream/store/json_test.go index 615f0bef..8b2c4698 100644 --- a/restream/store/json_test.go +++ b/restream/store/json_test.go @@ -1,40 +1,61 @@ package store import ( - "os" "testing" + "github.com/datarhei/core/v16/io/fs" "github.com/stretchr/testify/require" ) -func TestNew(t *testing.T) { - store := NewJSONStore(JSONConfig{}) +func getFS(t *testing.T) fs.Filesystem { + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: ".", + }) + require.NoError(t, err) + + info, err := fs.Stat("./fixtures/v4_empty.json") + require.NoError(t, err) + require.Equal(t, "/fixtures/v4_empty.json", info.Name()) + return fs +} + +func TestNew(t *testing.T) { + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + }) + require.NoError(t, err) require.NotEmpty(t, store) } func TestLoad(t *testing.T) { - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v4_empty.json", + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_empty.json", }) + require.NoError(t, err) - _, err := store.Load() - require.Equal(t, nil, err) + _, err = store.Load() + require.NoError(t, err) } func TestLoadFailed(t *testing.T) { - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v4_invalid.json", + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_invalid.json", }) + require.NoError(t, err) - _, err := store.Load() - require.NotEqual(t, nil, err) + _, err = store.Load() + require.Error(t, err) } func TestIsEmpty(t *testing.T) { - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v4_empty.json", + store, err := 
NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_empty.json", }) + require.NoError(t, err) data, err := store.Load() require.NoError(t, err) @@ -42,9 +63,11 @@ func TestIsEmpty(t *testing.T) { } func TestNotExists(t *testing.T) { - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v4_notexist.json", + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_notexist.json", }) + require.NoError(t, err) data, err := store.Load() require.NoError(t, err) @@ -52,11 +75,14 @@ func TestNotExists(t *testing.T) { } func TestStore(t *testing.T) { - os.Remove("./fixtures/v4_store.json") + fs := getFS(t) + fs.Remove("./fixtures/v4_store.json") - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v4_store.json", + store, err := NewJSON(JSONConfig{ + Filesystem: fs, + Filepath: "./fixtures/v4_store.json", }) + require.NoError(t, err) data, err := store.Load() require.NoError(t, err) @@ -70,13 +96,15 @@ func TestStore(t *testing.T) { require.NoError(t, err) require.Equal(t, data, data2) - os.Remove("./fixtures/v4_store.json") + fs.Remove("./fixtures/v4_store.json") } func TestInvalidVersion(t *testing.T) { - store := NewJSONStore(JSONConfig{ - Filepath: "./fixtures/v3_empty.json", + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v3_empty.json", }) + require.NoError(t, err) data, err := store.Load() require.Error(t, err) diff --git a/rtmp/rtmp.go b/rtmp/rtmp.go index fafb466b..4990b49d 100644 --- a/rtmp/rtmp.go +++ b/rtmp/rtmp.go @@ -6,6 +6,7 @@ import ( "crypto/tls" "fmt" "net" + "net/url" "path/filepath" "strings" "sync" @@ -326,18 +327,53 @@ func (s *server) log(who, action, path, message string, client net.Addr) { }).Log(message) } +// getToken returns the path and the token found in the URL. If the token +// was part of the path, the token is removed from the path. The token in +// the query string takes precedence. The token in the path is assumed to +// be the last path element. +func getToken(u *url.URL) (string, string) { + q := u.Query() + token := q.Get("token") + + if len(token) != 0 { + // The token was in the query. 
Return the unmodified path and the token + return u.Path, token + } + + pathElements := strings.Split(u.EscapedPath(), "/") + nPathElements := len(pathElements) + + if nPathElements == 0 { + return u.Path, "" + } + + // Return the path without the token + return strings.Join(pathElements[:nPathElements-1], "/"), pathElements[nPathElements-1] +} + // handlePlay is called when an RTMP client wants to play a stream func (s *server) handlePlay(conn *rtmp.Conn) { client := conn.NetConn().RemoteAddr() - // Check the token - q := conn.URL.Query() - token := q.Get("token") + defer conn.Close() - if len(s.token) != 0 && s.token != token { - s.log("PLAY", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", client) - conn.Close() - return + playPath := conn.URL.Path + + // Check the token in the URL if one is required + if len(s.token) != 0 { + path, token := getToken(conn.URL) + + if len(token) == 0 { + s.log("PLAY", "FORBIDDEN", path, "no streamkey provided", client) + return + } + + if s.token != token { + s.log("PLAY", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } /* @@ -361,14 +397,14 @@ func (s *server) handlePlay(conn *rtmp.Conn) { // Look for the stream s.lock.RLock() - ch := s.channels[conn.URL.Path] + ch := s.channels[playPath] s.lock.RUnlock() if ch != nil { // Set the metadata for the client conn.SetMetaData(ch.metadata) - s.log("PLAY", "START", conn.URL.Path, "", client) + s.log("PLAY", "START", playPath, "", client) // Get a cursor and apply filters cursor := ch.queue.Oldest() @@ -381,7 +417,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) { } // Adjust the timestamp such that the stream starts from 0 - filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: true}) + filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: false}) demuxer := &pktque.FilterDemuxer{ Filter: filters, @@ -395,32 +431,39 @@ func (s *server) handlePlay(conn *rtmp.Conn) { ch.RemoveSubscriber(id) - s.log("PLAY", "STOP", conn.URL.Path, "", client) + s.log("PLAY", "STOP", playPath, "", client) } else { - s.log("PLAY", "NOTFOUND", conn.URL.Path, "", client) + s.log("PLAY", "NOTFOUND", playPath, "", client) } - - conn.Close() } // handlePublish is called when an RTMP client wants to publish a stream func (s *server) handlePublish(conn *rtmp.Conn) { client := conn.NetConn().RemoteAddr() - // Check the token - q := conn.URL.Query() - token := q.Get("token") + defer conn.Close() - if len(s.token) != 0 && s.token != token { - s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", client) - conn.Close() - return + playPath := conn.URL.Path + + if len(s.token) != 0 { + path, token := getToken(conn.URL) + + if len(token) == 0 { + s.log("PUBLISH", "FORBIDDEN", path, "no streamkey provided", client) + return + } + + if s.token != token { + s.log("PUBLISH", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } // Check the app path - if !strings.HasPrefix(conn.URL.Path, s.app) { + if !strings.HasPrefix(playPath, s.app) { s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid app", client) - conn.Close() return } @@ -428,8 +471,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { streams, _ := conn.Streams() if len(streams) == 0 { - s.log("PUBLISH", "INVALID", conn.URL.Path, "no streams available", client) - conn.Close() + s.log("PUBLISH", "INVALID", playPath, "no streams available", client) return } @@ -437,7 +479,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { ch := 
s.channels[conn.URL.Path] if ch == nil { - reference := strings.TrimPrefix(strings.TrimSuffix(conn.URL.Path, filepath.Ext(conn.URL.Path)), s.app+"/") + reference := strings.TrimPrefix(strings.TrimSuffix(playPath, filepath.Ext(playPath)), s.app+"/") // Create a new channel ch = newChannel(conn, reference, s.collector) @@ -456,7 +498,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { } } - s.channels[conn.URL.Path] = ch + s.channels[playPath] = ch } else { ch = nil } @@ -464,27 +506,24 @@ func (s *server) handlePublish(conn *rtmp.Conn) { s.lock.Unlock() if ch == nil { - s.log("PUBLISH", "CONFLICT", conn.URL.Path, "already publishing", client) - conn.Close() + s.log("PUBLISH", "CONFLICT", playPath, "already publishing", client) return } - s.log("PUBLISH", "START", conn.URL.Path, "", client) + s.log("PUBLISH", "START", playPath, "", client) for _, stream := range streams { - s.log("PUBLISH", "STREAM", conn.URL.Path, stream.Type().String(), client) + s.log("PUBLISH", "STREAM", playPath, stream.Type().String(), client) } // Ingest the data avutil.CopyPackets(ch.queue, conn) s.lock.Lock() - delete(s.channels, conn.URL.Path) + delete(s.channels, playPath) s.lock.Unlock() ch.Close() - s.log("PUBLISH", "STOP", conn.URL.Path, "", client) - - conn.Close() + s.log("PUBLISH", "STOP", playPath, "", client) } diff --git a/rtmp/rtmp_test.go b/rtmp/rtmp_test.go new file mode 100644 index 00000000..20bb5274 --- /dev/null +++ b/rtmp/rtmp_test.go @@ -0,0 +1,26 @@ +package rtmp + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestToken(t *testing.T) { + data := [][]string{ + {"/foo/bar", "/foo", "bar"}, + {"/foo/bar?token=abc", "/foo/bar", "abc"}, + {"/foo/bar/abc", "/foo/bar", "abc"}, + } + + for _, d := range data { + u, err := url.Parse(d[0]) + require.NoError(t, err) + + path, token := getToken(u) + + require.Equal(t, d[1], path, "url=%s", u.String()) + require.Equal(t, d[2], token, "url=%s", u.String()) + } +} diff --git a/service/api/api.go b/service/api/api.go index 30060289..3afd02c5 100644 --- a/service/api/api.go +++ b/service/api/api.go @@ -9,6 +9,8 @@ import ( "net/http" "strings" "time" + + "github.com/datarhei/core/v16/log" ) type API interface { @@ -19,6 +21,7 @@ type Config struct { URL string Token string Client *http.Client + Logger log.Logger } type api struct { @@ -29,6 +32,8 @@ type api struct { accessTokenType string client *http.Client + + logger log.Logger } func New(config Config) (API, error) { @@ -36,6 +41,11 @@ func New(config Config) (API, error) { url: config.URL, token: config.Token, client: config.Client, + logger: config.Logger, + } + + if a.logger == nil { + a.logger = log.New("") } if !strings.HasSuffix(a.url, "/") { @@ -95,7 +105,7 @@ func (c *copyReader) Read(p []byte) (int, error) { if err == io.EOF { c.reader = c.copy - c.copy = new(bytes.Buffer) + c.copy = &bytes.Buffer{} } return i, err diff --git a/service/service.go b/service/service.go index 861927d5..c90c00b6 100644 --- a/service/service.go +++ b/service/service.go @@ -55,7 +55,7 @@ func New(config Config) (Service, error) { } if s.logger == nil { - s.logger = log.New("Service") + s.logger = log.New("") } s.logger = s.logger.WithField("url", config.URL) @@ -214,7 +214,10 @@ func (s *service) collect() (time.Duration, error) { return 15 * time.Minute, fmt.Errorf("failed to send monitor data to service: %w", err) } - s.logger.Debug().WithField("next", r.Next).Log("Sent monitor data") + s.logger.Debug().WithFields(log.Fields{ + "next": r.Next, + "data": data, + }).Log("Sent 
monitor data") if r.Next == 0 { r.Next = 5 * 60 @@ -230,6 +233,8 @@ func (s *service) Start() { go s.tick(ctx, time.Second) s.stopOnce = sync.Once{} + + s.logger.Info().Log("Connected") }) } @@ -237,6 +242,8 @@ func (s *service) Stop() { s.stopOnce.Do(func() { s.stopTicker() s.startOnce = sync.Once{} + + s.logger.Info().Log("Disconnected") }) } diff --git a/session/collector.go b/session/collector.go index a7084bc2..58dedac9 100644 --- a/session/collector.go +++ b/session/collector.go @@ -3,13 +3,11 @@ package session import ( "context" "encoding/json" - "os" - "path/filepath" "sort" "sync" "time" - "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/net" @@ -244,6 +242,7 @@ type collector struct { persist struct { enable bool + fs fs.Filesystem path string interval time.Duration done context.CancelFunc @@ -275,7 +274,7 @@ const ( // NewCollector returns a new collector according to the provided configuration. If such a // collector can't be created, a NullCollector is returned. func NewCollector(config CollectorConfig) Collector { - collector, err := newCollector("", "", nil, config) + collector, err := newCollector("", nil, nil, config) if err != nil { return NewNullCollector() } @@ -285,7 +284,7 @@ func NewCollector(config CollectorConfig) Collector { return collector } -func newCollector(id, persistPath string, logger log.Logger, config CollectorConfig) (*collector, error) { +func newCollector(id string, persistFS fs.Filesystem, logger log.Logger, config CollectorConfig) (*collector, error) { c := &collector{ maxRxBitrate: float64(config.MaxRxBitrate), maxTxBitrate: float64(config.MaxTxBitrate), @@ -379,11 +378,12 @@ func newCollector(id, persistPath string, logger log.Logger, config CollectorCon c.history.Sessions = make(map[string]totals) - c.persist.enable = len(persistPath) != 0 - c.persist.path = persistPath + c.persist.enable = persistFS != nil + c.persist.fs = persistFS + c.persist.path = "/" + id + ".json" c.persist.interval = config.PersistInterval - c.loadHistory(c.persist.path, &c.history) + c.loadHistory(c.persist.fs, c.persist.path, &c.history) c.stopOnce.Do(func() {}) @@ -433,7 +433,7 @@ func (c *collector) Persist() { c.lock.history.RLock() defer c.lock.history.RUnlock() - c.saveHistory(c.persist.path, &c.history) + c.saveHistory(c.persist.fs, c.persist.path, &c.history) } func (c *collector) persister(ctx context.Context, interval time.Duration) { @@ -450,17 +450,20 @@ func (c *collector) persister(ctx context.Context, interval time.Duration) { } } -func (c *collector) loadHistory(path string, data *history) { - c.logger.WithComponent("SessionStore").WithField("path", path).Debug().Log("Loading history") - - if len(path) == 0 { +func (c *collector) loadHistory(fs fs.Filesystem, path string, data *history) { + if fs == nil { return } + c.logger.WithComponent("SessionStore").WithFields(log.Fields{ + "base": fs.Metadata("base"), + "path": path, + }).Debug().Log("Loading history") + c.lock.persist.Lock() defer c.lock.persist.Unlock() - jsondata, err := os.ReadFile(path) + jsondata, err := fs.ReadFile(path) if err != nil { return } @@ -470,12 +473,15 @@ func (c *collector) loadHistory(path string, data *history) { } } -func (c *collector) saveHistory(path string, data *history) { - if len(path) == 0 { +func (c *collector) saveHistory(fs fs.Filesystem, path string, data *history) { + if fs == nil { return } - c.logger.WithComponent("SessionStore").WithField("path", 
path).Debug().Log("Storing history") + c.logger.WithComponent("SessionStore").WithFields(log.Fields{ + "base": fs.Metadata("base"), + "path": path, + }).Debug().Log("Storing history") c.lock.persist.Lock() defer c.lock.persist.Unlock() @@ -485,27 +491,10 @@ func (c *collector) saveHistory(path string, data *history) { return } - dir := filepath.Dir(path) - filename := filepath.Base(path) - - tmpfile, err := os.CreateTemp(dir, filename) + _, _, err = fs.WriteFileSafe(path, jsondata) if err != nil { return } - - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return - } - - if err := tmpfile.Close(); err != nil { - return - } - - if err := file.Rename(tmpfile.Name(), path); err != nil { - return - } } func (c *collector) IsCollectableIP(ip string) bool { diff --git a/session/collector_test.go b/session/collector_test.go index 2e5b44f8..4e9a0d52 100644 --- a/session/collector_test.go +++ b/session/collector_test.go @@ -8,7 +8,7 @@ import ( ) func TestRegisterSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Hour, SessionTimeout: time.Hour, }) @@ -31,7 +31,7 @@ func TestRegisterSession(t *testing.T) { } func TestInactiveSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -52,7 +52,7 @@ func TestInactiveSession(t *testing.T) { } func TestActivateSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Second, }) @@ -73,7 +73,7 @@ func TestActivateSession(t *testing.T) { } func TestIngress(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -92,7 +92,7 @@ func TestIngress(t *testing.T) { } func TestEgress(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -111,7 +111,7 @@ func TestEgress(t *testing.T) { } func TestNbSessions(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Hour, SessionTimeout: time.Hour, }) diff --git a/session/registry.go b/session/registry.go index adf40530..405f010a 100644 --- a/session/registry.go +++ b/session/registry.go @@ -2,20 +2,18 @@ package session import ( "fmt" - "os" - "path/filepath" "regexp" "sync" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" ) // Config is the configuration for creating a new registry type Config struct { - // PersistDir is a path to the directory where the session - // history will be persisted. If it is an empty value, the + // PersistFS is a filesystem in whose root the session history will be persisted. If it is nil, the // history will not be persisted. - PersistDir string + PersistFS fs.Filesystem // Logger is an instance of a logger. If it is nil, no logs // will be written. 
@@ -52,9 +50,9 @@ type Registry interface { } type registry struct { - collector map[string]*collector - persistDir string - logger log.Logger + collector map[string]*collector + persistFS fs.Filesystem + logger log.Logger lock sync.Mutex } @@ -63,21 +61,15 @@ type registry struct { // is non-nil if the PersistDir from the config can't be created. func New(conf Config) (Registry, error) { r := &registry{ - collector: make(map[string]*collector), - persistDir: conf.PersistDir, - logger: conf.Logger, + collector: make(map[string]*collector), + persistFS: conf.PersistFS, + logger: conf.Logger, } if r.logger == nil { r.logger = log.New("Session") } - if len(r.persistDir) != 0 { - if err := os.MkdirAll(r.persistDir, 0700); err != nil { - return nil, err - } - } - return r, nil } @@ -99,12 +91,7 @@ func (r *registry) Register(id string, conf CollectorConfig) (Collector, error) return nil, fmt.Errorf("a collector with the ID '%s' already exists", id) } - persistPath := "" - if len(r.persistDir) != 0 { - persistPath = filepath.Join(r.persistDir, id+".json") - } - - m, err := newCollector(id, persistPath, r.logger, conf) + m, err := newCollector(id, r.persistFS, r.logger, conf) if err != nil { return nil, err } diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60..8bf0e5b7 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000..94b9c443 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5..a9e0d45c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf..3e8b1325 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. 
- SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000..7e3145a2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807..9216e0a4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a8216..26df13bb 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a..e86f1b5f 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise 
unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2..1c1638fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 00000000..ac12e485 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go_import_path: github.com/dustin/go-humanize +go: + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - stable + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - diff -u <(echo -n) <(gofmt -d -s .) + - go vet . + - go install -v -race ./... + - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 00000000..8d9a94a9 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 00000000..7d0b16b3 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. 
+ +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. 
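A minimal, self-contained program using the subpackage might look like the following; the `english` import path is inferred from the repository layout rather than stated in this README:

```go
// Sketch only: the import path below is assumed from the repo layout.
package main

import (
	"fmt"

	"github.com/dustin/go-humanize/english"
)

func main() {
	fmt.Println(english.Plural(42, "object", "")) // prints "42 objects"
}
```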
+ +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 00000000..f49dc337 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 00000000..3b015fd5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,189 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints 
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 00000000..0b498f48 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. +const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 00000000..520ae3e5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. 
+func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 00000000..2bc83a03 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,41 @@ +//go:build go1.6 +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 00000000..bce923f3 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,49 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 00000000..a2c2da31 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). 
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 00000000..6470d0d4
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+	"math"
+	"strconv"
+)
+
+var (
+	renderFloatPrecisionMultipliers = [...]float64{
+		1,
+		10,
+		100,
+		1000,
+		10000,
+		100000,
+		1000000,
+		10000000,
+		100000000,
+		1000000000,
+	}
+
+	renderFloatPrecisionRounders = [...]float64{
+		0.5,
+		0.05,
+		0.005,
+		0.0005,
+		0.00005,
+		0.000005,
+		0.0000005,
+		0.00000005,
+		0.000000005,
+		0.0000000005,
+	}
+)
+
+// FormatFloat produces a formatted number as a string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,######" => "12.345,678900"
+// "" (aka default format) => "12,345.67"
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within templates.
+func FormatFloat(format string, n float64) string {
+	// Special cases:
+	//   NaN  = "NaN"
+	//   +Inf = "Infinity"
+	//   -Inf = "-Infinity"
+	if math.IsNaN(n) {
+		return "NaN"
+	}
+	if n > math.MaxFloat64 {
+		return "Infinity"
+	}
+	if n < (0.0 - math.MaxFloat64) {
+		return "-Infinity"
+	}
+
+	// default format
+	precision := 2
+	decimalStr := "."
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 00000000..43d88a86 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 00000000..8b850198 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,127 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 00000000..dd3fbf5e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. 
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index f5d551ca..30f2f2a6 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -54,9 +54,9 @@ import "github.com/golang-jwt/jwt/v4" See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples) ## Extensions @@ -96,7 +96,7 @@ A token is simply a JSON object that is signed by its author. this tells you exa * The author of the token was in the possession of the signing secret * The data has not been modified since it was signed -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard. ### Choosing a Signing Method @@ -110,10 +110,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. 
Here are the most common ones: -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation -* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth @@ -131,7 +131,7 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go index 9d95cad2..364cec87 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -265,9 +265,5 @@ func verifyIss(iss string, cmp string, required bool) bool { if iss == "" { return !required } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index 2f61a69d..c0a6f692 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -42,6 +42,13 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. 
+// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go index 3cb0f3f0..71e909ea 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -99,6 +99,11 @@ func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token return NewParser(options...).Parse(tokenString, keyFunc) } +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) } diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000..955dc0be --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000..15556530 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000..449e67cd --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..c8a9fbb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..313a0f88 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
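The ParseWithClaims documentation added above stresses embedding a non-pointer claims type. A minimal sketch of the intended call pattern, assuming an HMAC-signed token; `AppClaims`, the token string, and the secret are illustrative names, not part of this changeset:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// AppClaims embeds jwt.RegisteredClaims by value (not as a pointer),
// as the ParseWithClaims note recommends, so the parser can fill it
// without a nil-pointer panic.
type AppClaims struct {
	Role string `json:"role"`
	jwt.RegisteredClaims
}

func main() {
	tokenString := "<jwt from the client>" // placeholder for illustration

	token, err := jwt.ParseWithClaims(tokenString, &AppClaims{}, func(t *jwt.Token) (interface{}, error) {
		// Reject tokens signed with an unexpected method before returning the key.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte("illustrative-secret"), nil
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	if claims, ok := token.Claims.(*AppClaims); ok && token.Valid {
		fmt.Println("role:", claims.Role)
	}
}
```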
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000..c589addf --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,85 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 00000000..92d2cc4a --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 00000000..f6b8aeab --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + 
defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + 
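floatAny's numeric conversions (continued just below) truncate toward zero, and the unsigned variants clamp negative inputs to zero rather than wrapping. A small sketch via the exported `Wrap` helper; the printed values assume the conversion rules shown in this file:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Wrap stores the float64 in a floatAny value.
	v := jsoniter.Wrap(-3.7)

	fmt.Println(v.ToFloat64()) // -3.7
	fmt.Println(v.ToInt())     // -3: Go's float-to-int conversion truncates toward zero
	fmt.Println(v.ToUint())    // 0: negative values clamp to zero instead of wrapping
}
```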
+func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any 
*int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000..9d1e901a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + 
return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 
+} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
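+					// keep only sub-paths that resolved to a valid value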
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
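+				// skip map entries whose sub-path did not resolve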
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000..1f12f661 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000..b45ef688 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000..2adcdc3b --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := 
cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type 
lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 
0 {
+		if iter.Error == io.EOF {
+			return nil
+		}
+		return iter.Error
+	}
+	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+	return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+	stream := NewStream(cfg, writer, 512)
+	return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+	iter := Parse(cfg, reader, 512)
+	return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.Skip()
+	return iter.Error == nil
+}
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 00000000..3095662b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/> [1,2] => 1|original json|
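+
+For example, via the `Any` conversions (an illustrative sketch; `ConfigDefault.Get`
+is defined in `config.go` in this diff):
+
+    jsoniter.ConfigDefault.Get([]byte(`{"n": "123.23xxxw"}`), "n").ToInt()   // 123
+    jsoniter.ConfigDefault.Get([]byte(`{"n": "12.4xxa"}`), "n").ToFloat64() // 12.4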
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 00000000..29b31cf7
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,349 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// ValueType the type for JSON element
+type ValueType int
+
+const (
+	// InvalidValue invalid JSON element
+	InvalidValue ValueType = iota
+	// StringValue JSON element "string"
+	StringValue
+	// NumberValue JSON element 100 or 0.10
+	NumberValue
+	// NilValue JSON element null
+	NilValue
+	// BoolValue JSON element true or false
+	BoolValue
+	// ArrayValue JSON element []
+	ArrayValue
+	// ObjectValue JSON element {}
+	ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+	hexDigits = make([]byte, 256)
+	for i := 0; i < len(hexDigits); i++ {
+		hexDigits[i] = 255
+	}
+	for i := '0'; i <= '9'; i++ {
+		hexDigits[i] = byte(i - '0')
+	}
+	for i := 'a'; i <= 'f'; i++ {
+		hexDigits[i] = byte((i - 'a') + 10)
+	}
+	for i := 'A'; i <= 'F'; i++ {
+		hexDigits[i] = byte((i - 'A') + 10)
+	}
+	valueTypes = make([]ValueType, 256)
+	for i := 0; i < len(valueTypes); i++ {
+		valueTypes[i] = InvalidValue
+	}
+	valueTypes['"'] = StringValue
+	valueTypes['-'] = NumberValue
+	valueTypes['0'] = NumberValue
+	valueTypes['1'] = NumberValue
+	valueTypes['2'] = NumberValue
+	valueTypes['3'] = NumberValue
+	valueTypes['4'] = NumberValue
+	valueTypes['5'] = NumberValue
+	valueTypes['6'] = NumberValue
+	valueTypes['7'] = NumberValue
+	valueTypes['8'] = NumberValue
+	valueTypes['9'] = NumberValue
+	valueTypes['t'] = BoolValue
+	valueTypes['f'] = BoolValue
+	valueTypes['n'] = NilValue
+	valueTypes['['] = ArrayValue
+	valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader like object, with JSON specific read functions.
+// Error is not returned as a return value, but stored as the Error member on this iterator instance.
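+//
+// A minimal usage sketch (illustrative only):
+//
+//	iter := ParseString(ConfigDefault, `{"count": 3}`)
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		fmt.Println(field, iter.Read())
+//	}
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		// handle malformed input
+//	}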
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
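+//
+// The recorded error quotes roughly ten bytes around the failure position plus
+// a wider fifty-byte context window. Callers typically check it afterwards
+// (illustrative sketch):
+//
+//	val := iter.ReadInt()
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		// val is unusable; the input was not a valid integer
+//	}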
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
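+//
+// Values map as in encoding/json: object => map[string]interface{}, array =>
+// []interface{}, number => float64 (or json.Number when UseNumber is set),
+// string => string, bool => bool, null => nil. For example (illustrative sketch):
+//
+//	iter := ParseString(ConfigDefault, `[1, "two", null]`)
+//	fmt.Println(iter.Read()) // [1 two <nil>]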
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 00000000..204fe0e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
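+//
+// Typical iteration (illustrative sketch):
+//
+//	iter := ParseString(ConfigDefault, `[1, 2, 3]`)
+//	sum := 0
+//	for iter.ReadArray() {
+//		sum += iter.ReadInt()
+//	}
+//	// sum == 6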
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000..8a3d8b6f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,342 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty 
number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = 
iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000..d786a89f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + 
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = 
value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000..58ee89c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = 
iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git 
a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000..e91eefb1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
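+ // editorial note (not upstream): only the bytes still sitting in buf are appended here; bytes consumed across earlier buffer refills are assumed to have been flushed into iter.captured by the refill path.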
+} + +// Skip skips a json value and positions the iterator at the next json value +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000..9303de41 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, decrease level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipArray", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, decrease level + level-- + if !iter.decrementDepth() { + return + }
+ + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000..6cf66d04 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + 
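+// Editorial sketch, not part of upstream jsoniter: typical use of Skip, which
+// the strict skipObject/skipArray below ultimately back. Assumes fmt is imported:
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a":{"b":[1,2]},"c":3}`)
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		if field == "c" {
+//			fmt.Println(iter.ReadInt()) // 3
+//		} else {
+//			iter.Skip() // steps over the whole nested value {"b":[1,2]}
+//		}
+//	}
+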
+func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
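+//
+// Editorial example, not upstream documentation: copy the slice out if it must
+// outlive the next read on the iterator:
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"user-1"`)
+//	b := iter.ReadStringAsSlice() // aliases the iterator's buffer
+//	id := string(b)               // safe copy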
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000..c2934f91 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
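+//
+// Editorial illustration, not upstream doc, of the element-by-element style
+// (assuming fmt is imported):
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1,2,3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}
+//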
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000..e2389b56 --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000..39acb320 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
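+//
+// Editorial sketch, not upstream: a hand-written ValEncoder has this shape and
+// could be installed with RegisterTypeEncoder("bool", ...); the unsafe.Pointer
+// points directly at the value being encoded:
+//
+//	type boolAsIntEncoder struct{}
+//
+//	func (boolAsIntEncoder) IsEmpty(ptr unsafe.Pointer) bool { return !*(*bool)(ptr) }
+//
+//	func (boolAsIntEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+//		if *(*bool)(ptr) {
+//			stream.WriteInt(1)
+//		} else {
+//			stream.WriteInt(0)
+//		}
+//	}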
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ == nil || typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if 
decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000..13a0b7b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = 
arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000..8b6bc8b4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000..74a97bfe --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
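+//
+// Editorial note, not upstream: concrete extensions usually embed DummyExtension
+// (below) and override only what they need, e.g. renaming every field, where
+// toSnake is a hypothetical helper:
+//
+//	type snakeCaseExtension struct{ jsoniter.DummyExtension }
+//
+//	func (snakeCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
+//		for _, binding := range sd.Fields {
+//			binding.FromNames = []string{toSnake(binding.Field.Name())}
+//			binding.ToNames = binding.FromNames
+//		}
+//	}
+//
+//	// registered once at startup:
+//	// jsoniter.RegisterExtension(&snakeCaseExtension{})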
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
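+ // editorial note (not upstream): prepending the embedded field's index records the full path from the outer struct down to this promoted field; sortableBindings later orders fields by these levels.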
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
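+ // editorial note (not upstream): the first comma-separated part of the tag, when non-empty, replaces the Go field name for both decoding (FromNames) and encoding (ToNames).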
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000..98d45c1e --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new 
file mode 100644 index 00000000..eba434f2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,76 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000..58296713 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + 
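+ // editorial note (not upstream): per-config extra extensions are consulted after the config's own decoderExtension and before the built-in key kinds below.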
decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + 
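+		// allocate storage for the next key and decode it before reading the ':' and value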
decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + 
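+		// encode the element right after its key so keyValue below captures the full "key": value byte range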
encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000..3e21f375 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + 
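+			// the plain string encoder quotes and escapes the marshaled text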
checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := 
(obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000..f88722d1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) 
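+		// []byte accepts either a base64-encoded string or a plain JSON array; keep the slice decoder for the array form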
+ return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec 
*int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + 
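+	// false is the zero value, so a bool field counts as empty exactly when it is false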
return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000..fa71f474 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type 
dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000..9441d79d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := 
decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000..92ae912d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1097 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var 
fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else 
if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = 
fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = 
fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder 
struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case 
decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder 
*eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + 
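+			// field names are matched by precomputed hash; on a hit, decode directly into the struct field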
decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +}
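The decoders above complete the struct decoding half of the package: the fixed-arity *FieldsStructDecoder variants dispatch each incoming key through readFieldHash, so matching a field costs an int64 comparison rather than a string comparison, while stringModeNumberDecoder and stringModeStringDecoder implement the ",string" tag option by stripping the surrounding quotes before delegating to the element decoder. A minimal usage sketch of the ",string" path follows; the Product type and the sample values are illustrative, not taken from this codebase.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Product routes Price through the string-mode decoder via the `,string`
// option: the JSON value arrives quoted but lands in a plain float64 field.
type Product struct {
	Name  string  `json:"name"`
	Price float64 `json:"price,string"`
}

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	var p Product
	// The quotes around "9.99" are stripped before the float decoder runs.
	if err := json.Unmarshal([]byte(`{"name":"disk","price":"9.99"}`), &p); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%s costs %.2f\n", p.Name, p.Price) // disk costs 9.99
}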
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000..152e3ef5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +}
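resolveConflictBinding above mirrors the visibility rules of encoding/json: a tagged field beats an untagged one, a shallower field beats a deeper one, and two tagged fields at the same depth knock each other out. A small sketch of the observable effect under those rules; the Base and Wrapper types are illustrative names, not from this codebase.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Base struct {
	ID string // promoted from one level down, untagged
}

type Wrapper struct {
	Base
	ID string `json:"ID"` // tagged and shallower: wins the conflict
}

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	out, err := json.Marshal(Wrapper{Base: Base{ID: "inner"}, ID: "outer"})
	if err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Println(string(out)) // {"ID":"outer"}
}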
+ +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000..23d8a3ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// Stream is an io.Writer-like object with JSON-specific write functions. +// Errors are not returned as return values, but stored in the Error field of the stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream creates a new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil to write to the internal buffer only. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool that can provide more streams with the same configuration. +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuses this stream instance by assigning a new writer. +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer returns the internal buffer; if the writer is nil, use this method to take the result. +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows appending to the internal buffer directly. +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +}
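stream.go is the token-level write layer: every helper appends bytes to the internal buffer, and Flush pushes the buffer to the optional io.Writer. A short usage sketch of the public surface defined in this file, writing one object by hand; the field names and values are arbitrary.

package main

import (
	"fmt"
	"os"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// With a non-nil writer the tokens accumulate in the buffer until
	// Flush; with a nil writer the result is taken via stream.Buffer().
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("name")
	stream.WriteString("core")
	stream.WriteMore()
	stream.WriteObjectField("ok")
	stream.WriteBool(true)
	stream.WriteObjectEnd()
	if err := stream.Flush(); err != nil {
		fmt.Fprintln(os.Stderr, "flush error:", err)
	}
	// Prints: {"name":"core","ok":true}
}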
+ +// writeByte appends a single byte to the internal buffer. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + _, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[:0] + return nil +} + +// WriteRaw writes a string out without quotes, as raw bytes. +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) +} + +// WriteNil writes null to the stream. +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue writes true to the stream. +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse writes false to the stream. +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool writes true or false to the stream. +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart writes { with optional indentation. +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField writes "field": with optional indentation. +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd writes } with optional indentation. +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject writes {}. +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore writes , with optional indentation. +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart writes [ with optional indentation. +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray writes []. +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd writes ] with optional indentation. +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000..826aa594 --- /dev/null +++ 
b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 writes a float32 to the stream. +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy writes a float32 to the stream with only 6 digits of precision; it is much faster than WriteFloat32. +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 writes a float64 to the stream. +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: for float64 the cutoffs are compared on the float64 value directly. 
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy writes a float64 to the stream with only 6 digits of precision; it is much faster than WriteFloat64. +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000..d1059ee4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 writes a uint8 to the stream. +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 writes an int8 to the stream. +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 writes a uint16 to the stream. +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt16 writes an int16 to the stream. +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 writes a uint32 to the stream. +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +}
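The digits table above packs, for every value from 0 to 999, its three ASCII digits plus a leading-zero count into a single uint32, so the integer writers emit base-1000 groups with one table lookup per group and no division by ten inside the loop. A standalone sketch of the same technique; digitsDemo and appendUint32 are illustrative names, not part of the library.

package main

import "fmt"

// digitsDemo holds, for each value 0..999, three ASCII digits packed into
// one uint32, plus a 2-bit count in the top byte of leading zeros to skip
// when the value is the most significant group of a number.
var digitsDemo [1000]uint32

func init() {
	for i := uint32(0); i < 1000; i++ {
		digitsDemo[i] = (((i / 100) + '0') << 16) | ((((i / 10) % 10) + '0') << 8) | (i%10 + '0')
		if i < 10 {
			digitsDemo[i] += 2 << 24 // two leading zeros to skip
		} else if i < 100 {
			digitsDemo[i] += 1 << 24 // one leading zero to skip
		}
	}
}

// appendUint32 renders val in decimal by splitting it into base-1000
// groups: the first group honors the leading-zero hint, the rest are
// always emitted as three digits.
func appendUint32(buf []byte, val uint32) []byte {
	var groups [4]uint32
	n := 0
	for {
		groups[n] = digitsDemo[val%1000]
		n++
		val /= 1000
		if val == 0 {
			break
		}
	}
	v := groups[n-1]
	switch v >> 24 {
	case 0:
		buf = append(buf, byte(v>>16), byte(v>>8), byte(v))
	case 1:
		buf = append(buf, byte(v>>8), byte(v))
	default:
		buf = append(buf, byte(v))
	}
	for i := n - 2; i >= 0; i-- {
		v = groups[i]
		buf = append(buf, byte(v>>16), byte(v>>8), byte(v))
	}
	return buf
}

func main() {
	fmt.Println(string(appendUint32(nil, 0)))       // 0
	fmt.Println(string(appendUint32(nil, 42)))      // 42
	fmt.Println(string(appendUint32(nil, 1234567))) // 1234567
}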
+ +// WriteInt32 writes an int32 to the stream. +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 writes a uint64 to the stream. +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 writes an int64 to the stream. +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt writes an int to the stream. +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint writes a uint to the stream. +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000..54c2ba0b --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML