From 18be75d013cd15ab911866025cfd3c0069a07ff2 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 22 Nov 2022 21:25:54 +0100 Subject: [PATCH 01/39] Use new streamid format for {srt} placeholder --- app/api/api.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/api/api.go b/app/api/api.go index 77145852..d6dd727d 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -468,9 +468,9 @@ func (a *api) start() error { host = "localhost" } - template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid=#!:m={mode},r={name}" + template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid={name},mode:{mode}" if len(cfg.SRT.Token) != 0 { - template += ",token=" + cfg.SRT.Token + template += ",token:" + cfg.SRT.Token } if len(cfg.SRT.Passphrase) != 0 { template += "&passphrase=" + cfg.SRT.Passphrase From d0262cc88769e8291a49a623e080a528f796827b Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 27 Dec 2022 09:47:59 +0100 Subject: [PATCH 02/39] Add logging for service --- service/api/api.go | 12 +++++++++++- service/service.go | 11 +++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/service/api/api.go b/service/api/api.go index 30060289..3afd02c5 100644 --- a/service/api/api.go +++ b/service/api/api.go @@ -9,6 +9,8 @@ import ( "net/http" "strings" "time" + + "github.com/datarhei/core/v16/log" ) type API interface { @@ -19,6 +21,7 @@ type Config struct { URL string Token string Client *http.Client + Logger log.Logger } type api struct { @@ -29,6 +32,8 @@ type api struct { accessTokenType string client *http.Client + + logger log.Logger } func New(config Config) (API, error) { @@ -36,6 +41,11 @@ func New(config Config) (API, error) { url: config.URL, token: config.Token, client: config.Client, + logger: config.Logger, + } + + if a.logger == nil { + a.logger = log.New("") } if !strings.HasSuffix(a.url, "/") { @@ -95,7 +105,7 @@ func (c *copyReader) Read(p []byte) (int, error) { if err == 
io.EOF { c.reader = c.copy - c.copy = new(bytes.Buffer) + c.copy = &bytes.Buffer{} } return i, err diff --git a/service/service.go b/service/service.go index 861927d5..c90c00b6 100644 --- a/service/service.go +++ b/service/service.go @@ -55,7 +55,7 @@ func New(config Config) (Service, error) { } if s.logger == nil { - s.logger = log.New("Service") + s.logger = log.New("") } s.logger = s.logger.WithField("url", config.URL) @@ -214,7 +214,10 @@ func (s *service) collect() (time.Duration, error) { return 15 * time.Minute, fmt.Errorf("failed to send monitor data to service: %w", err) } - s.logger.Debug().WithField("next", r.Next).Log("Sent monitor data") + s.logger.Debug().WithFields(log.Fields{ + "next": r.Next, + "data": data, + }).Log("Sent monitor data") if r.Next == 0 { r.Next = 5 * 60 @@ -230,6 +233,8 @@ func (s *service) Start() { go s.tick(ctx, time.Second) s.stopOnce = sync.Once{} + + s.logger.Info().Log("Connected") }) } @@ -237,6 +242,8 @@ func (s *service) Stop() { s.stopOnce.Do(func() { s.stopTicker() s.startOnce = sync.Once{} + + s.logger.Info().Log("Disconnected") }) } From 1a9ef8b7c98136c759b8463f22cac1c35202fa0b Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 27 Dec 2022 10:26:49 +0100 Subject: [PATCH 03/39] Add Let's Debug auto TLS error diagnostic --- app/api/api.go | 33 +- go.mod | 7 +- go.sum | 32 + .../github.com/eggsampler/acme/v3/.gitignore | 3 + .../github.com/eggsampler/acme/v3/.travis.yml | 26 + vendor/github.com/eggsampler/acme/v3/LICENSE | 21 + vendor/github.com/eggsampler/acme/v3/Makefile | 66 + .../github.com/eggsampler/acme/v3/README.md | 43 + .../github.com/eggsampler/acme/v3/THIRD-PARTY | 35 + .../github.com/eggsampler/acme/v3/account.go | 128 + vendor/github.com/eggsampler/acme/v3/acme.go | 294 + .../eggsampler/acme/v3/authorization.go | 43 + .../github.com/eggsampler/acme/v3/autocert.go | 430 + .../eggsampler/acme/v3/certificate.go | 106 + .../eggsampler/acme/v3/challenge.go | 102 + 
vendor/github.com/eggsampler/acme/v3/jws.go | 187 + vendor/github.com/eggsampler/acme/v3/nonce.go | 45 + .../github.com/eggsampler/acme/v3/options.go | 70 + vendor/github.com/eggsampler/acme/v3/order.go | 136 + .../github.com/eggsampler/acme/v3/problem.go | 65 + vendor/github.com/eggsampler/acme/v3/types.go | 163 + .../github.com/letsdebug/letsdebug/.gitignore | 6 + .../letsdebug/letsdebug/.travis.yml | 23 + .../github.com/letsdebug/letsdebug/Makefile | 32 + .../github.com/letsdebug/letsdebug/README.md | 170 + .../github.com/letsdebug/letsdebug/checker.go | 107 + .../github.com/letsdebug/letsdebug/context.go | 81 + .../github.com/letsdebug/letsdebug/dns01.go | 156 + .../letsdebug/letsdebug/dns_util.go | 127 + .../github.com/letsdebug/letsdebug/generic.go | 861 ++ .../github.com/letsdebug/letsdebug/http01.go | 268 + .../letsdebug/letsdebug/http_util.go | 310 + .../letsdebug/letsdebug/letsdebug.go | 85 + .../github.com/letsdebug/letsdebug/problem.go | 75 + vendor/github.com/lib/pq/.gitignore | 4 + vendor/github.com/lib/pq/.travis.sh | 73 + vendor/github.com/lib/pq/.travis.yml | 44 + vendor/github.com/lib/pq/LICENSE.md | 8 + vendor/github.com/lib/pq/README.md | 30 + vendor/github.com/lib/pq/TESTS.md | 33 + vendor/github.com/lib/pq/array.go | 756 ++ vendor/github.com/lib/pq/buf.go | 91 + vendor/github.com/lib/pq/conn.go | 1996 ++++ vendor/github.com/lib/pq/conn_go18.go | 149 + vendor/github.com/lib/pq/connector.go | 115 + vendor/github.com/lib/pq/copy.go | 307 + vendor/github.com/lib/pq/doc.go | 268 + vendor/github.com/lib/pq/encode.go | 622 ++ vendor/github.com/lib/pq/error.go | 515 + vendor/github.com/lib/pq/krb.go | 27 + vendor/github.com/lib/pq/notice.go | 71 + vendor/github.com/lib/pq/notify.go | 858 ++ vendor/github.com/lib/pq/oid/doc.go | 6 + vendor/github.com/lib/pq/oid/types.go | 343 + vendor/github.com/lib/pq/rows.go | 93 + vendor/github.com/lib/pq/scram/scram.go | 264 + vendor/github.com/lib/pq/ssl.go | 175 + vendor/github.com/lib/pq/ssl_permissions.go | 20 
+ vendor/github.com/lib/pq/ssl_windows.go | 9 + vendor/github.com/lib/pq/url.go | 76 + vendor/github.com/lib/pq/user_posix.go | 24 + vendor/github.com/lib/pq/user_windows.go | 27 + vendor/github.com/lib/pq/uuid.go | 23 + vendor/github.com/miekg/unbound/.travis.yml | 8 + vendor/github.com/miekg/unbound/README.md | 14 + vendor/github.com/miekg/unbound/dns.go | 87 + vendor/github.com/miekg/unbound/lookup.go | 164 + vendor/github.com/miekg/unbound/unbound.go | 386 + .../weppos/publicsuffix-go/LICENSE.txt | 21 + .../net/publicsuffix/publicsuffix.go | 39 + .../publicsuffix/publicsuffix.go | 544 + .../publicsuffix-go/publicsuffix/rules.go | 8847 +++++++++++++++++ vendor/modules.txt | 18 + 73 files changed, 21482 insertions(+), 9 deletions(-) create mode 100644 vendor/github.com/eggsampler/acme/v3/.gitignore create mode 100644 vendor/github.com/eggsampler/acme/v3/.travis.yml create mode 100644 vendor/github.com/eggsampler/acme/v3/LICENSE create mode 100644 vendor/github.com/eggsampler/acme/v3/Makefile create mode 100644 vendor/github.com/eggsampler/acme/v3/README.md create mode 100644 vendor/github.com/eggsampler/acme/v3/THIRD-PARTY create mode 100644 vendor/github.com/eggsampler/acme/v3/account.go create mode 100644 vendor/github.com/eggsampler/acme/v3/acme.go create mode 100644 vendor/github.com/eggsampler/acme/v3/authorization.go create mode 100644 vendor/github.com/eggsampler/acme/v3/autocert.go create mode 100644 vendor/github.com/eggsampler/acme/v3/certificate.go create mode 100644 vendor/github.com/eggsampler/acme/v3/challenge.go create mode 100644 vendor/github.com/eggsampler/acme/v3/jws.go create mode 100644 vendor/github.com/eggsampler/acme/v3/nonce.go create mode 100644 vendor/github.com/eggsampler/acme/v3/options.go create mode 100644 vendor/github.com/eggsampler/acme/v3/order.go create mode 100644 vendor/github.com/eggsampler/acme/v3/problem.go create mode 100644 vendor/github.com/eggsampler/acme/v3/types.go create mode 100644 
vendor/github.com/letsdebug/letsdebug/.gitignore create mode 100644 vendor/github.com/letsdebug/letsdebug/.travis.yml create mode 100644 vendor/github.com/letsdebug/letsdebug/Makefile create mode 100644 vendor/github.com/letsdebug/letsdebug/README.md create mode 100644 vendor/github.com/letsdebug/letsdebug/checker.go create mode 100644 vendor/github.com/letsdebug/letsdebug/context.go create mode 100644 vendor/github.com/letsdebug/letsdebug/dns01.go create mode 100644 vendor/github.com/letsdebug/letsdebug/dns_util.go create mode 100644 vendor/github.com/letsdebug/letsdebug/generic.go create mode 100644 vendor/github.com/letsdebug/letsdebug/http01.go create mode 100644 vendor/github.com/letsdebug/letsdebug/http_util.go create mode 100644 vendor/github.com/letsdebug/letsdebug/letsdebug.go create mode 100644 vendor/github.com/letsdebug/letsdebug/problem.go create mode 100644 vendor/github.com/lib/pq/.gitignore create mode 100644 vendor/github.com/lib/pq/.travis.sh create mode 100644 vendor/github.com/lib/pq/.travis.yml create mode 100644 vendor/github.com/lib/pq/LICENSE.md create mode 100644 vendor/github.com/lib/pq/README.md create mode 100644 vendor/github.com/lib/pq/TESTS.md create mode 100644 vendor/github.com/lib/pq/array.go create mode 100644 vendor/github.com/lib/pq/buf.go create mode 100644 vendor/github.com/lib/pq/conn.go create mode 100644 vendor/github.com/lib/pq/conn_go18.go create mode 100644 vendor/github.com/lib/pq/connector.go create mode 100644 vendor/github.com/lib/pq/copy.go create mode 100644 vendor/github.com/lib/pq/doc.go create mode 100644 vendor/github.com/lib/pq/encode.go create mode 100644 vendor/github.com/lib/pq/error.go create mode 100644 vendor/github.com/lib/pq/krb.go create mode 100644 vendor/github.com/lib/pq/notice.go create mode 100644 vendor/github.com/lib/pq/notify.go create mode 100644 vendor/github.com/lib/pq/oid/doc.go create mode 100644 vendor/github.com/lib/pq/oid/types.go create mode 100644 vendor/github.com/lib/pq/rows.go 
create mode 100644 vendor/github.com/lib/pq/scram/scram.go create mode 100644 vendor/github.com/lib/pq/ssl.go create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go create mode 100644 vendor/github.com/lib/pq/ssl_windows.go create mode 100644 vendor/github.com/lib/pq/url.go create mode 100644 vendor/github.com/lib/pq/user_posix.go create mode 100644 vendor/github.com/lib/pq/user_windows.go create mode 100644 vendor/github.com/lib/pq/uuid.go create mode 100644 vendor/github.com/miekg/unbound/.travis.yml create mode 100644 vendor/github.com/miekg/unbound/README.md create mode 100644 vendor/github.com/miekg/unbound/dns.go create mode 100644 vendor/github.com/miekg/unbound/lookup.go create mode 100644 vendor/github.com/miekg/unbound/unbound.go create mode 100644 vendor/github.com/weppos/publicsuffix-go/LICENSE.txt create mode 100644 vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go create mode 100644 vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go create mode 100644 vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go diff --git a/app/api/api.go b/app/api/api.go index d6dd727d..fb36559b 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -39,6 +39,8 @@ import ( "github.com/datarhei/core/v16/update" "github.com/caddyserver/certmagic" + "github.com/letsdebug/letsdebug" + "go.uber.org/zap" ) // The API interface is the implementation for the restreamer API. 
@@ -655,26 +657,28 @@ func (a *api) start() error { if cfg.TLS.Enable { if cfg.TLS.Auto { if len(cfg.Host.Name) == 0 { - return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME") + return fmt.Errorf("at least one host must be provided in host.name or CORE_HOST_NAME") } + certmagic.Default.Storage = &certmagic.FileStorage{ + Path: cfg.DB.Dir + "/cert", + } + certmagic.Default.DefaultServerName = cfg.Host.Name[0] + certmagic.Default.Logger = zap.NewNop() + certmagic.DefaultACME.Agreed = true certmagic.DefaultACME.Email = cfg.TLS.Email certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA certmagic.DefaultACME.DisableHTTPChallenge = false certmagic.DefaultACME.DisableTLSALPNChallenge = true - certmagic.DefaultACME.Logger = nil - - certmagic.Default.Storage = &certmagic.FileStorage{ - Path: cfg.DB.Dir + "/cert", - } - certmagic.Default.DefaultServerName = cfg.Host.Name[0] - certmagic.Default.Logger = nil + certmagic.DefaultACME.Logger = zap.NewNop() magic := certmagic.NewDefault() acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME) + acme.Logger = zap.NewNop() magic.Issuers = []certmagic.Issuer{acme} + magic.Logger = zap.NewNop() autocertManager = magic @@ -713,6 +717,19 @@ func (a *api) start() error { if err != nil { logger.Error().WithField("error", err).Log("Failed to acquire certificate") certerror = true + + problems, err := letsdebug.Check(host, letsdebug.HTTP01) + if err != nil { + logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition") + } + + for _, p := range problems { + logger.Error().WithFields(log.Fields{ + "name": p.Name, + "detail": p.Detail, + }).Log(p.Explanation) + } + break } diff --git a/go.mod b/go.mod index ae0e3c5f..c0984c44 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.4.0 github.com/labstack/echo/v4 v4.9.1 + github.com/letsdebug/letsdebug v1.6.1 github.com/lithammer/shortuuid/v4 v4.0.0 
github.com/mattn/go-isatty v0.0.16 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 @@ -26,6 +27,7 @@ require ( github.com/swaggo/swag v1.8.7 github.com/vektah/gqlparser/v2 v2.5.1 github.com/xeipuuv/gojsonschema v1.2.0 + go.uber.org/zap v1.23.0 golang.org/x/mod v0.6.0 ) @@ -37,6 +39,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/eggsampler/acme/v3 v3.1.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -53,6 +56,7 @@ require ( github.com/klauspost/cpuid/v2 v2.1.2 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect + github.com/lib/pq v1.8.0 // indirect github.com/libdns/libdns v0.2.1 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -60,6 +64,7 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mholt/acmez v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect @@ -73,13 +78,13 @@ require ( github.com/urfave/cli/v2 v2.8.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/weppos/publicsuffix-go v0.13.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect 
go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.23.0 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.1.0 // indirect golang.org/x/sys v0.1.0 // indirect diff --git a/go.sum b/go.sum index 8712f523..a845dca5 100644 --- a/go.sum +++ b/go.sum @@ -40,6 +40,7 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= @@ -89,11 +90,19 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eggsampler/acme/v3 v3.1.1 h1:hSze1Cw4bHtCUdiQE2R0GKfXjAuLirSFPUX1IBz9wKw= 
+github.com/eggsampler/acme/v3 v3.1.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -128,6 +137,7 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -136,6 +146,7 @@ github.com/golang-jwt/jwt v3.2.2+incompatible 
h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keL github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -206,6 +217,7 @@ github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws= github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -218,6 +230,7 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1/go.mod 
h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= @@ -243,6 +256,11 @@ github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8 github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/letsdebug/letsdebug v1.6.1 h1:ef4qwhKAXbyoLB2jGWsIWeI245UjyDYvOgenwr/pblA= +github.com/letsdebug/letsdebug v1.6.1/go.mod h1:Bl1mFMHJqyTb3kzsznBpfTpcQLKaChV7xCsWEIdA2Ew= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis= github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40= github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= @@ -263,13 +281,17 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 
h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80= github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba h1:RHTbLjrNIt6k3R4Aq2Q9KNBwFw8rZcZuoJVASoeB6Es= +github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba/go.mod h1:lGLaihw972wB1AFBO88/Q69nOTzLqG/qR/uSp2YBLgM= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -281,6 +303,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -338,6 +361,7 @@ github.com/shirou/gopsutil/v3 v3.22.10 
h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZ github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -378,6 +402,8 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4= github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= +github.com/weppos/publicsuffix-go v0.13.0 h1:0Tu1uzLBd1jPn4k6OnMmOPZH/l/9bj9kUOMMkoRs6Gg= +github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -473,6 +499,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -487,6 +514,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -534,6 +562,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -553,6 +582,7 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -618,6 +648,7 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -732,6 +763,7 @@ 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/vendor/github.com/eggsampler/acme/v3/.gitignore b/vendor/github.com/eggsampler/acme/v3/.gitignore new file mode 100644 index 00000000..236968e6 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/.gitignore @@ -0,0 +1,3 @@ +.idea/ +*.out +coverage* diff --git a/vendor/github.com/eggsampler/acme/v3/.travis.yml b/vendor/github.com/eggsampler/acme/v3/.travis.yml new file mode 100644 index 00000000..900ffd6d --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/.travis.yml @@ -0,0 +1,26 @@ +language: go + +go: + - "1.11" + - "1.x" + +env: + - GO111MODULE=on + +sudo: required + +services: + - docker + +before_install: + - GO111MODULE=off go get github.com/mattn/goveralls + +script: + - unset TRAVIS_GO_VERSION + # test the examples first + - make clean examples + # test pebble integration + - make clean pebble + # test boulder integration + - make clean boulder + - goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/eggsampler/acme/v3/LICENSE b/vendor/github.com/eggsampler/acme/v3/LICENSE new file mode 100644 index 00000000..b9a4b365 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + 
+Copyright (c) 2018 Isaac + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/eggsampler/acme/v3/Makefile b/vendor/github.com/eggsampler/acme/v3/Makefile new file mode 100644 index 00000000..aebecc9a --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/Makefile @@ -0,0 +1,66 @@ + +.PHONY: test examples clean test_full pebble pebble_setup pebble_start pebble_wait pebble_stop boulder boulder_setup boulder_start boulder_stop + + +GOPATH ?= $(HOME)/go +BOULDER_PATH ?= $(GOPATH)/src/github.com/letsencrypt/boulder +PEBBLE_PATH ?= $(GOPATH)/src/github.com/letsencrypt/pebble +TEST_PATH ?= github.com/eggsampler/acme/v3 + + +# tests the code against a running ca instance +test: + -go clean -testcache + go test -v -race -coverprofile=coverage.out -covermode=atomic $(TEST_PATH) + +examples: + go build -o /dev/null examples/certbot/certbot.go + go build -o /dev/null examples/autocert/autocert.go + +clean: + rm -f coverage.out + +test_full: clean examples pebble pebble_stop boulder boulder_stop + + +pebble: pebble_setup pebble_start pebble_wait test pebble_stop + +pebble_setup: + mkdir -p $(PEBBLE_PATH) + git clone --depth 1 https://github.com/letsencrypt/pebble.git $(PEBBLE_PATH) \ + || (cd $(PEBBLE_PATH); git checkout -f master && git reset --hard HEAD && git pull -q) + docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down + +# runs an instance of pebble using docker +pebble_start: + docker-compose -f $(PEBBLE_PATH)/docker-compose.yml up -d + +# waits until pebble responds +pebble_wait: + while ! 
wget --delete-after -q --no-check-certificate "https://localhost:14000/dir" ; do sleep 1 ; done + +# stops the running pebble instance +pebble_stop: + docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down + + +boulder: boulder_setup boulder_start boulder_wait test boulder_stop + +# NB: this edits docker-compose.yml +boulder_setup: + mkdir -p $(BOULDER_PATH) + git clone --depth 1 https://github.com/letsencrypt/boulder.git $(BOULDER_PATH) \ + || (cd $(BOULDER_PATH); git checkout -f master && git reset --hard HEAD && git pull -q) + docker-compose -f $(BOULDER_PATH)/docker-compose.yml down + +# runs an instance of boulder +boulder_start: + docker-compose -f $(BOULDER_PATH)/docker-compose.yml up -d + +# waits until boulder responds +boulder_wait: + while ! wget --delete-after -q --no-check-certificate "http://localhost:4001/directory" ; do sleep 1 ; done + +# stops the running docker instance +boulder_stop: + docker-compose -f $(BOULDER_PATH)/docker-compose.yml down diff --git a/vendor/github.com/eggsampler/acme/v3/README.md b/vendor/github.com/eggsampler/acme/v3/README.md new file mode 100644 index 00000000..389f3b80 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/README.md @@ -0,0 +1,43 @@ +# eggsampler/acme + +[![GoDoc](https://godoc.org/github.com/eggsampler/acme?status.svg)](https://godoc.org/github.com/eggsampler/acme) +[![Build Status](https://travis-ci.com/eggsampler/acme.svg?branch=master)](https://travis-ci.com/eggsampler/acme) +[![Coverage Status](https://coveralls.io/repos/github/eggsampler/acme/badge.svg?branch=master)](https://coveralls.io/github/eggsampler/acme?branch=master) + +## About + +`eggsampler/acme` is a Go client library implementation for [RFC8555](https://tools.ietf.org/html/rfc8555) (previously ACME v2), specifically for use with the [Let's Encrypt](https://letsencrypt.org/)™ service. 
+ +The library is designed to provide a zero external dependency wrapper over exposed directory endpoints and provide objects in easy to use structures. + +## Requirements + +A Go version of at least 1.11 is required as this repository is designed to be imported as a Go module. + +## Usage + +Simply import the module into a project, + +```go +import "github.com/eggsampler/acme/v3" +``` + +Note the `/v3` major version at the end. Due to the way modules function, this is the major version as represented in the `go.mod` file and latest git repo [semver](https://semver.org/) tag. +All functions are still exported and called using the `acme` package name. + +## Examples + +A simple [certbot](https://certbot.eff.org/)-like example is provided in the examples/certbot directory. +This code demonstrates account registration, new order submission, fulfilling challenges, finalising an order and fetching the issued certificate chain. + +An example of how to use the autocert package is also provided in examples/autocert. + +## Tests + +The tests can be run against an instance of [boulder](https://github.com/letsencrypt/boulder) or [pebble](https://github.com/letsencrypt/pebble). + +Challenge fulfilment is designed to use the new `challtestsrv` server present inside boulder and pebble which responds to dns queries and challenges as required. + +To run tests against an already running instance of boulder or pebble, use the `test` target in the Makefile. + +Some convenience targets for launching pebble/boulder using their respective docker compose files have also been included in the Makefile. 
diff --git a/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY b/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY new file mode 100644 index 00000000..1c53d754 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY @@ -0,0 +1,35 @@ +This document contains Third Party Software Notices and/or Additional Terms and Conditions for licensed third party software components included within this product. + +== + +https://github.com/golang/crypto/blob/master/acme/jws.go +https://github.com/golang/crypto/blob/master/acme/jws_test.go +(with modifications) + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/eggsampler/acme/v3/account.go b/vendor/github.com/eggsampler/acme/v3/account.go new file mode 100644 index 00000000..2dbd30a5 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/account.go @@ -0,0 +1,128 @@ +package acme + +import ( + "crypto" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" +) + +// NewAccount registers a new account with the acme service +func (c Client) NewAccount(privateKey crypto.Signer, onlyReturnExisting, termsOfServiceAgreed bool, contact ...string) (Account, error) { + newAccountReq := struct { + OnlyReturnExisting bool `json:"onlyReturnExisting"` + TermsOfServiceAgreed bool `json:"termsOfServiceAgreed"` + Contact []string `json:"contact,omitempty"` + }{ + OnlyReturnExisting: onlyReturnExisting, + TermsOfServiceAgreed: termsOfServiceAgreed, + Contact: contact, + } + + account := Account{} + resp, err := c.post(c.dir.NewAccount, "", privateKey, newAccountReq, &account, http.StatusOK, http.StatusCreated) + if err != nil { + return account, err + } + + account.URL = resp.Header.Get("Location") + account.PrivateKey = privateKey + + if account.Thumbprint == "" { + account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()) + if err != nil { + return account, fmt.Errorf("acme: error computing account thumbprint: %v", err) + } + } + + return account, nil +} + +// UpdateAccount updates an existing account with the acme service. 
+func (c Client) UpdateAccount(account Account, contact ...string) (Account, error) { + var updateAccountReq interface{} + + if !reflect.DeepEqual(account.Contact, contact) { + // Only provide a non-nil updateAccountReq when there is an update to be made. + updateAccountReq = struct { + Contact []string `json:"contact,omitempty"` + }{ + Contact: contact, + } + } else { + // Otherwise use "" to trigger a POST-as-GET to fetch up-to-date account + // information from the acme service. + updateAccountReq = "" + } + + _, err := c.post(account.URL, account.URL, account.PrivateKey, updateAccountReq, &account, http.StatusOK) + if err != nil { + return account, err + } + + if account.Thumbprint == "" { + account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()) + if err != nil { + return account, fmt.Errorf("acme: error computing account thumbprint: %v", err) + } + } + + return account, nil +} + +// AccountKeyChange rolls over an account to a new key. +func (c Client) AccountKeyChange(account Account, newPrivateKey crypto.Signer) (Account, error) { + oldJwkKeyPub, err := jwkEncode(account.PrivateKey.Public()) + if err != nil { + return account, fmt.Errorf("acme: error encoding new private key: %v", err) + } + + keyChangeReq := struct { + Account string `json:"account"` + OldKey json.RawMessage `json:"oldKey"` + }{ + Account: account.URL, + OldKey: []byte(oldJwkKeyPub), + } + + innerJws, err := jwsEncodeJSON(keyChangeReq, newPrivateKey, "", "", c.dir.KeyChange) + if err != nil { + return account, fmt.Errorf("acme: error encoding inner jws: %v", err) + } + + if _, err := c.post(c.dir.KeyChange, account.URL, account.PrivateKey, json.RawMessage(innerJws), nil, http.StatusOK); err != nil { + return account, err + } + + account.PrivateKey = newPrivateKey + + return account, nil +} + +// DeactivateAccount deactivates a given account. 
+func (c Client) DeactivateAccount(account Account) (Account, error) { + deactivateReq := struct { + Status string `json:"status"` + }{ + Status: "deactivated", + } + + _, err := c.post(account.URL, account.URL, account.PrivateKey, deactivateReq, &account, http.StatusOK) + + return account, err +} + +// FetchOrderList fetches a list of orders from the account url provided in the account Orders field +func (c Client) FetchOrderList(account Account) (OrderList, error) { + orderList := OrderList{} + + if account.Orders == "" { + return orderList, errors.New("no order list for account") + } + + _, err := c.post(account.Orders, account.URL, account.PrivateKey, "", &orderList, http.StatusOK) + + return orderList, err +} diff --git a/vendor/github.com/eggsampler/acme/v3/acme.go b/vendor/github.com/eggsampler/acme/v3/acme.go new file mode 100644 index 00000000..d104b908 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/acme.go @@ -0,0 +1,294 @@ +package acme + +import ( + "bytes" + "crypto" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "regexp" + "strings" + "time" +) + +const ( + // LetsEncryptProduction holds the production directory url + LetsEncryptProduction = "https://acme-v02.api.letsencrypt.org/directory" + + // LetsEncryptStaging holds the staging directory url + LetsEncryptStaging = "https://acme-staging-v02.api.letsencrypt.org/directory" + + userAgentString = "eggsampler-acme/1.0 Go-http-client/1.1" +) + +// NewClient creates a new acme client given a valid directory url. 
+func NewClient(directoryURL string, options ...OptionFunc) (Client, error) { + // Set a default http timeout of 60 seconds, this can be overridden + // via an OptionFunc eg: acme.NewClient(url, WithHTTPTimeout(10 * time.Second)) + httpClient := &http.Client{ + Timeout: 60 * time.Second, + } + + acmeClient := Client{ + httpClient: httpClient, + nonces: &nonceStack{}, + retryCount: 5, + } + + acmeClient.dir.URL = directoryURL + + for _, opt := range options { + if err := opt(&acmeClient); err != nil { + return acmeClient, fmt.Errorf("acme: error setting option: %v", err) + } + } + + if _, err := acmeClient.get(directoryURL, &acmeClient.dir, http.StatusOK); err != nil { + return acmeClient, err + } + + return acmeClient, nil +} + +// The directory object returned by the client connecting to a directory url. +func (c Client) Directory() Directory { + return c.dir +} + +// Helper function to get the poll interval and poll timeout, defaulting if 0 +func (c Client) getPollingDurations() (time.Duration, time.Duration) { + pollInterval := c.PollInterval + if pollInterval == 0 { + pollInterval = 500 * time.Millisecond + } + pollTimeout := c.PollTimeout + if pollTimeout == 0 { + pollTimeout = 30 * time.Second + } + return pollInterval, pollTimeout +} + +// Helper function to have a central point for performing http requests. +// Stores any returned nonces in the stack. 
+func (c Client) do(req *http.Request, addNonce bool) (*http.Response, error) { + // identifier for this client, as well as the default go user agent + if c.userAgentSuffix != "" { + req.Header.Set("User-Agent", userAgentString+" "+c.userAgentSuffix) + } else { + req.Header.Set("User-Agent", userAgentString) + } + + if c.acceptLanguage != "" { + req.Header.Set("Accept-Language", c.acceptLanguage) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return resp, err + } + + if addNonce { + c.nonces.push(resp.Header.Get("Replay-Nonce")) + } + + return resp, nil +} + +// Helper function to perform an http get request and read the body. +func (c Client) getRaw(url string, expectedStatus ...int) (*http.Response, []byte, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, nil, fmt.Errorf("acme: error creating request: %v", err) + } + + resp, err := c.do(req, true) + if err != nil { + return resp, nil, fmt.Errorf("acme: error fetching response: %v", err) + } + defer resp.Body.Close() + + if err := checkError(resp, expectedStatus...); err != nil { + return resp, nil, err + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, body, fmt.Errorf("acme: error reading response body: %v", err) + } + + return resp, body, nil +} + +// Helper function for performing a http get on an acme resource. +func (c Client) get(url string, out interface{}, expectedStatus ...int) (*http.Response, error) { + resp, body, err := c.getRaw(url, expectedStatus...) 
+ if err != nil { + return resp, err + } + + if len(body) > 0 && out != nil { + if err := json.Unmarshal(body, out); err != nil { + return resp, fmt.Errorf("acme: error parsing response body: %v", err) + } + } + + return resp, nil +} + +func (c Client) nonce() (string, error) { + nonce := c.nonces.pop() + if nonce != "" { + return nonce, nil + } + + if c.dir.NewNonce == "" { + return "", errors.New("acme: no new nonce url") + } + + req, err := http.NewRequest("HEAD", c.dir.NewNonce, nil) + if err != nil { + return "", fmt.Errorf("acme: error creating new nonce request: %v", err) + } + + resp, err := c.do(req, false) + if err != nil { + return "", fmt.Errorf("acme: error fetching new nonce: %v", err) + } + + nonce = resp.Header.Get("Replay-Nonce") + return nonce, nil +} + +// Helper function to perform an http post request and read the body. +// Will attempt to retry if error is badNonce +func (c Client) postRaw(retryCount int, requestURL, kid string, privateKey crypto.Signer, payload interface{}, expectedStatus []int) (*http.Response, []byte, error) { + nonce, err := c.nonce() + if err != nil { + return nil, nil, err + } + + data, err := jwsEncodeJSON(payload, privateKey, keyID(kid), nonce, requestURL) + if err != nil { + return nil, nil, fmt.Errorf("acme: error encoding json payload: %v", err) + } + + req, err := http.NewRequest(http.MethodPost, requestURL, bytes.NewReader(data)) + if err != nil { + return nil, nil, fmt.Errorf("acme: error creating request: %v", err) + } + req.Header.Set("Content-Type", "application/jose+json") + + resp, err := c.do(req, true) + if err != nil { + return resp, nil, fmt.Errorf("acme: error sending request: %v", err) + } + defer resp.Body.Close() + + if err := checkError(resp, expectedStatus...); err != nil { + prob, ok := err.(Problem) + if !ok { + // don't retry for an error we don't know about + return resp, nil, err + } + if retryCount >= c.retryCount { + // don't attempt to retry if too many retries + return resp, nil, err + } + 
if strings.HasSuffix(prob.Type, ":badNonce") { + // only retry if error is badNonce + return c.postRaw(retryCount+1, requestURL, kid, privateKey, payload, expectedStatus) + } + return resp, nil, err + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, body, fmt.Errorf("acme: error reading response body: %v", err) + } + + return resp, body, nil +} + +// Helper function for performing a http post to an acme resource. +func (c Client) post(requestURL, keyID string, privateKey crypto.Signer, payload interface{}, out interface{}, expectedStatus ...int) (*http.Response, error) { + resp, body, err := c.postRaw(0, requestURL, keyID, privateKey, payload, expectedStatus) + if err != nil { + return resp, err + } + + if _, b := os.LookupEnv("ACME_DEBUG_POST"); b { + fmt.Println() + fmt.Println(string(body)) + fmt.Println() + } + + if len(body) > 0 && out != nil { + if err := json.Unmarshal(body, out); err != nil { + return resp, fmt.Errorf("acme: error parsing response: %v - %s", err, string(body)) + } + } + + return resp, nil +} + +var regLink = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`) + +// Fetches a http Link header from a http response +func fetchLink(resp *http.Response, wantedLink string) string { + if resp == nil { + return "" + } + linkHeader := resp.Header["Link"] + if len(linkHeader) == 0 { + return "" + } + for _, l := range linkHeader { + matches := regLink.FindAllStringSubmatch(l, -1) + for _, m := range matches { + if len(m) != 3 { + continue + } + if m[2] == wantedLink { + return m[1] + } + } + } + return "" +} + +// FetchRaw is a helper function to assist with POST-AS-GET requests +func (c Client) Fetch(account Account, requestURL string, result interface{}, expectedStatus ...int) error { + if len(expectedStatus) == 0 { + expectedStatus = []int{http.StatusOK} + } + _, err := c.post(requestURL, account.URL, account.PrivateKey, "", result, expectedStatus...) 
+ + return err +} + +// Fetches all http Link header from a http response +func fetchLinks(resp *http.Response, wantedLink string) []string { + if resp == nil { + return nil + } + linkHeader := resp.Header["Link"] + if len(linkHeader) == 0 { + return nil + } + var links []string + for _, l := range linkHeader { + matches := regLink.FindAllStringSubmatch(l, -1) + for _, m := range matches { + if len(m) != 3 { + continue + } + if m[2] == wantedLink { + links = append(links, m[1]) + } + } + } + return links +} diff --git a/vendor/github.com/eggsampler/acme/v3/authorization.go b/vendor/github.com/eggsampler/acme/v3/authorization.go new file mode 100644 index 00000000..09d5906d --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/authorization.go @@ -0,0 +1,43 @@ +package acme + +import "net/http" + +// FetchAuthorization fetches an authorization from an authorization url provided in an order. +func (c Client) FetchAuthorization(account Account, authURL string) (Authorization, error) { + authResp := Authorization{} + _, err := c.post(authURL, account.URL, account.PrivateKey, "", &authResp, http.StatusOK) + if err != nil { + return authResp, err + } + + for i := 0; i < len(authResp.Challenges); i++ { + if authResp.Challenges[i].KeyAuthorization == "" { + authResp.Challenges[i].KeyAuthorization = authResp.Challenges[i].Token + "." + account.Thumbprint + } + } + + authResp.ChallengeMap = map[string]Challenge{} + authResp.ChallengeTypes = []string{} + for _, c := range authResp.Challenges { + authResp.ChallengeMap[c.Type] = c + authResp.ChallengeTypes = append(authResp.ChallengeTypes, c.Type) + } + + authResp.URL = authURL + + return authResp, nil +} + +// DeactivateAuthorization deactivate a provided authorization url from an order. 
+func (c Client) DeactivateAuthorization(account Account, authURL string) (Authorization, error) { + deactivateReq := struct { + Status string `json:"status"` + }{ + Status: "deactivated", + } + deactivateResp := Authorization{} + + _, err := c.post(authURL, account.URL, account.PrivateKey, deactivateReq, &deactivateResp, http.StatusOK) + + return deactivateResp, err +} diff --git a/vendor/github.com/eggsampler/acme/v3/autocert.go b/vendor/github.com/eggsampler/acme/v3/autocert.go new file mode 100644 index 00000000..eea65ec3 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/autocert.go @@ -0,0 +1,430 @@ +package acme + +// Similar to golang.org/x/crypto/acme/autocert + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "path" + "strings" + "sync" +) + +// HostCheck function prototype to implement for checking hosts against before issuing certificates +type HostCheck func(host string) error + +// WhitelistHosts implements a simple whitelist HostCheck +func WhitelistHosts(hosts ...string) HostCheck { + m := map[string]bool{} + for _, v := range hosts { + m[v] = true + } + + return func(host string) error { + if !m[host] { + return errors.New("autocert: host not whitelisted") + } + return nil + } +} + +// AutoCert is a stateful certificate manager for issuing certificates on connecting hosts +type AutoCert struct { + // Acme directory Url + // If nil, uses `LetsEncryptStaging` + DirectoryURL string + + // Options contains the options used for creating the acme client + Options []OptionFunc + + // A function to check whether a host is allowed or not + // If nil, all hosts allowed + // Use `WhitelistHosts(hosts ...string)` for a simple white list of hostnames + HostCheck HostCheck + + // Cache dir to store account data and certificates + // If nil, does not write cache data to file + CacheDir string + + // When using a 
staging environment, include a root certificate for verification purposes + RootCert string + + // Called before updating challenges + PreUpdateChallengeHook func(Account, Challenge) + + // Mapping of token -> keyauth + // Protected by a mutex, but not rwmutex because tokens are deleted once read + tokensLock sync.RWMutex + tokens map[string][]byte + + // Mapping of cache key -> value + cacheLock sync.Mutex + cache map[string][]byte + + // read lock around getting existing certs + // write lock around issuing new certificate + certLock sync.RWMutex + + client Client +} + +// HTTPHandler Wraps a handler and provides serving of http-01 challenge tokens from /.well-known/acme-challenge/ +// If handler is nil, will redirect all traffic otherwise to https +func (m *AutoCert) HTTPHandler(handler http.Handler) http.Handler { + if handler == nil { + handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusMovedPermanently) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + handler.ServeHTTP(w, r) + return + } + + if err := m.checkHost(r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + + token := path.Base(r.URL.Path) + m.tokensLock.RLock() + defer m.tokensLock.RUnlock() + keyAuth := m.tokens[token] + if len(keyAuth) == 0 { + http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) + return + } + + _, _ = w.Write(keyAuth) + }) +} + +// GetCertificate implements a tls.Config.GetCertificate hook +func (m *AutoCert) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + name := strings.TrimSuffix(hello.ServerName, ".") + + if name == "" { + return nil, errors.New("autocert: missing server name") + } + if !strings.Contains(strings.Trim(name, "."), ".") { + return nil, errors.New("autocert: server name component count 
invalid") + } + if strings.ContainsAny(name, `/\`) { + return nil, errors.New("autocert: server name contains invalid character") + } + + // check the hostname is allowed + if err := m.checkHost(name); err != nil { + return nil, err + } + + // check if there's an existing cert + m.certLock.RLock() + existingCert := m.getExistingCert(name) + m.certLock.RUnlock() + if existingCert != nil { + return existingCert, nil + } + + // if not, attempt to issue a new cert + m.certLock.Lock() + defer m.certLock.Unlock() + return m.issueCert(name) +} + +func (m *AutoCert) getDirectoryURL() string { + if m.DirectoryURL != "" { + return m.DirectoryURL + } + + return LetsEncryptStaging +} + +func (m *AutoCert) getCache(keys ...string) []byte { + key := strings.Join(keys, "-") + + m.cacheLock.Lock() + defer m.cacheLock.Unlock() + + b := m.cache[key] + if len(b) > 0 { + return b + } + + if m.CacheDir == "" { + return nil + } + + b, _ = ioutil.ReadFile(path.Join(m.CacheDir, key)) + if len(b) == 0 { + return nil + } + + if m.cache == nil { + m.cache = map[string][]byte{} + } + m.cache[key] = b + return b +} + +func (m *AutoCert) putCache(data []byte, keys ...string) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + key := strings.Join(keys, "-") + + m.cacheLock.Lock() + defer m.cacheLock.Unlock() + + if m.cache == nil { + m.cache = map[string][]byte{} + } + m.cache[key] = data + + if m.CacheDir == "" { + cancel() + return ctx + } + + go func() { + _ = ioutil.WriteFile(path.Join(m.CacheDir, key), data, 0700) + cancel() + }() + + return ctx +} + +func (m *AutoCert) checkHost(name string) error { + if m.HostCheck == nil { + return nil + } + return m.HostCheck(name) +} + +func (m *AutoCert) getExistingCert(name string) *tls.Certificate { + // check for a stored cert + certData := m.getCache("cert", name) + if len(certData) == 0 { + // no cert + return nil + } + + privBlock, pubData := pem.Decode(certData) + if len(pubData) == 0 { + // no public key data 
(cert/issuer), ignore + return nil + } + + // decode pub chain + var pubDER [][]byte + var pub []byte + for len(pubData) > 0 { + var b *pem.Block + b, pubData = pem.Decode(pubData) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + pub = append(pub, b.Bytes...) + } + if len(pubData) > 0 { + // leftover data in file - possibly corrupt, ignore + return nil + } + + certs, err := x509.ParseCertificates(pub) + if err != nil { + // bad certificates, ignore + return nil + } + + leaf := certs[0] + + // add any intermediate certs if present + var intermediates *x509.CertPool + if len(certs) > 1 { + intermediates = x509.NewCertPool() + for i := 1; i < len(certs); i++ { + intermediates.AddCert(certs[i]) + } + } + + // add a root certificate if present + var roots *x509.CertPool + if m.RootCert != "" { + roots = x509.NewCertPool() + rootBlock, _ := pem.Decode([]byte(m.RootCert)) + rootCert, err := x509.ParseCertificate(rootBlock.Bytes) + if err != nil { + return nil + } + roots.AddCert(rootCert) + } + + if _, err := leaf.Verify(x509.VerifyOptions{DNSName: name, Intermediates: intermediates, Roots: roots}); err != nil { + // invalid certificates , ignore + return nil + } + + privKey, err := x509.ParseECPrivateKey(privBlock.Bytes) + if err != nil { + // invalid private key, ignore + return nil + } + + return &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } +} + +func (m *AutoCert) issueCert(domainName string) (*tls.Certificate, error) { + // attempt to load an existing account key + var privKey *ecdsa.PrivateKey + if keyData := m.getCache("account"); len(keyData) > 0 { + block, _ := pem.Decode(keyData) + x509Encoded := block.Bytes + privKey, _ = x509.ParseECPrivateKey(x509Encoded) + } + + // otherwise generate a new one + if privKey == nil { + var err error + privKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, fmt.Errorf("autocert: error generating new account key: %v", err) + } + + 
x509Encoded, _ := x509.MarshalECPrivateKey(privKey) + pemEncoded := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: x509Encoded}) + + m.putCache(pemEncoded, "account") + } + + // create a new client if one doesn't exist + if m.client.Directory().URL == "" { + var err error + m.client, err = NewClient(m.getDirectoryURL(), m.Options...) + if err != nil { + return nil, err + } + } + + // create/fetch acme account + account, err := m.client.NewAccount(privKey, false, true) + if err != nil { + return nil, fmt.Errorf("autocert: error creating/fetching account: %v", err) + } + + // start a new order process + order, err := m.client.NewOrderDomains(account, domainName) + if err != nil { + return nil, fmt.Errorf("autocert: error creating new order for domain %s: %v", domainName, err) + } + + // loop through each of the provided authorization Urls + for _, authURL := range order.Authorizations { + auth, err := m.client.FetchAuthorization(account, authURL) + if err != nil { + return nil, fmt.Errorf("autocert: error fetching authorization Url %q: %v", authURL, err) + } + + if auth.Status == "valid" { + continue + } + + chal, ok := auth.ChallengeMap[ChallengeTypeHTTP01] + if !ok { + return nil, fmt.Errorf("autocert: unable to find http-01 challenge for auth %s, Url: %s", auth.Identifier.Value, authURL) + } + + m.tokensLock.Lock() + if m.tokens == nil { + m.tokens = map[string][]byte{} + } + m.tokens[chal.Token] = []byte(chal.KeyAuthorization) + m.tokensLock.Unlock() + + if m.PreUpdateChallengeHook != nil { + m.PreUpdateChallengeHook(account, chal) + } + + chal, err = m.client.UpdateChallenge(account, chal) + if err != nil { + return nil, fmt.Errorf("autocert: error updating authorization %s challenge (Url: %s) : %v", auth.Identifier.Value, authURL, err) + } + + m.tokensLock.Lock() + delete(m.tokens, chal.Token) + m.tokensLock.Unlock() + } + + // generate private key for cert + certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + 
return nil, fmt.Errorf("autocert: error generating certificate key for %s: %v", domainName, err) + } + certKeyEnc, err := x509.MarshalECPrivateKey(certKey) + if err != nil { + return nil, fmt.Errorf("autocert: error encoding certificate key for %s: %v", domainName, err) + } + certKeyPem := pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: certKeyEnc, + }) + + // create the new csr template + tpl := &x509.CertificateRequest{ + SignatureAlgorithm: x509.ECDSAWithSHA256, + PublicKeyAlgorithm: x509.ECDSA, + PublicKey: certKey.Public(), + Subject: pkix.Name{CommonName: domainName}, + DNSNames: []string{domainName}, + } + csrDer, err := x509.CreateCertificateRequest(rand.Reader, tpl, certKey) + if err != nil { + return nil, fmt.Errorf("autocert: error creating certificate request for %s: %v", domainName, err) + } + csr, err := x509.ParseCertificateRequest(csrDer) + if err != nil { + return nil, fmt.Errorf("autocert: error parsing certificate request for %s: %v", domainName, err) + } + + // finalize the order with the acme server given a csr + order, err = m.client.FinalizeOrder(account, order, csr) + if err != nil { + return nil, fmt.Errorf("autocert: error finalizing order for %s: %v", domainName, err) + } + + // fetch the certificate chain from the finalized order provided by the acme server + certs, err := m.client.FetchCertificates(account, order.Certificate) + if err != nil { + return nil, fmt.Errorf("autocert: error fetching order certificates for %s: %v", domainName, err) + } + + certPem := certKeyPem + // var certDer [][]byte + for _, c := range certs { + b := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: c.Raw, + }) + certPem = append(certPem, b...) 
+ // certDer = append(certDer, c.Raw) + } + m.putCache(certPem, "cert", domainName) + + return m.getExistingCert(domainName), nil +} diff --git a/vendor/github.com/eggsampler/acme/v3/certificate.go b/vendor/github.com/eggsampler/acme/v3/certificate.go new file mode 100644 index 00000000..0791f67f --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/certificate.go @@ -0,0 +1,106 @@ +package acme + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "net/http" +) + +func (c Client) decodeCertificateChain(body []byte, resp *http.Response, account Account) ([]*x509.Certificate, error) { + var certs []*x509.Certificate + for { + var p *pem.Block + p, body = pem.Decode(body) + if p == nil { + break + } + cert, err := x509.ParseCertificate(p.Bytes) + if err != nil { + return certs, fmt.Errorf("acme: error parsing certificate: %v", err) + } + certs = append(certs, cert) + } + + up := fetchLink(resp, "up") + if up != "" { + upCerts, err := c.FetchCertificates(account, up) + if err != nil { + return certs, fmt.Errorf("acme: error fetching up cert: %v", err) + } + if len(upCerts) != 0 { + certs = append(certs, upCerts...) + } + } + + return certs, nil +} + +// FetchCertificates downloads a certificate chain from a url given in an order certificate. +func (c Client) FetchCertificates(account Account, certificateURL string) ([]*x509.Certificate, error) { + resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) + if err != nil { + return nil, err + } + + return c.decodeCertificateChain(body, resp, account) +} + +// FetchAllCertificates downloads a certificate chain from a url given in an order certificate, as well as any alternate certificates if provided. +// Returns a mapping of certificate urls to the certificate chain. 
+func (c Client) FetchAllCertificates(account Account, certificateURL string) (map[string][]*x509.Certificate, error) { + resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) + if err != nil { + return nil, err + } + + certChain, err := c.decodeCertificateChain(body, resp, account) + if err != nil { + return nil, err + } + + certs := map[string][]*x509.Certificate{ + certificateURL: certChain, + } + + alternates := fetchLinks(resp, "alternate") + + for _, altURL := range alternates { + altResp, altBody, err := c.postRaw(0, altURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) + if err != nil { + return certs, fmt.Errorf("acme: error fetching alt cert chain at %q - %v", altURL, err) + } + altCertChain, err := c.decodeCertificateChain(altBody, altResp, account) + if err != nil { + return certs, fmt.Errorf("acme: error decoding alt cert chain at %q - %v", altURL, err) + } + certs[altURL] = altCertChain + } + + return certs, nil + +} + +// RevokeCertificate revokes a given certificate given the certificate key or account key, and a reason. 
+func (c Client) RevokeCertificate(account Account, cert *x509.Certificate, key crypto.Signer, reason int) error { + revokeReq := struct { + Certificate string `json:"certificate"` + Reason int `json:"reason"` + }{ + Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw), + Reason: reason, + } + + kid := "" + if key == account.PrivateKey { + kid = account.URL + } + + if _, err := c.post(c.dir.RevokeCert, kid, key, revokeReq, nil, http.StatusOK); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/eggsampler/acme/v3/challenge.go b/vendor/github.com/eggsampler/acme/v3/challenge.go new file mode 100644 index 00000000..6d57bfb9 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/challenge.go @@ -0,0 +1,102 @@ +package acme + +import ( + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "net/http" + "time" +) + +// EncodeDNS01KeyAuthorization encodes a key authorization and provides a value to be put in the TXT record for the _acme-challenge DNS entry. +func EncodeDNS01KeyAuthorization(keyAuth string) string { + h := sha256.Sum256([]byte(keyAuth)) + return base64.RawURLEncoding.EncodeToString(h[:]) +} + +// Helper function to determine whether a challenge is "finished" by it's status. +func checkUpdatedChallengeStatus(challenge Challenge) (bool, error) { + switch challenge.Status { + case "pending": + // Challenge objects are created in the "pending" state. + // TODO: https://github.com/letsencrypt/boulder/issues/3346 + // return true, errors.New("acme: unexpected 'pending' challenge state") + return false, nil + + case "processing": + // They transition to the "processing" state when the client responds to the + // challenge and the server begins attempting to validate that the client has completed the challenge. 
+ return false, nil + + case "valid": + // If validation is successful, the challenge moves to the "valid" state + return true, nil + + case "invalid": + // if there is an error, the challenge moves to the "invalid" state. + if challenge.Error.Type != "" { + return true, challenge.Error + } + return true, errors.New("acme: challenge is invalid, no error provided") + + default: + return true, fmt.Errorf("acme: unknown challenge status: %s", challenge.Status) + } +} + +// UpdateChallenge responds to a challenge to indicate to the server to complete the challenge. +func (c Client) UpdateChallenge(account Account, challenge Challenge) (Challenge, error) { + resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, struct{}{}, &challenge, http.StatusOK) + if err != nil { + return challenge, err + } + + if loc := resp.Header.Get("Location"); loc != "" { + challenge.URL = loc + } + challenge.AuthorizationURL = fetchLink(resp, "up") + + if finished, err := checkUpdatedChallengeStatus(challenge); finished { + return challenge, err + } + + pollInterval, pollTimeout := c.getPollingDurations() + end := time.Now().Add(pollTimeout) + for { + if time.Now().After(end) { + return challenge, errors.New("acme: challenge update timeout") + } + time.Sleep(pollInterval) + + resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK) + if err != nil { + // i don't think it's worth exiting the loop on this error + // it could just be connectivity issue that's resolved before the timeout duration + continue + } + + if loc := resp.Header.Get("Location"); loc != "" { + challenge.URL = loc + } + challenge.AuthorizationURL = fetchLink(resp, "up") + + if finished, err := checkUpdatedChallengeStatus(challenge); finished { + return challenge, err + } + } +} + +// FetchChallenge fetches an existing challenge from the given url. 
+func (c Client) FetchChallenge(account Account, challengeURL string) (Challenge, error) { + challenge := Challenge{} + resp, err := c.post(challengeURL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK) + if err != nil { + return challenge, err + } + + challenge.URL = resp.Header.Get("Location") + challenge.AuthorizationURL = fetchLink(resp, "up") + + return challenge, nil +} diff --git a/vendor/github.com/eggsampler/acme/v3/jws.go b/vendor/github.com/eggsampler/acme/v3/jws.go new file mode 100644 index 00000000..9461d969 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/jws.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the THIRD-PARTY file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "math/big" +) + +var errUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + +// keyID is the account identity provided by a CA during registration. +type keyID string + +// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID. +// See jwsEncodeJSON for details. +const noKeyID = keyID("") + +// noPayload indicates jwsEncodeJSON will encode zero-length octet string +// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make +// authenticated GET requests via POSTing with an empty payload. +// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details. +const noPayload = "" + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format containing either kid or jwk +// fields based on the provided keyID value. +// +// If kid is non-empty, its quoted value is inserted in the protected head +// as "kid" field value. 
Otherwise, JWK is computed using jwkEncode and inserted +// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive. +// +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) { + alg, sha := jwsHasher(key.Public()) + if alg == "" || !sha.Available() { + return nil, errUnsupportedKey + } + var phead string + switch kid { + case noKeyID: + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url) + default: + phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url) + } + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + var payload string + if claimset != noPayload { + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload = base64.RawURLEncoding.EncodeToString(cs) + } + hash := sha.New() + _, _ = hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
+ return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", errUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// The hash is unused for ECDSA keys. +// +// Note: non-stdlib crypto.Signer implementations are expected to return +// the signature in the format as specified in RFC7518. +// See https://tools.ietf.org/html/rfc7518 for more details. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + if key, ok := key.(*ecdsa.PrivateKey); ok { + // The key.Sign method of ecdsa returns ASN1-encoded signature. + // So, we use the package Sign function instead + // to get R and S values directly and format the result accordingly. + r, s, err := ecdsa.Sign(rand.Reader, key, digest) + if err != nil { + return nil, err + } + rb, sb := r.Bytes(), s.Bytes() + size := key.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return key.Sign(rand.Reader, digest, hash) +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. 
+func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { + switch pub := pub.(type) { + case *rsa.PublicKey: + return "RS256", crypto.SHA256 + case *ecdsa.PublicKey: + switch pub.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/github.com/eggsampler/acme/v3/nonce.go b/vendor/github.com/eggsampler/acme/v3/nonce.go new file mode 100644 index 00000000..2ef9aca7 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/nonce.go @@ -0,0 +1,45 @@ +package acme + +import ( + "sync" +) + +// Simple thread-safe stack impl +type nonceStack struct { + lock sync.Mutex + stack []string +} + +// Pushes a nonce to the stack. +// Doesn't push empty nonces, or if there's more than 100 nonces on the stack +func (ns *nonceStack) push(v string) { + if v == "" { + return + } + + ns.lock.Lock() + defer ns.lock.Unlock() + + if len(ns.stack) > 100 { + return + } + + ns.stack = append(ns.stack, v) +} + +// Pops a nonce from the stack. 
+// Returns empty string if there are no nonces +func (ns *nonceStack) pop() string { + ns.lock.Lock() + defer ns.lock.Unlock() + + n := len(ns.stack) + if n == 0 { + return "" + } + + v := ns.stack[n-1] + ns.stack = ns.stack[:n-1] + + return v +} diff --git a/vendor/github.com/eggsampler/acme/v3/options.go b/vendor/github.com/eggsampler/acme/v3/options.go new file mode 100644 index 00000000..ff19867f --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/options.go @@ -0,0 +1,70 @@ +package acme + +import ( + "crypto/tls" + "errors" + "net/http" + "time" +) + +// OptionFunc function prototype for passing options to NewClient +type OptionFunc func(client *Client) error + +// WithHTTPTimeout sets a timeout on the http client used by the Client +func WithHTTPTimeout(duration time.Duration) OptionFunc { + return func(client *Client) error { + client.httpClient.Timeout = duration + return nil + } +} + +// WithInsecureSkipVerify sets InsecureSkipVerify on the http client transport tls client config used by the Client +func WithInsecureSkipVerify() OptionFunc { + return func(client *Client) error { + client.httpClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + return nil + } +} + +// WithUserAgentSuffix appends a user agent suffix for http requests to acme resources +func WithUserAgentSuffix(userAgentSuffix string) OptionFunc { + return func(client *Client) error { + client.userAgentSuffix = userAgentSuffix + return nil + } +} + +// WithAcceptLanguage sets an Accept-Language header on http requests +func WithAcceptLanguage(acceptLanguage string) OptionFunc { + return func(client *Client) error { + client.acceptLanguage = acceptLanguage + return nil + } +} + +// WithRetryCount sets the number of times the acme client retries when receiving an api error (eg, nonce failures, etc). 
+// Default: 5 +func WithRetryCount(retryCount int) OptionFunc { + return func(client *Client) error { + if retryCount < 1 { + return errors.New("retryCount must be > 0") + } + client.retryCount = retryCount + return nil + } +} + +// WithHTTPClient Allows setting a custom http client for acme connections +func WithHTTPClient(httpClient *http.Client) OptionFunc { + return func(client *Client) error { + if httpClient == nil { + return errors.New("client must not be nil") + } + client.httpClient = httpClient + return nil + } +} diff --git a/vendor/github.com/eggsampler/acme/v3/order.go b/vendor/github.com/eggsampler/acme/v3/order.go new file mode 100644 index 00000000..6604a137 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/order.go @@ -0,0 +1,136 @@ +package acme + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "net/http" + "time" +) + +// NewOrder initiates a new order for a new certificate. +func (c Client) NewOrder(account Account, identifiers []Identifier) (Order, error) { + newOrderReq := struct { + Identifiers []Identifier `json:"identifiers"` + }{ + Identifiers: identifiers, + } + newOrderResp := Order{} + resp, err := c.post(c.dir.NewOrder, account.URL, account.PrivateKey, newOrderReq, &newOrderResp, http.StatusCreated) + if err != nil { + return newOrderResp, err + } + + newOrderResp.URL = resp.Header.Get("Location") + + return newOrderResp, nil +} + +// NewOrderDomains is a wrapper for NewOrder(AcmeAccount, []AcmeIdentifiers) +// Creates a dns identifier for each provided domain +func (c Client) NewOrderDomains(account Account, domains ...string) (Order, error) { + if len(domains) == 0 { + return Order{}, errors.New("acme: no domains provided") + } + + var ids []Identifier + for _, d := range domains { + ids = append(ids, Identifier{Type: "dns", Value: d}) + } + + return c.NewOrder(account, ids) +} + +// FetchOrder fetches an existing order given an order url. 
+func (c Client) FetchOrder(account Account, orderURL string) (Order, error) { + orderResp := Order{ + URL: orderURL, // boulder response doesn't seem to contain location header for this request + } + _, err := c.post(orderURL, account.URL, account.PrivateKey, "", &orderResp, http.StatusOK) + + return orderResp, err +} + +// Helper function to determine whether an order is "finished" by it's status. +func checkFinalizedOrderStatus(order Order) (bool, error) { + switch order.Status { + case "invalid": + // "invalid": The certificate will not be issued. Consider this + // order process abandoned. + if order.Error.Type != "" { + return true, order.Error + } + return true, errors.New("acme: finalized order is invalid, no error provided") + + case "pending": + // "pending": The server does not believe that the client has + // fulfilled the requirements. Check the "authorizations" array for + // entries that are still pending. + return true, errors.New("acme: authorizations not fulfilled") + + case "ready": + // "ready": The server agrees that the requirements have been + // fulfilled, and is awaiting finalization. Submit a finalization + // request. + return true, errors.New("acme: unexpected 'ready' state") + + case "processing": + // "processing": The certificate is being issued. Send a GET request + // after the time given in the "Retry-After" header field of the + // response, if any. + return false, nil + + case "valid": + // "valid": The server has issued the certificate and provisioned its + // URL to the "certificate" field of the order. Download the + // certificate. + return true, nil + + default: + return true, fmt.Errorf("acme: unknown order status: %s", order.Status) + } +} + +// FinalizeOrder indicates to the acme server that the client considers an order complete and "finalizes" it. +// If the server believes the authorizations have been filled successfully, a certificate should then be available. 
+// This function assumes that the order status is "ready". +func (c Client) FinalizeOrder(account Account, order Order, csr *x509.CertificateRequest) (Order, error) { + finaliseReq := struct { + Csr string `json:"csr"` + }{ + Csr: base64.RawURLEncoding.EncodeToString(csr.Raw), + } + + resp, err := c.post(order.Finalize, account.URL, account.PrivateKey, finaliseReq, &order, http.StatusOK) + if err != nil { + return order, err + } + + order.URL = resp.Header.Get("Location") + + if finished, err := checkFinalizedOrderStatus(order); finished { + return order, err + } + + pollInterval, pollTimeout := c.getPollingDurations() + end := time.Now().Add(pollTimeout) + for { + if time.Now().After(end) { + return order, errors.New("acme: finalized order timeout") + } + time.Sleep(pollInterval) + + if _, err := c.post(order.URL, account.URL, account.PrivateKey, "", &order, http.StatusOK); err != nil { + // i dont think it's worth exiting the loop on this error + // it could just be connectivity issue thats resolved before the timeout duration + continue + } + + order.URL = resp.Header.Get("Location") + + if finished, err := checkFinalizedOrderStatus(order); finished { + return order, err + } + } +} diff --git a/vendor/github.com/eggsampler/acme/v3/problem.go b/vendor/github.com/eggsampler/acme/v3/problem.go new file mode 100644 index 00000000..4c3ae0d5 --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/problem.go @@ -0,0 +1,65 @@ +package acme + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// Problem document as defined in, +// https://tools.ietf.org/html/rfc7807 + +// Problem represents an error returned by an acme server. 
+type Problem struct { + Type string `json:"type"` + Detail string `json:"detail,omitempty"` + Status int `json:"status,omitempty"` + Instance string `json:"instance,omitempty"` + SubProblems []SubProblem `json:"subproblems,omitempty"` +} + +type SubProblem struct { + Type string `json:"type"` + Detail string `json:"detail"` + Identifier Identifier `json:"identifier"` +} + +// Returns a human readable error string. +func (err Problem) Error() string { + s := fmt.Sprintf("acme: error code %d %q: %s", err.Status, err.Type, err.Detail) + if len(err.SubProblems) > 0 { + for _, v := range err.SubProblems { + s += fmt.Sprintf(", problem %q: %s", v.Type, v.Detail) + } + } + if err.Instance != "" { + s += ", url: " + err.Instance + } + return s +} + +// Helper function to determine if a response contains an expected status code, or otherwise an error object. +func checkError(resp *http.Response, expectedStatuses ...int) error { + for _, statusCode := range expectedStatuses { + if resp.StatusCode == statusCode { + return nil + } + } + + if resp.StatusCode < 400 || resp.StatusCode >= 600 { + return fmt.Errorf("acme: expected status codes: %d, got: %d %s", expectedStatuses, resp.StatusCode, resp.Status) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("acme: error reading error body: %v", err) + } + + acmeError := Problem{} + if err := json.Unmarshal(body, &acmeError); err != nil { + return fmt.Errorf("acme: parsing error body: %v - %s", err, string(body)) + } + + return acmeError +} diff --git a/vendor/github.com/eggsampler/acme/v3/types.go b/vendor/github.com/eggsampler/acme/v3/types.go new file mode 100644 index 00000000..d15a59fe --- /dev/null +++ b/vendor/github.com/eggsampler/acme/v3/types.go @@ -0,0 +1,163 @@ +package acme + +import ( + "crypto" + "net/http" + "time" +) + +// Different possible challenge types provided by an ACME server. 
+// See https://tools.ietf.org/html/rfc8555#section-9.7.8 +const ( + ChallengeTypeDNS01 = "dns-01" + ChallengeTypeHTTP01 = "http-01" + ChallengeTypeTLSALPN01 = "tls-alpn-01" + + // ChallengeTypeTLSSNI01 is deprecated and should not be used. + // See: https://community.letsencrypt.org/t/important-what-you-need-to-know-about-tls-sni-validation-issues/50811 + ChallengeTypeTLSSNI01 = "tls-sni-01" +) + +// Constants used for certificate revocation, used for RevokeCertificate +// See https://tools.ietf.org/html/rfc5280#section-5.3.1 +const ( + ReasonUnspecified = iota // 0 + ReasonKeyCompromise // 1 + ReasonCaCompromise // 2 + ReasonAffiliationChanged // 3 + ReasonSuperseded // 4 + ReasonCessationOfOperation // 5 + ReasonCertificateHold // 6 + _ // 7 - Unused + ReasonRemoveFromCRL // 8 + ReasonPrivilegeWithdrawn // 9 + ReasonAaCompromise // 10 +) + +// Directory object as returned from the client's directory url upon creation of client. +// See https://tools.ietf.org/html/rfc8555#section-7.1.1 +type Directory struct { + NewNonce string `json:"newNonce"` // url to new nonce endpoint + NewAccount string `json:"newAccount"` // url to new account endpoint + NewOrder string `json:"newOrder"` // url to new order endpoint + NewAuthz string `json:"newAuthz"` // url to new authz endpoint + RevokeCert string `json:"revokeCert"` // url to revoke cert endpoint + KeyChange string `json:"keyChange"` // url to key change endpoint + + // meta object containing directory metadata + Meta struct { + TermsOfService string `json:"termsOfService"` + Website string `json:"website"` + CaaIdentities []string `json:"caaIdentities"` + ExternalAccountRequired bool `json:"externalAccountRequired"` + } `json:"meta"` + + // Directory url provided when creating a new acme client. + URL string `json:"-"` +} + +// Client structure to interact with an ACME server. +// This is typically how most, if not all, of the communication between the client and server occurs. 
+type Client struct { + httpClient *http.Client + nonces *nonceStack + dir Directory + userAgentSuffix string + acceptLanguage string + retryCount int + + // The amount of total time the Client will wait at most for a challenge to be updated or a certificate to be issued. + // Default 30 seconds if duration is not set or if set to 0. + PollTimeout time.Duration + + // The time between checking if a challenge has been updated or a certificate has been issued. + // Default 0.5 seconds if duration is not set or if set to 0. + PollInterval time.Duration +} + +// Account structure representing fields in an account object. +// See https://tools.ietf.org/html/rfc8555#section-7.1.2 +// See also https://tools.ietf.org/html/rfc8555#section-9.7.1 +type Account struct { + Status string `json:"status"` + Contact []string `json:"contact"` + Orders string `json:"orders"` + + // Provided by the Location http header when creating a new account or fetching an existing account. + URL string `json:"-"` + + // The private key used to create or fetch the account. + // Not fetched from server. + PrivateKey crypto.Signer `json:"-"` + + // Thumbprint is the SHA-256 digest JWK_Thumbprint of the account key. + // See https://tools.ietf.org/html/rfc8555#section-8.1 + Thumbprint string `json:"-"` +} + +// Identifier object used in order and authorization objects +// See https://tools.ietf.org/html/rfc8555#section-7.1.4 +type Identifier struct { + Type string `json:"type"` + Value string `json:"value"` +} + +// Order object returned when fetching or creating a new order. 
+// See https://tools.ietf.org/html/rfc8555#section-7.1.3 +type Order struct { + Status string `json:"status"` + Expires time.Time `json:"expires"` + Identifiers []Identifier `json:"identifiers"` + NotBefore time.Time `json:"notBefore"` + NotAfter time.Time `json:"notAfter"` + Error Problem `json:"error"` + Authorizations []string `json:"authorizations"` + Finalize string `json:"finalize"` + Certificate string `json:"certificate"` + + // URL for the order object. + // Provided by the rel="Location" Link http header + URL string `json:"-"` +} + +// Authorization object returned when fetching an authorization in an order. +// See https://tools.ietf.org/html/rfc8555#section-7.1.4 +type Authorization struct { + Identifier Identifier `json:"identifier"` + Status string `json:"status"` + Expires time.Time `json:"expires"` + Challenges []Challenge `json:"challenges"` + Wildcard bool `json:"wildcard"` + + // For convenience access to the provided challenges + ChallengeMap map[string]Challenge `json:"-"` + ChallengeTypes []string `json:"-"` + + URL string `json:"-"` +} + +// Challenge object fetched in an authorization or directly from the challenge url. +// See https://tools.ietf.org/html/rfc8555#section-7.1.5 +type Challenge struct { + Type string `json:"type"` + URL string `json:"url"` + Status string `json:"status"` + Validated string `json:"validated"` + Error Problem `json:"error"` + + // Based on the challenge used + Token string `json:"token"` + KeyAuthorization string `json:"keyAuthorization"` + + // Authorization url provided by the rel="up" Link http header + AuthorizationURL string `json:"-"` +} + +// OrderList of challenge objects. +type OrderList struct { + Orders []string `json:"orders"` + + // Order list pagination, url to next orders. 
+ // Provided by the rel="next" Link http header + Next string `json:"-"` +} diff --git a/vendor/github.com/letsdebug/letsdebug/.gitignore b/vendor/github.com/letsdebug/letsdebug/.gitignore new file mode 100644 index 00000000..7d54ea20 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/.gitignore @@ -0,0 +1,6 @@ +vendor/ +.idea/ +acme-account.json +web/*_gen.go +letsdebug-server +letsdebug-cli diff --git a/vendor/github.com/letsdebug/letsdebug/.travis.yml b/vendor/github.com/letsdebug/letsdebug/.travis.yml new file mode 100644 index 00000000..27038e52 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/.travis.yml @@ -0,0 +1,23 @@ +language: go +go: + - '1.15.x' +before_install: + - sudo apt-get update + - sudo apt-get -y install libunbound-dev make + - sudo mkdir -p $GOPATH/bin + - export PATH=$PATH:$GOPATH/bin +install: + - make deps +script: + - make letsdebug-server letsdebug-cli +deploy: + provider: releases + api_key: + secure: f65vxdzq7SoIooXNAPJaHEiCcnG1Q2R7muomm/5qWyRXgaXyBu6Yo0oOGQNcYLbs22PbRCVS8xnN2cSfFV5jeilRKuhpTYq0tforjJbtjL1DEs9ODyLZnXIXH+uacIPwM/ioxFbFVTnSCkZx90+9I+WHY0taqc2AW49RvQKPTzOmWYZ4ATsQxsv5jBLXZIuhhl3cEJayeogT2yToYump3AZN+8o67kP94a/vSbPMTRcKOeLQa+gjxSoHVBfjmpYvGdFTp1iE8bWsJpfo/i2snF6eMdAig4Vy9Ajk/SVEmSzEBWk31JceDrT9n7VNmlDN9Us2PhdjQLR5KD3OCLx6QN/P72iBN1zq9bTRiHaF4TEUq6IyP3cnDQStfTYzE+IIZtl7DQQKY+Dp5mTO3QSq17Kp7Dvw9mNyGsyE7Oo4VmxHuH8XXbuCoyN2ywJ6l2rv/wuBPylIC5iuguJyVK9WnMxt8vOaBWIAPmm8HbviU3FHnHic6s4DPDpLfwpvsqbxSvEYcj+mRYKhMSD3pF2E/a9wFhph+Wj6sbPhiWkI84D4kmwH42h7WmYqaJfTMGyiZiiFdcF4J/M4c66csWbBLza1GIeNGmxPpKLjilIIBDo6gfpKqQYZllt+ZfeLdwIydE8m5NBVw1d6I0ctF9GNWUG8yfHPKHxNwY05kziW5qA= + file: + - letsdebug-server + - letsdebug-cli + skip_cleanup: true + on: + repo: letsdebug/letsdebug + tags: true diff --git a/vendor/github.com/letsdebug/letsdebug/Makefile b/vendor/github.com/letsdebug/letsdebug/Makefile new file mode 100644 index 00000000..3fcbb33c --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/Makefile @@ -0,0 +1,32 @@ 
+.PHONY: clean all deps server-dev server-dev-db-up deploy + +clean: + rm -f letsdebug-server + +deps: + go get -u github.com/go-bindata/go-bindata/... + +generate: + go generate ./... + +test: + go test -v ./... + +server-dev: generate + LETSDEBUG_WEB_DEBUG=1 \ + LETSDEBUG_WEB_DB_DSN="user=letsdebug dbname=letsdebug password=password sslmode=disable" \ + LETSDEBUG_DEBUG=1 go \ + run -race cmd/server/server.go + +server-dev-db-up: + docker run -d --name letsdebug-db -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_USER=letsdebug postgres:10.3-alpine + +letsdebug-server: generate + go build -o letsdebug-server cmd/server/server.go + +letsdebug-cli: + go build -o letsdebug-cli cmd/cli/cli.go + +deploy: clean letsdebug-server + rsync -vhz --progress letsdebug-server root@letsdebug.net:/usr/local/bin/ && \ + ssh root@letsdebug.net "systemctl restart letsdebug" diff --git a/vendor/github.com/letsdebug/letsdebug/README.md b/vendor/github.com/letsdebug/letsdebug/README.md new file mode 100644 index 00000000..99abc4be --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/README.md @@ -0,0 +1,170 @@ +# Let's Debug + +[![Build Status](https://travis-ci.org/letsdebug/letsdebug.svg?branch=master)](https://travis-ci.org/letsdebug/letsdebug) +[![godoc](https://godoc.org/github.com/letsdebug/letsdebug?status.svg)](https://godoc.org/github.com/letsdebug/letsdebug) + +Let's Debug is a diagnostic website, API, CLI and Go package for quickly and accurately finding and reporting issues for any domain that may prevent issuance of a Let's Encrypt SSL certificate for any ACME validation method. + +It is motivated by [this community thread](https://community.letsencrypt.org/t/creating-a-webservice-for-analysis-of-common-problems/45836). + +## Status +Currently [deployed to letsdebug.net and regularly in use](https://letsdebug.net). 
+ +## Problems Detected + +| Name | Description | Examples +-------|-------------|--------| +| InvalidMethod, ValidationMethodDisabled, ValidationMethodNotSuitable | Checks the ACME validation method is valid and usable for the provided domain name. | [Example](https://letsdebug.net/*.letsencrypt.org/1) | +| InvalidDomain | Checks the domain is a valid domain name on a public TLD. | [Example](https://letsdebug.net/ooga.booga/2) | +| StatusNotOperational| Checks that the Let's Encrypt service is not experiencing an outage, according to status.io | - +| DNSLookupFailed, TXTRecordError | Checks that the Unbound resolver (via libunbound) is able to resolve a variety records relevant to Let's Encrypt. Discovers problems such as DNSSEC issues, 0x20 mixed case randomization, timeouts etc, in the spirit of jsha's unboundtest.com | [Example](https://letsdebug.net/dnssec-failed.org/3) | +CAAIssuanceNotAllowed | Checks that no CAA records are preventing the issuance of Let's Encrypt certificates. | [Example](https://letsdebug.net/id-rsa.pub/4) | +CAACriticalUnknown | Checks that no CAA critical flags unknown to Let's Encrypt are used | - | +RateLimit | Checks that the domain name is not currently affected by any of the domain-based rate limits imposed by Let's Encrypt, using the public certwatch Postgres interface from Comodo's crt.sh. | [Example](https://letsdebug.net/targettec.ddns.net/13) | +NoRecords, ReservedAddress | Checks that sufficient valid A/AAAA records are present to perform HTTP-01 validation | [Example](https://letsdebug.net/localtest.me/6) | +BadRedirect | Checks that no bad HTTP redirects are present. Discovers redirects that aren't accessible, unacceptable ports, unacceptable schemes, accidental missing trailing slash on redirect. | [Example](https://letsdebug.net/foo.monkas.xyz/7) | +WebserverMisconfiguration | Checks whether the server is serving the wrong protocol on the wrong port as the result of an HTTP-01 validation request. 
| - | +ANotWorking, AAAANotWorking | Checks whether listed IP addresses are not functioning properly for HTTP-01 validation, including timeouts and other classes of network and HTTP errors. | [Example](https://letsdebug.net/network-fail.foo.monkas.xyz/8) | +MultipleIPAddressDiscrepancy | For domains with multiple A/AAAA records, checks whether there are major discrepancies between the server responses to reveal when the addresses may be pointing to different servers accidentally. | [Example](https://letsdebug.net/v4v6fail.monkas.xyz/51916) +CloudflareCDN | Checks whether the domain is being served via Cloudflare's proxy service (and therefore SSL termination is occurring at Cloudflare) | - | +CloudflareSSLNotProvisioned | Checks whether the domain has its SSL terminated by Cloudflare and Cloudflare has not provisioned a certificate yet (leading to a TLS handshake error). | [Example](https://letsdebug.net/cf-no-ssl.fleetssl.com/10) | +IssueFromLetsEncrypt | Attempts to detect issues with a high degree of accuracy via the Let's Encrypt v2 staging service by attempting to perform an authorization for the domain. Discovers issues such as CA-based domain blacklists & other policies, specific networking issues. | [Example](https://letsdebug.net/bankofamerica.com/12) | +| TXTDoubleLabel | Checks for the presence of records that are doubled up (e.g. `_acme-challenge.example.org.example.org`). Usually indicates that the user has been incorrectly creating records in their DNS user interface. | [Example](https://letsdebug.net/double.monkas.xyz/2477) | +PortForwarding | Checks whether the domain is serving a modem-router administrative interface instead of an intended webserver, which is indicative of a port-forwarding misconfiguration. | [Example](https://letsdebug.net/cdkauffmannnextcloud.duckdns.org/11450) | +| SanctionedDomain | Checks whether the Registered Domain is present on the [USG OFAC SDN List](https://sanctionssearch.ofac.treas.gov/). Updated daily. 
| [Example](https://letsdebug.net/unomasuno.com.mx/48081) | +| BlockedByNginxTestCookie | Checks whether the HTTP-01 validation requests are being intercepted by [testcookie-nginx-module](https://github.com/kyprizel/testcookie-nginx-module). | [Example](https://letsdebug.net/13513427185.ifastnet.org/51860) | +| HttpOnHttpsPort | Checks whether the server reported receiving an HTTP request on an HTTPS-only port | [Example](https://letsdebug.net/clep-energy.org/107591) | + +## Web API Usage + +There is a JSON-based API available as part of the web frontend. + +### Submitting a test + +```bash +$ curl --data '{"method":"http-01","domain":"letsdebug.net"}' -H 'content-type: application/json' https://letsdebug.net +``` +```javascript +{"Domain":"letsdebug.net","ID":14} +``` + +### Submitting a test with custom options + +```bash +curl --data '{"method":"http-01","domain":"letsdebug.net","options":{"http_request_path":"custom-path","http_expect_response":"abc123"}}' -H 'content-type: application/json' https://letsdebug.net +``` + +Available options are as follows: + +| Option | Description | +-------|-------------| +`http_request_path` | What path within `/.well-known/acme-challenge/` to use instead of `letsdebug-test` (default) for the HTTP check. Max length 255. | +`http_expect_response` | What exact response to expect from each server during the HTTP check. By default, no particular response is expected. If present and the response does not match, the test will fail with an Error severity. It is highly recommended to always use a completely random value. Max length 255. 
| + +### Viewing tests + +```bash +$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net/14 +``` +```javascript +{"id":14,"domain":"letsdebug.net","method":"http-01","status":"Complete","created_at":"2018-04-30T01:58:34.765829Z","started_at":"2018-04-30T01:58:34.769815Z","completed_at":"2018-04-30T01:58:41.39023Z","result":{}} +``` + +or to view all recent tests + +```bash +$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net +``` + +### Performing a query against the Certwatch database + +```bash +$ curl "https://letsdebug.net/certwatch-query?q=" +``` +```javascript +{ + "query": "select c.id as crtsh_id, x509_subjectName(c.CERTIFICATE), x509_notAfter(c.CERTIFICATE) from certificate c where x509_notAfter(c.CERTIFICATE) = '2018-06-01 16:25:44' AND x509_issuerName(c.CERTIFICATE) LIKE 'C=US, O=Let''s Encrypt%';", + "results": [ + { + "crtsh_id": 346300797, + "x509_notafter": "2018-06-01T16:25:44Z", + "x509_subjectname": "CN=hivdatingzimbabwe.com" + }, + /* ... */ + ] +} +``` + +## CLI Usage + +You can download binaries for tagged releases for Linux for both the CLi and the server [from the releases page](https://github.com/letsdebug/letsdebug/releases). + + + letsdebug-cli -domain example.org -method http-01 -debug + +## Library Usage + +```go + +import "github.com/letsdebug/letsdebug" + +problems, _ := letsdebug.Check("example.org", letsdebug.HTTP01) +``` + +## Installation + +### Dependencies + +This package relies on a fairly recent version of libunbound. + +* On Debian-based distributions: + + `apt install libunbound2 libunbound-dev` + +* On EL-based distributions, you may need to build from source because the packages are ancient on e.g. CentOS, but you can try: + + `yum install unbound-libs unbound-devel` + +* On OSX, [Homebrew](https://brew.sh/) contains the latest version of unbound: + + `brew install unbound` + +You will also need Go's [dep](https://github.com/golang/dep) dependency manager. 
+ +### Releases +You can save time by [downloading tagged releases for 64-bit Linux](https://github.com/letsdebug/letsdebug/releases). Keep in mind you will still need to have libunbound present on your system. + +### Building + + go get -u github.com/letsdebug/letsdebug/... + cd $GOPATH/src/github.com/letsdebug/letsdebug + make deps + make letsdebug-cli letsdebug-server + + +## Contributing +Any contributions containing JavaScript will be discarded, but other feedback, bug reports, suggestions and enhancements are welcome - please open an issue first. + +## LICENSE + + MIT License + + Copyright (c) 2018 Let's Debug + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/letsdebug/letsdebug/checker.go b/vendor/github.com/letsdebug/letsdebug/checker.go new file mode 100644 index 00000000..21bc88bb --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/checker.go @@ -0,0 +1,107 @@ +package letsdebug + +import ( + "crypto/sha256" + "errors" + "fmt" + "reflect" + "time" +) + +// ValidationMethod represents an ACME validation method +type ValidationMethod string + +const ( + HTTP01 ValidationMethod = "http-01" // HTTP01 represents the ACME http-01 validation method. + DNS01 ValidationMethod = "dns-01" // DNS01 represents the ACME dns-01 validation method. + TLSALPN01 ValidationMethod = "tls-alpn-01" // TLSALPN01 represents the ACME tls-alpn-01 validation method. +) + +var ( + validMethods = map[ValidationMethod]bool{HTTP01: true, DNS01: true, TLSALPN01: true} + errNotApplicable = errors.New("Checker not applicable for this domain and method") + checkers []checker +) + +func init() { + // Since the OFAC SDN checker polls, we need to initialize it + ofac := &ofacSanctionChecker{} + ofac.setup() + + // We want to launch the slowest checkers as early as possible, + // unless they have a dependency on an earlier checker + checkers = []checker{ + asyncCheckerBlock{ + validMethodChecker{}, + validDomainChecker{}, + wildcardDNS01OnlyChecker{}, + statusioChecker{}, + ofac, + }, + + asyncCheckerBlock{ + caaChecker{}, // depends on valid*Checker + &rateLimitChecker{}, // depends on valid*Checker + dnsAChecker{}, // depends on valid*Checker + txtRecordChecker{}, // depends on valid*Checker + txtDoubledLabelChecker{}, // depends on valid*Checker + }, + + asyncCheckerBlock{ + httpAccessibilityChecker{}, // depends on dnsAChecker + cloudflareChecker{}, // depends on dnsAChecker to some extent + &acmeStagingChecker{}, // Gets the final word + }, + } +} + +type checker interface { + Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) +} + +// asyncCheckerBlock 
represents a checker which is composed of other checkers that can be run simultaneously. +type asyncCheckerBlock []checker + +type asyncResult struct { + Problems []Problem + Error error +} + +func (c asyncCheckerBlock) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + resultCh := make(chan asyncResult, len(c)) + + id := fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%d", time.Now().UnixNano()))))[:4] + debug("[%s] Launching async\n", id) + + for _, task := range c { + go func(task checker, ctx *scanContext, domain string, method ValidationMethod) { + defer func() { + if r := recover(); r != nil { + resultCh <- asyncResult{nil, fmt.Errorf("Check %T paniced: %v", task, r)} + } + }() + t := reflect.TypeOf(task) + debug("[%s] async: + %v\n", id, t) + start := time.Now() + probs, err := task.Check(ctx, domain, method) + debug("[%s] async: - %v in %v\n", id, t, time.Since(start)) + resultCh <- asyncResult{probs, err} + }(task, ctx, domain, method) + } + + var probs []Problem + + for i := 0; i < len(c); i++ { + result := <-resultCh + if result.Error != nil && result.Error != errNotApplicable { + debug("[%s] Exiting async via error\n", id) + return nil, result.Error + } + if len(result.Problems) > 0 { + probs = append(probs, result.Problems...) 
+ } + } + + debug("[%s] Exiting async gracefully\n", id) + return probs, nil +} diff --git a/vendor/github.com/letsdebug/letsdebug/context.go b/vendor/github.com/letsdebug/letsdebug/context.go new file mode 100644 index 00000000..48e87257 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/context.go @@ -0,0 +1,81 @@ +package letsdebug + +import ( + "fmt" + "math/rand" + "net" + "sync" + + "github.com/miekg/dns" +) + +type lookupResult struct { + RRs []dns.RR + Error error +} + +type scanContext struct { + rrs map[string]map[uint16]lookupResult + rrsMutex sync.Mutex + + httpRequestPath string + httpExpectResponse string +} + +func newScanContext() *scanContext { + return &scanContext{ + rrs: map[string]map[uint16]lookupResult{}, + httpRequestPath: "letsdebug-test", + } +} + +func (sc *scanContext) Lookup(name string, rrType uint16) ([]dns.RR, error) { + sc.rrsMutex.Lock() + rrMap, ok := sc.rrs[name] + if !ok { + rrMap = map[uint16]lookupResult{} + sc.rrs[name] = rrMap + } + result, ok := rrMap[rrType] + sc.rrsMutex.Unlock() + + if ok { + return result.RRs, result.Error + } + + resolved, err := lookup(name, rrType) + + sc.rrsMutex.Lock() + rrMap[rrType] = lookupResult{ + RRs: resolved, + Error: err, + } + sc.rrsMutex.Unlock() + + return resolved, err +} + +// Only slightly random - it will use AAAA over A if possible. 
+func (sc *scanContext) LookupRandomHTTPRecord(name string) (net.IP, error) { + v6RRs, err := sc.Lookup(name, dns.TypeAAAA) + if err != nil { + return net.IP{}, err + } + if len(v6RRs) > 0 { + if selected, ok := v6RRs[rand.Intn(len(v6RRs))].(*dns.AAAA); ok { + return selected.AAAA, nil + } + } + + v4RRs, err := sc.Lookup(name, dns.TypeA) + if err != nil { + return net.IP{}, err + } + if len(v4RRs) > 0 { + if selected, ok := v4RRs[rand.Intn(len(v4RRs))].(*dns.A); ok { + return selected.A, nil + } + } + + return net.IP{}, fmt.Errorf("No AAAA or A records were found for %s", name) +} diff --git a/vendor/github.com/letsdebug/letsdebug/dns01.go b/vendor/github.com/letsdebug/letsdebug/dns01.go new file mode 100644 index 00000000..f945969c --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/dns01.go @@ -0,0 +1,156 @@ +package letsdebug + +import ( + "crypto/rand" + "fmt" + "sort" + "strings" + "sync" + + "github.com/miekg/dns" + "github.com/weppos/publicsuffix-go/publicsuffix" +) + +// wildcardDNS01OnlyChecker ensures that a wildcard domain is only validated via dns-01. 
+type wildcardDNS01OnlyChecker struct{} + +func (c wildcardDNS01OnlyChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + if !strings.HasPrefix(domain, "*.") { + return nil, errNotApplicable + } + + if method == DNS01 { + return nil, errNotApplicable + } + + return []Problem{wildcardHTTP01(domain, method)}, nil +} + +func wildcardHTTP01(domain string, method ValidationMethod) Problem { + return Problem{ + Name: "MethodNotSuitable", + Explanation: fmt.Sprintf("A wildcard domain like %s can only be issued using a dns-01 validation method.", domain), + Detail: fmt.Sprintf("Invalid method: %s", method), + Severity: SeverityFatal, + } +} + +// txtRecordChecker ensures there is no resolution errors with the _acme-challenge txt record +type txtRecordChecker struct{} + +func (c txtRecordChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + if method != DNS01 { + return nil, errNotApplicable + } + + domain = strings.TrimPrefix(domain, "*.") + + if _, err := ctx.Lookup("_acme-challenge."+domain, dns.TypeTXT); err != nil { + // report this problem as a fatal problem as that is the purpose of this checker + return []Problem{txtRecordError(domain, err)}, nil + } + + return nil, nil +} + +func txtRecordError(domain string, err error) Problem { + return Problem{ + Name: "TXTRecordError", + Explanation: fmt.Sprintf(`An error occurred while attempting to lookup the TXT record on _acme-challenge.%s . 
`+ + `Any resolver errors that the Let's Encrypt CA encounters on this record will cause certificate issuance to fail.`, domain), + Detail: err.Error(), + Severity: SeverityFatal, + } +} + +// txtDoubledLabelChecker ensures that a record for _acme-challenge.example.org.example.org +// wasn't accidentally created +type txtDoubledLabelChecker struct{} + +func (c txtDoubledLabelChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + if method != DNS01 { + return nil, errNotApplicable + } + + registeredDomain, _ := publicsuffix.Domain(domain) + + variants := []string{ + fmt.Sprintf("_acme-challenge.%s.%s", domain, domain), // _acme-challenge.www.example.org.www.example.org + fmt.Sprintf("_acme-challenge.%s.%s", domain, registeredDomain), // _acme-challenge.www.example.org.example.org + } + + var found []string + distinctCombined := map[string]struct{}{} + var randomCombined string + + var foundMu sync.Mutex + + var wg sync.WaitGroup + wg.Add(len(variants) + 1) + + doQuery := func(q string) ([]string, string) { + found := []string{} + combined := []string{} + rrs, _ := ctx.Lookup(q, dns.TypeTXT) + for _, rr := range rrs { + txt, ok := rr.(*dns.TXT) + if !ok { + continue + } + found = append(found, txt.String()) + combined = append(combined, txt.Txt...) + } + sort.Strings(combined) + return found, strings.Join(combined, "\n") + } + + // Check the double label variants + for _, variant := range variants { + go func(q string) { + defer wg.Done() + + values, combined := doQuery(q) + if len(values) == 0 { + return + } + + foundMu.Lock() + defer foundMu.Unlock() + + found = append(found, values...) 
+ distinctCombined[combined] = struct{}{} + }(variant) + } + + // Check the response for a random subdomain, to detect the presence of a wildcard TXT record + go func() { + defer wg.Done() + + nonce := make([]byte, 4) + _, _ = rand.Read(nonce) + _, randomCombined = doQuery(fmt.Sprintf("_acme-challenge.%s.%s", fmt.Sprintf("rand-%x", nonce), domain)) + }() + + wg.Wait() + + // If a randomized subdomain has the exact same non-empty TXT response as any of the "double labels", then + // we are probably dealing with a wildcard TXT record in the zone, and it is probably not a meaningful + // misconfiguration. In this case, say nothing. + if _, ok := distinctCombined[randomCombined]; ok && randomCombined != "" { + return nil, nil + } + + if len(found) > 0 { + return []Problem{{ + Name: "TXTDoubleLabel", + Explanation: "Some DNS records were found that indicate TXT records may have been incorrectly manually entered into " + + `DNS editor interfaces. The correct way to enter these records is to either remove the domain from the label (so ` + + `enter "_acme-challenge.www.example.org" as "_acme-challenge.www") or include a period (.) 
at the ` + + `end of the label (enter "_acme-challenge.example.org.").`, + Detail: fmt.Sprintf("The following probably-erroneous TXT records were found:\n%s", strings.Join(found, "\n")), + Severity: SeverityWarning, + }}, nil + } + + return nil, nil +} diff --git a/vendor/github.com/letsdebug/letsdebug/dns_util.go b/vendor/github.com/letsdebug/letsdebug/dns_util.go new file mode 100644 index 00000000..662ced9f --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/dns_util.go @@ -0,0 +1,127 @@ +package letsdebug + +import ( + "fmt" + "net" + "strings" + + "github.com/miekg/dns" + "github.com/miekg/unbound" +) + +var ( + reservedNets []*net.IPNet +) + +func lookup(name string, rrType uint16) ([]dns.RR, error) { + ub := unbound.New() + defer ub.Destroy() + + if err := setUnboundConfig(ub); err != nil { + return nil, fmt.Errorf("Failed to configure Unbound resolver: %v", err) + } + + result, err := ub.Resolve(name, rrType, dns.ClassINET) + if err != nil { + return nil, err + } + + if result.Bogus { + return nil, fmt.Errorf("DNS response for %s had fatal DNSSEC issues: %v", name, result.WhyBogus) + } + + if result.Rcode == dns.RcodeServerFailure || result.Rcode == dns.RcodeRefused { + return nil, fmt.Errorf("DNS response for %s/%s did not have an acceptable response code: %s", + name, dns.TypeToString[rrType], dns.RcodeToString[result.Rcode]) + } + + return result.Rr, nil +} + +func normalizeFqdn(name string) string { + name = strings.TrimSpace(name) + name = strings.TrimSuffix(name, ".") + return strings.ToLower(name) +} + +func isAddressReserved(ip net.IP) bool { + for _, reserved := range reservedNets { + if reserved.Contains(ip) { + return true + } + } + return false +} + +func init() { + reservedNets = []*net.IPNet{} + reservedCIDRs := []string{ + "0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", + "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", + "192.0.0.0/24", "192.0.2.0/24", "192.88.99.0/24", + "192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24", + 
"203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4", + "255.255.255.255/32", "::/128", "::1/128", /*"::ffff:0:0/96",*/ + "64:ff9b::/96", "100::/64", "2001::/32", "2001:10::/28", + "2001:20::/28", "2001:db8::/32", "2002::/16", "fc00::/7", + "fe80::/10", "ff00::/8", + } + for _, cidr := range reservedCIDRs { + _, n, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + reservedNets = append(reservedNets, n) + } +} + +func setUnboundConfig(ub *unbound.Unbound) error { + // options need the : in the option key according to docs + opts := []struct { + Opt string + Val string + }{ + {"verbosity:", "0"}, + {"use-syslog:", "no"}, + {"do-ip4:", "yes"}, + {"do-ip6:", "yes"}, + {"do-udp:", "yes"}, + {"do-tcp:", "yes"}, + {"tcp-upstream:", "no"}, + {"harden-glue:", "yes"}, + {"harden-dnssec-stripped:", "yes"}, + {"cache-min-ttl:", "0"}, + {"cache-max-ttl:", "0"}, + {"cache-max-negative-ttl:", "0"}, + {"neg-cache-size:", "0"}, + {"prefetch:", "no"}, + {"unwanted-reply-threshold:", "10000"}, + {"do-not-query-localhost:", "yes"}, + {"val-clean-additional:", "yes"}, + {"harden-algo-downgrade:", "yes"}, + {"edns-buffer-size:", "512"}, + {"val-sig-skew-min:", "0"}, + {"val-sig-skew-max:", "0"}, + {"target-fetch-policy:", "0 0 0 0 0"}, + } + + for _, opt := range opts { + // Can't ignore these because we cant silently have policies being ignored + if err := ub.SetOption(opt.Opt, opt.Val); err != nil { + return fmt.Errorf("Failed to configure unbound with option %s %v", opt.Opt, err) + } + } + + // use-caps-for-id was bugged (no colon) < 1.7.1, try both ways in order to be compatible + // https://www.nlnetlabs.nl/bugs-script/show_bug.cgi?id=4092 + if err := ub.SetOption("use-caps-for-id:", "yes"); err != nil { + if err = ub.SetOption("use-caps-for-id", "yes"); err != nil { + return fmt.Errorf("Failed to configure unbound with use-caps-for-id: %v", err) + } + } + + return ub.AddTa(`. 
172800 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU= + . 172800 IN DNSKEY 256 3 8 AwEAAdp440E6Mz7c+Vl4sPd0lTv2Qnc85dTW64j0RDD7sS/zwxWDJ3QRES2VKDO0OXLMqVJSs2YCCSDKuZXpDPuf++YfAu0j7lzYYdWTGwyNZhEaXtMQJIKYB96pW6cRkiG2Dn8S2vvo/PxW9PKQsyLbtd8PcwWglHgReBVp7kEv/Dd+3b3YMukt4jnWgDUddAySg558Zld+c9eGWkgWoOiuhg4rQRkFstMX1pRyOSHcZuH38o1WcsT4y3eT0U/SR6TOSLIB/8Ftirux/h297oS7tCcwSPt0wwry5OFNTlfMo8v7WGurogfk8hPipf7TTKHIi20LWen5RCsvYsQBkYGpF78= + . 172800 IN DNSKEY 257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0= + . 172800 IN RRSIG DNSKEY 8 0 172800 20181101000000 20181011000000 20326 . 
M/LTswhCjuJUTvX1CFqC+TiJ4Fez7AROa5mM+1AI2MJ+zLHhr3JaMxyydFLWrBHR0056Hz7hNqQ9i63hGeiR6uMfanF0jIRb9XqgGP8nY37T8ESpS1UiM9rJn4b40RFqDSEvuFdd4hGwK3EX0snOCLdUT8JezxtreXI0RilmqDC2g44TAKyFw+Is9Qwl+k6+fbMQ/atA8adANbYgyuHfiwQCCUtXRaTCpRgQtsAz9izO0VYIGeHIoJta0demAIrLCOHNVH2ogHTqMEQ18VqUNzTd0aGURACBdS7PeP2KogPD7N8Q970O84TFmO4ahPIvqO+milCn5OQTbbgsjHqY6Q==`) +} diff --git a/vendor/github.com/letsdebug/letsdebug/generic.go b/vendor/github.com/letsdebug/letsdebug/generic.go new file mode 100644 index 00000000..a84fe8a3 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/generic.go @@ -0,0 +1,861 @@ +package letsdebug + +import ( + "context" + "crypto/x509" + "database/sql" + "encoding/pem" + "encoding/xml" + "io/ioutil" + "net" + "os" + "sort" + "strings" + "sync" + + "github.com/eggsampler/acme/v3" + + "fmt" + + "net/http" + "net/url" + + "time" + + "encoding/json" + + // Driver for crtwatch/ratelimitChecker + _ "github.com/lib/pq" + "github.com/miekg/dns" + "github.com/weppos/publicsuffix-go/net/publicsuffix" + psl "github.com/weppos/publicsuffix-go/publicsuffix" +) + +// validMethodChecker ensures that the provided authorization method is valid and supported. +type validMethodChecker struct{} + +func (c validMethodChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + if validMethods[method] { + return nil, errNotApplicable + } + + return []Problem{notValidMethod(method)}, nil +} + +func notValidMethod(method ValidationMethod) Problem { + var supportedMethods []string + for k := range validMethods { + supportedMethods = append(supportedMethods, string(k)) + } + return Problem{ + Name: "InvalidMethod", + Explanation: fmt.Sprintf(`"%s" is not a supported validation method.`, method), + Detail: fmt.Sprintf("Supported methods: %s", strings.Join(supportedMethods, ", ")), + Severity: SeverityFatal, + } +} + +// validDomainChecker ensures that the FQDN is well-formed and is part of a public suffix. 
+type validDomainChecker struct{}
+
+func (c validDomainChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
+	var probs []Problem
+
+	domain = strings.TrimPrefix(domain, "*.")
+
+	for _, ch := range []byte(domain) {
+		if !(('a' <= ch && ch <= 'z') ||
+			('A' <= ch && ch <= 'Z') ||
+			('0' <= ch && ch <= '9') ||
+			ch == '.' || ch == '-') {
+			probs = append(probs, invalidDomain(domain, fmt.Sprintf("Invalid character present: %c", ch)))
+			return probs, nil
+		}
+	}
+
+	if len(domain) > 230 {
+		probs = append(probs, invalidDomain(domain, "Domain too long"))
+		return probs, nil
+	}
+
+	if ip := net.ParseIP(domain); ip != nil {
+		probs = append(probs, invalidDomain(domain, "Domain is an IP address"))
+		return probs, nil
+	}
+
+	rule := psl.DefaultList.Find(domain, &psl.FindOptions{IgnorePrivate: true, DefaultRule: nil})
+	if rule == nil {
+		probs = append(probs, invalidDomain(domain, "Domain doesn't end in a public TLD"))
+		return probs, nil
+	}
+
+	if r := rule.Decompose(domain)[1]; r == "" {
+		probs = append(probs, invalidDomain(domain, "Domain is a TLD"))
+		return probs, nil
+	} else {
+		probs = append(probs, debugProblem("PublicSuffix", "The IANA public suffix is the TLD of the Registered Domain",
+			fmt.Sprintf("The TLD for %s is: %s", domain, r)))
+	}
+
+	return probs, nil
+}
+
+// caaChecker ensures that any caa record on the domain, or up the domain tree, allow issuance for letsencrypt.org
+type caaChecker struct{}
+
+func (c caaChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
+	var probs []Problem
+
+	wildcard := false
+	if strings.HasPrefix(domain, "*.") {
+		wildcard = true
+		domain = domain[2:]
+	}
+
+	rrs, err := ctx.Lookup(domain, dns.TypeCAA)
+	if err != nil {
+		probs = append(probs, dnsLookupFailed(domain, "CAA", err))
+		return probs, nil
+	}
+
+	// check any found caa records
+	if len(rrs) > 0 {
+		var issue []*dns.CAA
+		var issuewild []*dns.CAA
+		var criticalUnknown
[]*dns.CAA + + for _, rr := range rrs { + caaRr, ok := rr.(*dns.CAA) + if !ok { + continue + } + + switch caaRr.Tag { + case "issue": + issue = append(issue, caaRr) + case "issuewild": + issuewild = append(issuewild, caaRr) + default: + if caaRr.Flag == 1 { + criticalUnknown = append(criticalUnknown, caaRr) + } + } + } + + probs = append(probs, debugProblem("CAA", + "CAA records control authorization for certificate authorities to issue certificates for a domain", + collateRecords(append(issue, issuewild...)))) + + if len(criticalUnknown) > 0 { + probs = append(probs, caaCriticalUnknown(domain, wildcard, criticalUnknown)) + return probs, nil + } + + if len(issue) == 0 && !wildcard { + return probs, nil + } + + records := issue + if wildcard && len(issuewild) > 0 { + records = issuewild + } + + for _, r := range records { + if extractIssuerDomain(r.Value) == "letsencrypt.org" { + return probs, nil + } + } + + probs = append(probs, caaIssuanceNotAllowed(domain, wildcard, records)) + return probs, nil + } + + // recurse up to the public suffix domain until a caa record is found + // a.b.c.com -> b.c.com -> c.com until + if ps, _ := publicsuffix.PublicSuffix(domain); domain != ps && ps != "" { + splitDomain := strings.SplitN(domain, ".", 2) + + parentProbs, err := c.Check(ctx, splitDomain[1], method) + if err != nil { + return nil, fmt.Errorf("error checking caa record on domain: %s, %v", splitDomain[1], err) + } + + probs = append(probs, parentProbs...) 
+ } + + return probs, nil +} + +func extractIssuerDomain(value string) string { + // record can be: + // issuedomain.tld; someparams + return strings.Trim(strings.SplitN(value, ";", 2)[0], " \t") +} + +func collateRecords(records []*dns.CAA) string { + var s []string + for _, r := range records { + s = append(s, r.String()) + } + return strings.Join(s, "\n") +} + +func caaCriticalUnknown(domain string, wildcard bool, records []*dns.CAA) Problem { + return Problem{ + Name: "CAACriticalUnknown", + Explanation: fmt.Sprintf(`CAA record(s) exist on %s (wildcard=%t) that are marked as critical but are unknown to Let's Encrypt. `+ + `These record(s) as shown in the detail must be removed, or marked as non-critical, before a certificate can be issued by the Let's Encrypt CA.`, domain, wildcard), + Detail: collateRecords(records), + Severity: SeverityFatal, + } +} + +func caaIssuanceNotAllowed(domain string, wildcard bool, records []*dns.CAA) Problem { + return Problem{ + Name: "CAAIssuanceNotAllowed", + Explanation: fmt.Sprintf(`No CAA record on %s (wildcard=%t) contains the issuance domain "letsencrypt.org". `+ + `You must either add an additional record to include "letsencrypt.org" or remove every existing CAA record. `+ + `A list of the CAA records are provided in the details.`, domain, wildcard), + Detail: collateRecords(records), + Severity: SeverityFatal, + } +} + +func invalidDomain(domain, reason string) Problem { + return Problem{ + Name: "InvalidDomain", + Explanation: fmt.Sprintf(`"%s" is not a valid domain name that Let's Encrypt would be able to issue a certificate for.`, domain), + Detail: reason, + Severity: SeverityFatal, + } +} + +// cloudflareChecker determines if the domain is using cloudflare, and whether a certificate has been provisioned by cloudflare yet. 
+type cloudflareChecker struct{} + +func (c cloudflareChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { + var probs []Problem + + domain = strings.TrimPrefix(domain, "*.") + + cl := http.Client{ + Timeout: httpTimeout * time.Second, + Transport: makeSingleShotHTTPTransport(), + } + resp, err := cl.Get("https://" + domain) + if err == nil { // no tls error, cert must be issued + // check if it's cloudflare + if hasCloudflareHeader(resp.Header) { + probs = append(probs, cloudflareCDN(domain)) + } + + return probs, nil + } + + // disable redirects + cl.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + // attempt to connect over http with redirects disabled to check cloudflare header + resp, err = cl.Get("http://" + domain) + if err != nil { + return probs, nil + } + + if hasCloudflareHeader(resp.Header) { + probs = append(probs, cloudflareCDN(domain)) + probs = append(probs, cloudflareSslNotProvisioned(domain)) + } + + return probs, nil +} + +func hasCloudflareHeader(h http.Header) bool { + return strings.Contains(strings.ToLower(h.Get("server")), "cloudflare") +} + +func cloudflareCDN(domain string) Problem { + return Problem{ + Name: "CloudflareCDN", + Explanation: fmt.Sprintf(`The domain %s is being served through Cloudflare CDN. Any Let's Encrypt certificate installed on the `+ + `origin server will only encrypt traffic between the server and Cloudflare. 
It is strongly recommended that the SSL option 'Full SSL (strict)' `+ + `be enabled.`, domain), + Detail: "https://support.cloudflare.com/hc/en-us/articles/200170416-What-do-the-SSL-options-mean-", + Severity: SeverityWarning, + } +} + +func cloudflareSslNotProvisioned(domain string) Problem { + return Problem{ + Name: "CloudflareSSLNotProvisioned", + Explanation: fmt.Sprintf(`The domain %s is being served through Cloudflare CDN and a certificate has not yet been provisioned yet by Cloudflare.`, domain), + Detail: "https://support.cloudflare.com/hc/en-us/articles/203045244-How-long-does-it-take-for-Cloudflare-s-SSL-to-activate-", + Severity: SeverityWarning, + } +} + +// statusioChecker ensures there is no reported operational problem with the Let's Encrypt service via the status.io public api. +type statusioChecker struct{} + +// statusioSignificantStatuses denotes which statuses warrant raising a warning. +// 100 (operational) and 200 (undocumented but assume "Planned Maintenance") should not be included. 
// https://kb.status.io/developers/status-codes/
var statusioSignificantStatuses = map[int]bool{
	300: true, // Degraded Performance
	400: true, // Partial Service Disruption
	500: true, // Service Disruption
	600: true, // Security Event
}

// Check queries the status.io public API for the Let's Encrypt service page
// and raises a warning Problem if the overall status is one of the
// significant (degraded/disrupted) codes. Connectivity errors to status.io
// are deliberately swallowed.
func (c statusioChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	var probs []Problem

	resp, err := http.Get("https://api.status.io/1.0/status/55957a99e800baa4470002da")
	if err != nil {
		// some connectivity errors with status.io is probably not worth reporting
		return probs, nil
	}
	defer resp.Body.Close()

	// Anonymous struct mirroring only the fields of the status.io response
	// that are needed here.
	apiResp := struct {
		Result struct {
			StatusOverall struct {
				Updated    time.Time `json:"updated"`
				Status     string    `json:"status"`
				StatusCode int       `json:"status_code"`
			} `json:"status_overall"`
		} `json:"result"`
	}{}

	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return probs, fmt.Errorf("error decoding status.io api response: %v", err)
	}

	if statusioSignificantStatuses[apiResp.Result.StatusOverall.StatusCode] {
		probs = append(probs, statusioNotOperational(apiResp.Result.StatusOverall.Status, apiResp.Result.StatusOverall.Updated))
	}

	probs = append(probs, debugProblem("StatusIO", "The current status.io status for Let's Encrypt",
		fmt.Sprintf("%v", apiResp.Result.StatusOverall.Status)))

	return probs, nil
}

// statusioNotOperational builds the warning Problem raised when status.io
// reports a significant (non-operational) status for Let's Encrypt.
func statusioNotOperational(status string, updated time.Time) Problem {
	return Problem{
		Name: "StatusNotOperational",
		Explanation: fmt.Sprintf(`The current status as reported by the Let's Encrypt status page is %s as at %v. `+
			`Depending on the reported problem, this may affect certificate issuance. For more information, please visit the status page.`, status, updated),
		Detail:   "https://letsencrypt.status.io/",
		Severity: SeverityWarning,
	}
}

// crtList maps certificate serial numbers to parsed certificates.
type crtList map[string]*x509.Certificate

// FindWithCommonRegisteredDomain finds any certificates which contain any
// DNSName that shares the Registered Domain `registeredDomain`.
// (Comment previously referred to a stale name, "FindCommonPSLCertificates".)
func (l crtList) FindWithCommonRegisteredDomain(registeredDomain string) sortedCertificates {
	var out sortedCertificates

	for _, cert := range l {
		for _, name := range cert.DNSNames {
			if nameRegDomain, _ := publicsuffix.EffectiveTLDPlusOne(name); nameRegDomain == registeredDomain {
				out = append(out, cert)
				break
			}
		}
	}

	sort.Sort(out)

	return out
}

// GetOldestCertificate returns the certificate with the earliest NotBefore,
// or nil for an empty list.
func (l crtList) GetOldestCertificate() *x509.Certificate {
	var oldest *x509.Certificate
	for _, crt := range l {
		if oldest == nil || crt.NotBefore.Before(oldest.NotBefore) {
			oldest = crt
		}
	}
	return oldest
}

// CountDuplicates counts how many duplicate certificates there are
// that also contain the name `domain`. The returned map is keyed by the
// sorted, comma-joined SAN set of each certificate.
func (l crtList) CountDuplicates(domain string) map[string]int {
	counts := map[string]int{}

	for _, cert := range l {
		found := false
		for _, name := range cert.DNSNames {
			if name == domain {
				found = true
				break
			}
		}
		if !found {
			continue
		}
		// Copy before sorting so the certificate's DNSNames slice is not mutated.
		names := make([]string, len(cert.DNSNames))
		copy(names, cert.DNSNames)
		sort.Strings(names)
		k := strings.Join(names, ",")
		counts[k]++
	}

	return counts
}

// rateLimitChecker ensures that the domain is not currently affected
// by domain-based rate limits using crtwatch's database
type rateLimitChecker struct {
}

// sortedCertificates sorts by NotBefore, newest first (note the reversed
// comparison in Less).
type sortedCertificates []*x509.Certificate

func (certs sortedCertificates) Len() int      { return len(certs) }
func (certs sortedCertificates) Swap(i, j int) { certs[i], certs[j] = certs[j], certs[i] }
func (certs sortedCertificates) Less(i, j int) bool {
	return certs[j].NotBefore.Before(certs[i].NotBefore)
}

// rateLimitCheckerQuery is a crt.sh/certwatch query template; the three %s
// verbs are filled with (registeredDomain, registeredDomain, RFC3339 cutoff).
const rateLimitCheckerQuery = `
WITH ci AS
  (SELECT min(sub.CERTIFICATE_ID) ID,
          min(sub.ISSUER_CA_ID) ISSUER_CA_ID,
          sub.CERTIFICATE DER
   FROM
     (SELECT *
      FROM certificate_and_identities cai
      WHERE plainto_tsquery('%s') @@ identities(cai.CERTIFICATE)
        AND cai.NAME_VALUE ILIKE ('%%%s%%')
        AND x509_notBefore(cai.CERTIFICATE) >= '%s'
        AND cai.issuer_ca_id IN (16418, 183267, 183283)
      LIMIT 1000) sub
   GROUP BY sub.CERTIFICATE)
SELECT ci.DER der
FROM ci
LEFT JOIN LATERAL
  (SELECT min(ctle.ENTRY_TIMESTAMP) ENTRY_TIMESTAMP
   FROM ct_log_entry ctle
   WHERE ctle.CERTIFICATE_ID = ci.ID ) le ON TRUE,
     ca
WHERE ci.ISSUER_CA_ID = ca.ID
ORDER BY le.ENTRY_TIMESTAMP DESC;`

// Pointer receiver because we're keeping state across runs
//
// Check queries the public crt.sh certwatch database for recently issued
// certificates sharing this domain's Registered Domain, and raises Problems
// for the "Certificates per Registered Domain" (50/week) and "Duplicate
// Certificate" (5/week) Let's Encrypt rate limits.
func (c *rateLimitChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_DISABLE_CERTWATCH") != "" {
		return nil, errNotApplicable
	}

	domain = strings.TrimPrefix(domain, "*.")

	db, err := sql.Open("postgres", "user=guest dbname=certwatch host=crt.sh sslmode=disable connect_timeout=5")
	if err != nil {
		return []Problem{
			internalProblem(fmt.Sprintf("Failed to connect to certwatch database to check rate limits: %v", err), SeverityDebug),
		}, nil
	}
	defer db.Close()

	// Since we are checking rate limits, we need to query the Registered Domain
	// for the domain in question
	registeredDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain)

	timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Avoiding using a prepared statement here because it's being weird with crt.sh
	// NOTE(review): registeredDomain is interpolated directly into the SQL.
	// It derives from user-supplied input; confirm EffectiveTLDPlusOne output
	// cannot contain quoting characters, or escape it, before relying on this
	// against anything other than the read-only crt.sh mirror.
	q := fmt.Sprintf(rateLimitCheckerQuery,
		registeredDomain, registeredDomain, time.Now().Add(-168*time.Hour).Format(time.RFC3339))
	rows, err := db.QueryContext(timeoutCtx, q)
	if err != nil && err != sql.ErrNoRows {
		return []Problem{
			internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug),
		}, nil
	}

	probs := []Problem{}

	// Read in the DER-encoded certificates
	certs := crtList{}
	var certBytes []byte
	for rows.Next() {
		if err := rows.Scan(&certBytes); err != nil {
			probs = append(probs, internalProblem(fmt.Sprintf("Failed to query certwatch database while checking rate limits: %v", err), SeverityDebug))
			break
		}
		crt, err := x509.ParseCertificate(certBytes)
		if err != nil {
			probs = append(probs, internalProblem(fmt.Sprintf("Failed to parse certificate while checking rate limits: %v", err), SeverityDebug))
			continue
		}
		certs[crt.SerialNumber.String()] = crt
	}
	if err := rows.Err(); err != nil {
		return []Problem{
			internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug),
		}, nil
	}

	var debug string

	// Limit: Certificates per Registered Domain
	// TODO: implement Renewal Exemption
	certsTowardsRateLimit := certs.FindWithCommonRegisteredDomain(registeredDomain)
	if len(certs) > 0 && len(certsTowardsRateLimit) >= 50 {
		// NOTE(review): dropOff is derived from the oldest certificate in the
		// entire result set, not only those sharing the Registered Domain —
		// confirm this is intentional.
		dropOff := certs.GetOldestCertificate().NotBefore.Add(7 * 24 * time.Hour)
		dropOffDiff := time.Until(dropOff).Truncate(time.Minute)

		probs = append(probs, rateLimited(domain, fmt.Sprintf("The 'Certificates per Registered Domain' limit ("+
			"50 certificates per week that share the same Registered Domain: %s) has been exceeded. "+
			"There is no way to work around this rate limit. "+
			"The next non-renewal certificate for this Registered Domain should be issuable after %v (%v from now).",
			registeredDomain, dropOff, dropOffDiff)))
	}

	for _, cert := range certsTowardsRateLimit {
		debug = fmt.Sprintf("%s\nSerial: %s\nNotBefore: %v\nNames: %v\n", debug, cert.SerialNumber.String(), cert.NotBefore, cert.DNSNames)
	}

	// Limit: Duplicate Certificate limit of 5 certificates per week
	for names, dupes := range certs.CountDuplicates(domain) {
		if dupes < 5 {
			continue
		}
		probs = append(probs, rateLimited(domain,
			fmt.Sprintf(`The Duplicate Certificate limit (5 certificates with the exact same set of domains per week) has been `+
				`exceeded and is affecting the domain "%s". The exact set of domains affected is: "%v". It may be possible to avoid this `+
				`rate limit by issuing a certificate with an additional or different domain name.`, domain, names)))
	}

	if debug != "" {
		probs = append(probs, debugProblem("RateLimit",
			fmt.Sprintf("%d Certificates contributing to rate limits for this domain", len(certsTowardsRateLimit)), debug))
	}

	return probs, nil
}

// rateLimited builds the error-severity Problem describing an active
// Let's Encrypt rate limit; detail carries the limit-specific message.
func rateLimited(domain, detail string) Problem {
	registeredDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain)
	return Problem{
		Name: "RateLimit",
		Explanation: fmt.Sprintf(`%s is currently affected by Let's Encrypt-based rate limits (https://letsencrypt.org/docs/rate-limits/). `+
			`You may review certificates that have already been issued by visiting https://crt.sh/?q=%%%s . `+
			`Please note that it is not possible to ask for a rate limit to be manually cleared.`, domain, registeredDomain),
		Detail:   detail,
		Severity: SeverityError,
	}
}

// acmeStagingChecker tries to create an authorization on
// Let's Encrypt's staging server and parse the error urn
// to see if there's anything interesting reported.
type acmeStagingChecker struct {
	client   acme.Client
	account  acme.Account
	clientMu sync.Mutex
}

// buildAcmeClient lazily constructs the staging ACME client and loads the
// pre-registered account (PEM key + account URL) from the JSON file named by
// LETSDEBUG_ACMESTAGING_ACCOUNTFILE (default "acme-account.json").
func (c *acmeStagingChecker) buildAcmeClient() error {
	cl, err := acme.NewClient("https://acme-staging-v02.api.letsencrypt.org/directory")
	if err != nil {
		return err
	}

	// Give the ACME CA more time to complete challenges
	cl.PollTimeout = 100 * time.Second

	regrPath := os.Getenv("LETSDEBUG_ACMESTAGING_ACCOUNTFILE")
	if regrPath == "" {
		regrPath = "acme-account.json"
	}
	buf, err := ioutil.ReadFile(regrPath)
	if err != nil {
		return err
	}

	var out struct {
		PEM string `json:"pem"`
		URL string `json:"url"`
	}
	if err := json.Unmarshal(buf, &out); err != nil {
		return err
	}

	// NOTE(review): pem.Decode returns a nil block when the input contains no
	// PEM data; block.Bytes would then panic. Consider a nil check.
	block, _ := pem.Decode([]byte(out.PEM))
	pk, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return err
	}

	c.account = acme.Account{PrivateKey: pk, URL: out.URL}
	c.client = cl

	return nil
}

// Check creates a test order on the Let's Encrypt staging environment and
// attempts the requested challenge type on every authorization, translating
// any ACME errors into Problems. Challenges run concurrently, one goroutine
// per authorization, guarded by probsMu.
func (c *acmeStagingChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_DISABLE_ACMESTAGING") != "" {
		return nil, errNotApplicable
	}

	// Build the shared client once; clientMu serializes first-time setup.
	c.clientMu.Lock()
	if c.account.PrivateKey == nil {
		if err := c.buildAcmeClient(); err != nil {
			c.clientMu.Unlock()
			return []Problem{
				internalProblem(fmt.Sprintf("Couldn't setup Let's Encrypt staging checker, skipping: %v", err), SeverityWarning),
			}, nil
		}
	}
	c.clientMu.Unlock()

	probs := []Problem{}

	order, err := c.client.NewOrder(c.account, []acme.Identifier{{Type: "dns", Value: domain}})
	if err != nil {
		if p := translateAcmeError(domain, err); p.Name != "" {
			probs = append(probs, p)
		}
		probs = append(probs, debugProblem("LetsEncryptStaging", "Order creation error", err.Error()))
		return probs, nil
	}

	var wg sync.WaitGroup
	wg.Add(len(order.Authorizations))
	var probsMu sync.Mutex

	// unhandledError records an unexpected failure under the shared mutex.
	unhandledError := func(err error) {
		probsMu.Lock()
		defer probsMu.Unlock()

		probs = append(probs, internalProblem("An unknown problem occurred while performing a test "+
			"authorization against the Let's Encrypt staging service: "+err.Error(), SeverityWarning))
	}

	authzFailures := []string{}

	for _, authzURL := range order.Authorizations {
		go func(authzURL string) {
			defer wg.Done()

			authz, err := c.client.FetchAuthorization(c.account, authzURL)
			if err != nil {
				unhandledError(err)
				return
			}

			chal, ok := authz.ChallengeMap[string(method)]
			if !ok {
				unhandledError(fmt.Errorf("Missing challenge method (want %v): %v", method, authz.ChallengeMap))
				return
			}

			if _, err := c.client.UpdateChallenge(c.account, chal); err != nil {
				probsMu.Lock()
				if p := translateAcmeError(domain, err); p.Name != "" {
					probs = append(probs, p)
				}
				authzFailures = append(authzFailures, err.Error())
				probsMu.Unlock()
			}
		}(authzURL)
	}

	wg.Wait()

	if len(authzFailures) > 0 {
		probs = append(probs, debugProblem("LetsEncryptStaging",
			fmt.Sprintf("Challenge update failures for %s in order %s", domain, order.URL),
			strings.Join(authzFailures, "\n")))
	} else {
		probs = append(probs, debugProblem("LetsEncryptStaging", "Order for "+domain, order.URL))
	}

	return probs, nil
}

// translateAcmeError maps an ACME error urn to a Problem. A zero Problem
// (Name == "") means "nothing worth reporting" — e.g. the expected
// "unauthorized" outcome of a deliberately unanswered challenge.
func translateAcmeError(domain string, err error) Problem {
	if acmeErr, ok := err.(acme.Problem); ok {
		urn := strings.TrimPrefix(acmeErr.Type, "urn:ietf:params:acme:error:")
		switch urn {
		case "rejectedIdentifier", "unknownHost", "rateLimited", "caa", "dns", "connection":
			// Boulder can send error:dns when _acme-challenge is NXDOMAIN, which is
			// equivalent to unauthorized
			if strings.Contains(acmeErr.Detail, "NXDOMAIN looking up TXT") {
				return Problem{}
			}
			return letsencryptProblem(domain, acmeErr.Detail, SeverityError)
		// When something bad is happening on staging
		case "serverInternal":
			return letsencryptProblem(domain,
				fmt.Sprintf(`There may be internal issues on the staging service: %v`, acmeErr.Detail), SeverityWarning)
		// Unauthorized is what we expect, except for these exceptions that we should handle:
		// - When VA OR RA is checking Google Safe Browsing (groan)
		case "unauthorized":
			if strings.Contains(acmeErr.Detail, "considered an unsafe domain") {
				return letsencryptProblem(domain, acmeErr.Detail, SeverityError)
			}
			return Problem{}
		default:
			return Problem{}
		}
	}
	return internalProblem(fmt.Sprintf("An unknown issue occurred when performing a test authorization "+
		"against the Let's Encrypt staging service: %v", err), SeverityWarning)
}

// letsencryptProblem builds the Problem used for issues surfaced directly by
// the Let's Encrypt staging service.
func letsencryptProblem(domain, detail string, severity SeverityLevel) Problem {
	return Problem{
		Name: "IssueFromLetsEncrypt",
		Explanation: fmt.Sprintf(`A test authorization for %s to the Let's Encrypt staging service has revealed `+
			`issues that may prevent any certificate for this domain being issued.`, domain),
		Detail:   detail,
		Severity: severity,
	}
}

// ofacSanctionChecker checks whether a Registered Domain is present on the the XML sanctions list
// (https://www.treasury.gov/ofac/downloads/sdn.xml).
// It is disabled by default, and must be enabled with the environment variable LETSDEBUG_ENABLE_OFAC=1
type ofacSanctionChecker struct {
	muRefresh sync.RWMutex
	domains   map[string]struct{}
}

// Check reports an error Problem if the domain's Registered Domain appears in
// the cached OFAC SDN set maintained by poll().
// NOTE(review): "futher" is a typo in the user-facing message below; a
// doc-only change may not alter runtime strings, so it is flagged here.
func (c *ofacSanctionChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" {
		return nil, errNotApplicable
	}
	c.muRefresh.RLock()
	defer c.muRefresh.RUnlock()

	rd, _ := publicsuffix.EffectiveTLDPlusOne(domain)
	for sanctionedRD := range c.domains {
		if rd != sanctionedRD {
			continue
		}

		return []Problem{{
			Name: "SanctionedDomain",
			Explanation: fmt.Sprintf("The Registered Domain %s was found on the United States' OFAC "+
				"Specially Designated Nationals and Blocked Persons (SDN) List. Let's Encrypt are unable to issue certificates "+
				"for sanctioned entities. Search on https://sanctionssearch.ofac.treas.gov/ for futher details.", sanctionedRD),
			Severity: SeverityError,
		}}, nil
	}

	return nil, nil
}

// setup starts the daily background refresh of the SDN list when the checker
// is enabled via LETSDEBUG_ENABLE_OFAC=1.
func (c *ofacSanctionChecker) setup() {
	if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" {
		return
	}
	c.domains = map[string]struct{}{}
	go func() {
		for {
			if err := c.poll(); err != nil {
				fmt.Printf("OFAC SDN poller failed: %v\n", err)
			}
			time.Sleep(24 * time.Hour)
		}
	}()
}

// poll downloads the OFAC SDN XML feed and streams through it, collecting the
// Registered Domains of every <id> whose <idType> is "Website", then swaps
// the collected set in under the write lock.
func (c *ofacSanctionChecker) poll() error {
	req, _ := http.NewRequest(http.MethodGet, "https://www.treasury.gov/ofac/downloads/sdn.xml", nil)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req = req.WithContext(ctx)
	req.Header.Set("User-Agent", "Let's Debug (https://letsdebug.net)")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}

	defer resp.Body.Close()

	dec := xml.NewDecoder(resp.Body)

	registeredDomains := map[string]struct{}{}
	// isID tracks whether the cursor is inside an <id> element whose idType
	// has not ruled it out as a "Website" entry.
	isID := false
	for {
		tok, _ := dec.Token()
		if tok == nil {
			break
		}

		switch el := tok.(type) {
		case xml.StartElement:
			if el.Name.Local == "id" {
				isID = true
				break
			}
			if el.Name.Local == "idType" {
				next, _ := dec.Token()
				if next == nil {
					break
				}
				raw, ok := next.(xml.CharData)
				if !ok {
					break
				}
				if string(raw) != "Website" {
					isID = false
					break
				}
				break
			}
			if el.Name.Local == "idNumber" && isID {
				next, _ := dec.Token()
				if next == nil {
					break
				}
				raw, ok := next.(xml.CharData)
				if !ok {
					break
				}
				if rd := c.extractRegisteredDomain(string(raw)); rd != "" {
					registeredDomains[rd] = struct{}{}
				}
			}
		case xml.EndElement:
			if el.Name.Local == "id" {
				isID = false
				break
			}
		}
	}

	c.muRefresh.Lock()
	defer c.muRefresh.Unlock()

	c.domains = registeredDomains

	return nil
}

// extractRegisteredDomain normalizes an SDN "Website" value (which may be a
// bare host or a full URL) down to its Registered Domain, or "" on failure.
func (c *ofacSanctionChecker) extractRegisteredDomain(d string) string {
	d = strings.ToLower(strings.TrimSpace(d))
	if len(d) == 0 {
		return ""
	}
	// If there's a protocol or path, then we need to parse the URL and extract the host
	if strings.Contains(d, "/") {
		u, err := url.Parse(d)
		if err != nil {
			return ""
		}
		d = u.Host
	}
	d, _ = publicsuffix.EffectiveTLDPlusOne(d)
	return d
}
diff --git a/vendor/github.com/letsdebug/letsdebug/http01.go b/vendor/github.com/letsdebug/letsdebug/http01.go
new file mode 100644
index 00000000..01c65c03
--- /dev/null
+++ b/vendor/github.com/letsdebug/letsdebug/http01.go
@@ -0,0 +1,268 @@
package letsdebug

import (
	"bytes"
	"fmt"
	"net"
	"strings"
	"sync"

	"github.com/miekg/dns"
)

var (
	// likelyModemRouters are Server header values of embedded admin
	// interfaces that commonly indicate a port-forwarding misconfiguration.
	likelyModemRouters = []string{"micro_httpd", "cisco-IOS", "LANCOM", "Mini web server 1.0 ZTE corp 2005."}
	// isLikelyNginxTestcookiePayloads are body fragments emitted by the
	// nginx testcookie anti-bot module.
	isLikelyNginxTestcookiePayloads = [][]byte{
		[]byte(`src="/aes.js"`),
		[]byte(`src="/aes.min.js"`),
		[]byte(`var a=toNumbers`)}
	// isHTTP497Payloads are body fragments produced when plain HTTP is sent
	// to an HTTPS port.
	isHTTP497Payloads = [][]byte{
		// httpd: https://github.com/apache/httpd/blob/e820d1ea4d3f1f5152574dbaa13979887a5c14b7/modules/ssl/ssl_engine_kernel.c#L322
		[]byte("You're speaking plain HTTP to an SSL-enabled server port"),
		// nginx: https://github.com/nginx/nginx/blob/15544440425008d5ad39a295b826665ad56fdc90/src/http/ngx_http_special_response.c#L274
		[]byte("400 The plain HTTP request was sent to HTTPS port"),
	}
)

// dnsAChecker checks if there are any issues in Unbound looking up the A and
// AAAA records for a domain (such as DNSSEC issues or dead nameservers)
type dnsAChecker struct{}

// Check resolves the domain's A and AAAA records concurrently, reporting
// lookup failures, reserved (non-routable) addresses, and absence of any
// usable records.
func (c dnsAChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != HTTP01 {
		return nil, errNotApplicable
	}

	var probs []Problem
	var aRRs, aaaaRRs []dns.RR
	var aErr, aaaaErr error

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		aaaaRRs, aaaaErr = ctx.Lookup(domain, dns.TypeAAAA)
	}()

	go func() {
		defer wg.Done()
		aRRs, aErr = ctx.Lookup(domain, dns.TypeA)
	}()

	wg.Wait()

	if aErr != nil {
		probs = append(probs, dnsLookupFailed(domain, "A", aErr))
	}
	if aaaaErr != nil {
		probs = append(probs, dnsLookupFailed(domain, "AAAA", aaaaErr))
	}

	for _, rr := range aRRs {
		if aRR, ok := rr.(*dns.A); ok && isAddressReserved(aRR.A) {
			probs = append(probs, reservedAddress(domain, aRR.A.String()))
		}
	}
	for _, rr := range aaaaRRs {
		if aaaaRR, ok := rr.(*dns.AAAA); ok && isAddressReserved(aaaaRR.AAAA) {
			probs = append(probs, reservedAddress(domain, aaaaRR.AAAA.String()))
		}
	}

	var sb []string
	for _, rr := range append(aRRs, aaaaRRs...) {
		sb = append(sb, rr.String())
	}

	if len(sb) > 0 {
		probs = append(probs, debugProblem("HTTPRecords", "A and AAAA records found for this domain", strings.Join(sb, "\n")))
	}

	if len(sb) == 0 {
		probs = append(probs, noRecords(domain, "No A or AAAA records found."))
	}

	return probs, nil
}

// httpAccessibilityChecker checks whether an HTTP ACME validation request
// would lead to any issues such as:
// - Bad redirects
// - IPs not listening on port 80
type httpAccessibilityChecker struct{}

// Check performs an emulated HTTP-01 validation request against every A/AAAA
// address of the domain, flags per-address failures, detects inconsistent
// responses between addresses, and recognizes several well-known failure
// signatures (router admin pages, nginx testcookie, HTTP-on-HTTPS-port).
func (c httpAccessibilityChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) {
	if method != HTTP01 {
		return nil, errNotApplicable
	}

	var probs []Problem

	var ips []net.IP

	rrs, _ := ctx.Lookup(domain, dns.TypeAAAA)
	for _, rr := range rrs {
		aaaa, ok := rr.(*dns.AAAA)
		if !ok {
			continue
		}
		ips = append(ips, aaaa.AAAA)
	}
	rrs, _ = ctx.Lookup(domain, dns.TypeA)
	for _, rr := range rrs {
		a, ok := rr.(*dns.A)
		if !ok {
			continue
		}
		ips = append(ips, a.A)
	}

	if len(ips) == 0 {
		return probs, nil
	}

	// Track whether responses differ between any of the A/AAAA addresses
	// for the domain
	allCheckResults := []httpCheckResult{}

	var debug []string

	for _, ip := range ips {
		res, prob := checkHTTP(ctx, domain, ip)
		allCheckResults = append(allCheckResults, res)
		if !prob.IsZero() {
			probs = append(probs, prob)
		}
		debug = append(debug, fmt.Sprintf("Request to: %s/%s, Result: %s, Issue: %s\nTrace:\n%s\n",
			domain, ip.String(), res.String(), prob.Name, strings.Join(res.DialStack, "\n")))
	}

	// Filter out the servers that didn't respond at all
	var nonZeroResults []httpCheckResult
	for _, v := range allCheckResults {
		if v.IsZero() {
			continue
		}
		nonZeroResults = append(nonZeroResults, v)
	}
	if len(nonZeroResults) > 1 {
		firstResult := nonZeroResults[0]
		for _, otherResult := range nonZeroResults[1:] {
			if firstResult.StatusCode != otherResult.StatusCode ||
				firstResult.ServerHeader != otherResult.ServerHeader ||
				firstResult.NumRedirects != otherResult.NumRedirects ||
				firstResult.InitialStatusCode != otherResult.InitialStatusCode {
				probs = append(probs, multipleIPAddressDiscrepancy(domain, firstResult, otherResult))
			}
		}
	}

	probs = append(probs, debugProblem("HTTPCheck", "Requests made to the domain", strings.Join(debug, "\n")))

	if res := isLikelyModemRouter(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "PortForwarding",
			Explanation: "A request to your domain revealed that the web server that responded may be " +
				"the administrative interface of a modem or router. This can indicate an issue with the port forwarding " +
				"setup on that modem or router. You may need to reconfigure the device to properly forward traffic to your " +
				"intended webserver.",
			Detail: fmt.Sprintf(`The web server that responded identified itself as "%s", `+
				"which is a known webserver commonly used by modems/routers.", res.ServerHeader),
			Severity: SeverityWarning,
		})
	}

	if res := isLikelyNginxTestcookie(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "BlockedByNginxTestCookie",
			Explanation: "The validation request to this domain was blocked by a deployment of the nginx " +
				"testcookie module (https://github.com/kyprizel/testcookie-nginx-module). This module is designed to " +
				"block robots, and causes the Let's Encrypt validation process to fail. The server administrator can " +
				"solve this issue by disabling the module (`testcookie off;`) for requests under the path of `/.well-known" +
				"/acme-challenge/`.",
			Detail:   fmt.Sprintf("The server at %s produced this result.", res.IP.String()),
			Severity: SeverityError,
		})
	}

	if res := isHTTP497(allCheckResults); !res.IsZero() {
		probs = append(probs, Problem{
			Name: "HttpOnHttpsPort",
			Explanation: "A validation request to this domain resulted in an HTTP request being made to a port that expects " +
				"to receive HTTPS requests. This could be the result of an incorrect redirect (such as to http://example.com:443/) " +
				"or it could be the result of a webserver misconfiguration, such as trying to enable SSL on a port 80 virtualhost.",
			Detail:   strings.Join(res.DialStack, "\n"),
			Severity: SeverityError,
		})
	}

	return probs, nil
}

// noRecords builds the fatal Problem for a domain with no resolvable A/AAAA
// records.
// NOTE(review): "to to" is a typo in the user-facing message below; a
// doc-only change may not alter runtime strings, so it is flagged here.
func noRecords(name, rrSummary string) Problem {
	return Problem{
		Name: "NoRecords",
		Explanation: fmt.Sprintf(`No valid A or AAAA records could be ultimately resolved for %s. `+
			`This means that Let's Encrypt would not be able to to connect to your domain to perform HTTP validation, since `+
			`it would not know where to connect to.`, name),
		Detail:   rrSummary,
		Severity: SeverityFatal,
	}
}

// reservedAddress builds the fatal Problem for a domain pointing at an
// IANA/IETF-reserved, non-routable address.
func reservedAddress(name, address string) Problem {
	return Problem{
		Name: "ReservedAddress",
		Explanation: fmt.Sprintf(`A private, inaccessible, IANA/IETF-reserved IP address was found for %s. Let's Encrypt will always fail HTTP validation `+
			`for any domain that is pointing to an address that is not routable on the internet. You should either remove this address `+
			`and replace it with a public one or use the DNS validation method instead.`, name),
		Detail:   address,
		Severity: SeverityFatal,
	}
}

// multipleIPAddressDiscrepancy warns when two of the domain's addresses
// answered the emulated validation request differently.
func multipleIPAddressDiscrepancy(domain string, result1, result2 httpCheckResult) Problem {
	return Problem{
		Name: "MultipleIPAddressDiscrepancy",
		Explanation: fmt.Sprintf(`%s has multiple IP addresses in its DNS records. While they appear to be accessible on the network, `+
			`we have detected that they produce differing results when sent an ACME HTTP validation request. This may indicate that `+
			`some of the IP addresses may unintentionally point to different servers, which would cause validation to fail.`,
			domain),
		Detail:   fmt.Sprintf("%s vs %s", result1.String(), result2.String()),
		Severity: SeverityWarning,
	}
}

// isLikelyModemRouter returns the first result whose Server header matches a
// known modem/router admin interface, or a zero result.
func isLikelyModemRouter(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, toMatch := range likelyModemRouters {
			if res.ServerHeader == toMatch {
				return res
			}
		}
	}
	return httpCheckResult{}
}

// isLikelyNginxTestcookie returns the first result whose body contains an
// nginx testcookie payload, or a zero result.
func isLikelyNginxTestcookie(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, needle := range isLikelyNginxTestcookiePayloads {
			if bytes.Contains(res.Content, needle) {
				return res
			}
		}
	}
	return httpCheckResult{}
}

// isHTTP497 returns the first result whose body contains an
// HTTP-sent-to-HTTPS-port error page, or a zero result.
func isHTTP497(results []httpCheckResult) httpCheckResult {
	for _, res := range results {
		for _, needle := range isHTTP497Payloads {
			if bytes.Contains(res.Content, needle) {
				return res
			}
		}
	}
	return httpCheckResult{}
}
diff --git a/vendor/github.com/letsdebug/letsdebug/http_util.go b/vendor/github.com/letsdebug/letsdebug/http_util.go
new file mode 100644
index 00000000..75345c4f
--- /dev/null
+++ b/vendor/github.com/letsdebug/letsdebug/http_util.go
@@ -0,0 +1,310 @@
package letsdebug

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

const (
	// httpTimeout is the HTTP dial/request timeout in seconds (multiplied by
	// time.Second at each use site).
	httpTimeout = 10
)

// redirectError carries a human-readable description of a disallowed
// redirect encountered during validation.
type redirectError string

func (e redirectError) Error() string {
	return string(e)
}

// httpCheckResult accumulates everything observed during one emulated
// validation request against a single IP address.
type httpCheckResult struct {
	StatusCode        int
	ServerHeader      string
	IP                net.IP
	InitialStatusCode int
	NumRedirects      int
	FirstDial         time.Time
	DialStack         []string
	Content           []byte
}

// Trace appends a timestamped line (ms since the first trace) to the result's
// dial stack.
func (r *httpCheckResult) Trace(s string) {
	if r.FirstDial.IsZero() {
		r.FirstDial = time.Now()
	}
	r.DialStack = append(r.DialStack,
		fmt.Sprintf("@%dms: %s", time.Since(r.FirstDial).Nanoseconds()/1e6, s))
}

// IsZero reports whether no HTTP response was ever received.
func (r httpCheckResult) IsZero() bool {
	return r.StatusCode == 0
}

// String renders a compact single-line summary of the result.
func (r httpCheckResult) String() string {
	addrType := "IPv6"
	if r.IP.To4() != nil {
		addrType = "IPv4"
	}

	lines := []string{
		"Address=" + r.IP.String(),
		"Address Type=" + addrType,
		"Server=" + r.ServerHeader,
		"HTTP Status=" + strconv.Itoa(r.InitialStatusCode),
	}
	if r.NumRedirects > 0 {
		lines = append(lines, "Number of Redirects="+strconv.Itoa(r.NumRedirects))
		lines = append(lines, "Final HTTP Status="+strconv.Itoa(r.StatusCode))
	}

	return fmt.Sprintf("[%s]", strings.Join(lines, ","))
}

// checkHTTPTransport wraps another RoundTripper to record response statuses
// and errors into a shared httpCheckResult.
type checkHTTPTransport struct {
	transport http.RoundTripper
	result    *httpCheckResult
}

// RoundTrip delegates to the wrapped transport, tracing errors and recording
// the first (pre-redirect) status code.
func (t checkHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.transport.RoundTrip(req)

	if t.result != nil && err != nil {
		t.result.Trace(fmt.Sprintf("Experienced error: %v", err))
	}

	if t.result != nil && resp != nil {
		if t.result.InitialStatusCode == 0 {
			t.result.InitialStatusCode = resp.StatusCode
		}

		t.result.Trace(fmt.Sprintf("Server response: HTTP %s", resp.Status))
	}

	return resp, err
}

// makeSingleShotHTTPTransport builds a keep-alive-free transport mirroring
// the settings of Boulder's validation authority.
func makeSingleShotHTTPTransport() *http.Transport {
	return &http.Transport{
		// Boulder VA's HTTP transport settings
		// https://github.com/letsencrypt/boulder/blob/387e94407c58fe0ff65207a89304776ee7417410/va/http.go#L143-L160
		DisableKeepAlives:   true,
		IdleConnTimeout:     time.Second,
		TLSHandshakeTimeout: 10 * time.Second,
		MaxIdleConns:        1,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
}

// checkHTTP performs one emulated Let's Encrypt HTTP-01 validation request
// for domain, forcing the initial connection to the given address while
// resolving any redirect targets through the scan context. It mirrors
// Boulder's redirect restrictions (max 10 hops, ports 80/443 only,
// http/https schemes only).
func checkHTTP(scanCtx *scanContext, domain string, address net.IP) (httpCheckResult, Problem) {
	dialer := net.Dialer{
		Timeout: httpTimeout * time.Second,
	}

	checkRes := &httpCheckResult{
		IP:        address,
		DialStack: []string{},
	}

	var redirErr redirectError

	baseHTTPTransport := makeSingleShotHTTPTransport()
	baseHTTPTransport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		host, port, _ := net.SplitHostPort(addr)
		host = normalizeFqdn(host)

		dialFunc := func(ip net.IP, port string) (net.Conn, error) {
			checkRes.Trace(fmt.Sprintf("Dialing %s", ip.String()))
			if ip.To4() == nil {
				return dialer.DialContext(ctx, "tcp", "["+ip.String()+"]:"+port)
			}
			return dialer.DialContext(ctx, "tcp", ip.String()+":"+port)
		}

		// Only override the address for this specific domain.
		// We don't want to mangle redirects.
		if host == domain {
			return dialFunc(address, port)
		}

		// For other hosts, we need to use Unbound to resolve the name
		otherAddr, err := scanCtx.LookupRandomHTTPRecord(host)
		if err != nil {
			return nil, err
		}

		return dialFunc(otherAddr, port)
	}

	cl := http.Client{
		Transport: checkHTTPTransport{
			result:    checkRes,
			transport: baseHTTPTransport,
		},
		// boulder: va.go fetchHTTP
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			checkRes.NumRedirects++

			if len(via) >= 10 {
				redirErr = redirectError(fmt.Sprintf("Too many (%d) redirects, last redirect was to: %s", len(via), req.URL.String()))
				return redirErr
			}

			checkRes.Trace(fmt.Sprintf("Received redirect to %s", req.URL.String()))

			host := req.URL.Host
			if _, p, err := net.SplitHostPort(host); err == nil {
				if port, _ := strconv.Atoi(p); port != 80 && port != 443 {
					redirErr = redirectError(fmt.Sprintf("Bad port number provided when fetching %s: %s", req.URL.String(), p))
					return redirErr
				}
			}

			scheme := strings.ToLower(req.URL.Scheme)
			if scheme != "http" && scheme != "https" {
				redirErr = redirectError(fmt.Sprintf("Bad scheme provided when fetching %s: %s", req.URL.String(), scheme))
				return redirErr
			}

			// Also check for domain.tld.well-known/acme-challenge
			if strings.HasSuffix(req.URL.Hostname(), ".well-known") {
				redirErr = redirectError(fmt.Sprintf("It appears that a redirect was generated by your web server that is missing a trailing "+
					"slash after your domain name: %v. Check your web server configuration and .htaccess for Redirect/RedirectMatch/RewriteRule.",
					req.URL.String()))
				return redirErr
			}

			return nil
		},
	}

	reqURL := "http://" + domain + "/.well-known/acme-challenge/" + scanCtx.httpRequestPath
	checkRes.Trace(fmt.Sprintf("Making a request to %s (using initial IP %s)", reqURL, address))

	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return *checkRes, internalProblem(fmt.Sprintf("Failed to construct validation request: %v", err), SeverityError)
	}

	req.Header.Set("Accept", "*/*")
	req.Header.Set("User-Agent", "Mozilla/5.0 (compatible; Let's Debug emulating Let's Encrypt validation server; +https://letsdebug.net)")

	ctx, cancel := context.WithTimeout(context.Background(), httpTimeout*time.Second)
	defer cancel()

	req = req.WithContext(ctx)

	resp, err := cl.Do(req)
	if resp != nil {
		checkRes.StatusCode = resp.StatusCode
		checkRes.ServerHeader = resp.Header.Get("Server")
	}
	if err != nil {
		if redirErr != "" {
			err = redirErr
		}
		return *checkRes, translateHTTPError(domain, address, err, checkRes.DialStack)
	}

	defer resp.Body.Close()

	// Read at most ~1KiB of the body (or slightly more than the expected
	// response, if one was configured).
	maxLen := 1024
	if l := len(scanCtx.httpExpectResponse) + 2; l > maxLen {
		maxLen = l
	}
	r := io.LimitReader(resp.Body, int64(maxLen))

	// NOTE(review): when no expected response is configured, a read error
	// here is silently ignored and Content may be truncated — confirm this
	// is intentional.
	buf, err := ioutil.ReadAll(r)
	checkRes.Content = buf

	// If we expect a certain response, check for it
	if scanCtx.httpExpectResponse != "" {
		if err != nil {
			return *checkRes, translateHTTPError(domain, address,
				fmt.Errorf(`This test expected the server to respond with "%s" but instead we experienced an error reading the response: %v`,
					scanCtx.httpExpectResponse, err),
				checkRes.DialStack)
		} else if respStr := string(buf); respStr != scanCtx.httpExpectResponse {
			return *checkRes, translateHTTPError(domain, address,
				fmt.Errorf(`This test expected the server to respond with "%s" but instead we got a response beginning with "%s"`,
					scanCtx.httpExpectResponse, respStr),
				checkRes.DialStack)
		}
	}

	return *checkRes, Problem{}
}

// translateHTTPError maps a request error to the most specific Problem:
// a bad redirect, an HTTP-to-HTTPS-client mismatch, or a generic per-address
// failure (AAAANotWorking / ANotWorking).
func translateHTTPError(domain string, address net.IP, e error, dialStack []string) Problem {
	if redirErr, ok := e.(redirectError); ok {
		return badRedirect(domain, redirErr, dialStack)
	}

	if strings.HasSuffix(e.Error(), "http: server gave HTTP response to HTTPS client") {
		return httpServerMisconfiguration(domain, "Web server is serving the wrong protocol on the wrong port: "+e.Error()+
			". This may be due to a previous HTTP redirect rather than a webserver misconfiguration.\n\nTrace:\n"+strings.Join(dialStack, "\n"))
	}

	// Make a nicer error message if it was a context timeout
	if urlErr, ok := e.(*url.Error); ok && urlErr.Timeout() {
		e = fmt.Errorf("A timeout was experienced while communicating with %s/%s: %v",
			domain, address.String(), urlErr)
	}

	if address.To4() == nil {
		return aaaaNotWorking(domain, address.String(), e, dialStack)
	} else {
		return aNotWorking(domain, address.String(), e, dialStack)
	}
}

// httpServerMisconfiguration builds the error Problem for a webserver serving
// the wrong protocol on a port.
func httpServerMisconfiguration(domain, detail string) Problem {
	return Problem{
		Name:        "WebserverMisconfiguration",
		Explanation: fmt.Sprintf(`%s's webserver may be misconfigured.`, domain),
		Detail:      detail,
		Severity:    SeverityError,
	}
}

// aaaaNotWorking builds the error Problem for an unreachable AAAA address.
func aaaaNotWorking(domain, ipv6Address string, err error, dialStack []string) Problem {
	return Problem{
		Name: "AAAANotWorking",
		Explanation: fmt.Sprintf(`%s has an AAAA (IPv6) record (%s) but a test request to this address over port 80 did not succeed. `+
			`Your web server must have at least one working IPv4 or IPv6 address. `+
			`You should either ensure that validation requests to this domain succeed over IPv6, or remove its AAAA record.`,
			domain, ipv6Address),
		Detail:   fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")),
		Severity: SeverityError,
	}
}

// aNotWorking builds the error Problem for an unreachable A address.
// (Definition continues beyond the visible chunk.)
func aNotWorking(domain, addr string, err error, dialStack []string) Problem {
	return Problem{
		Name: "ANotWorking",
		Explanation: fmt.Sprintf(`%s has an A (IPv4) record (%s) but a request to this address over port 80 did not succeed. 
`+ + `Your web server must have at least one working IPv4 or IPv6 address.`, + domain, addr), + Detail: fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")), + Severity: SeverityError, + } +} + +func badRedirect(domain string, err error, dialStack []string) Problem { + return Problem{ + Name: "BadRedirect", + Explanation: fmt.Sprintf(`Sending an ACME HTTP validation request to %s results in an unacceptable redirect. `+ + `This is most likely a misconfiguration of your web server or your web application.`, + domain), + Detail: fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")), + Severity: SeverityError, + } +} diff --git a/vendor/github.com/letsdebug/letsdebug/letsdebug.go b/vendor/github.com/letsdebug/letsdebug/letsdebug.go new file mode 100644 index 00000000..e63dd480 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/letsdebug.go @@ -0,0 +1,85 @@ +// Package letsdebug provides an library, web API and CLI to provide diagnostic +// information for why a particular (FQDN, ACME Validation Method) pair *may* fail +// when attempting to issue an SSL Certificate from Let's Encrypt (https://letsencrypt.org). +// +// The usage cannot be generalized to other ACME providers, as the policies checked by this package +// are specific to Let's Encrypt, rather than being mandated by the ACME protocol. +// +// This package relies on libunbound. +package letsdebug + +import ( + "fmt" + "os" + "reflect" + "time" +) + +// Options provide additional configuration to the various checkers +type Options struct { + // HTTPRequestPath alters the /.well-known/acme-challenge/letsdebug-test to + // /acme-challenge/acme-challenge/{{ HTTPRequestPath }} + HTTPRequestPath string + // HTTPExpectResponse causes the HTTP checker to require the remote server to + // respond with specific content. If the content does not match, then the test + // will fail with severity Error. 
+ HTTPExpectResponse string +} + +// Check calls CheckWithOptions with default options +func Check(domain string, method ValidationMethod) (probs []Problem, retErr error) { + return CheckWithOptions(domain, method, Options{}) +} + +// CheckWithOptions will run each checker against the domain and validation method provided. +// It is expected that this method may take a long time to execute, and may not be cancelled. +func CheckWithOptions(domain string, method ValidationMethod, opts Options) (probs []Problem, retErr error) { + defer func() { + if r := recover(); r != nil { + retErr = fmt.Errorf("panic: %v", r) + } + }() + + ctx := newScanContext() + if opts.HTTPRequestPath != "" { + ctx.httpRequestPath = opts.HTTPRequestPath + } + if opts.HTTPExpectResponse != "" { + ctx.httpExpectResponse = opts.HTTPExpectResponse + } + + domain = normalizeFqdn(domain) + + for _, checker := range checkers { + t := reflect.TypeOf(checker) + debug("[*] + %v\n", t) + start := time.Now() + checkerProbs, err := checker.Check(ctx, domain, method) + debug("[*] - %v in %v\n", t, time.Since(start)) + if err == nil { + if len(checkerProbs) > 0 { + probs = append(probs, checkerProbs...) + } + // dont continue checking when a fatal error occurs + if hasFatalProblem(probs) { + break + } + } else if err != errNotApplicable { + return nil, err + } + } + return probs, nil +} + +var isDebug *bool + +func debug(format string, args ...interface{}) { + if isDebug == nil { + d := os.Getenv("LETSDEBUG_DEBUG") != "" + isDebug = &d + } + if !(*isDebug) { + return + } + fmt.Fprintf(os.Stderr, format, args...) 
+} diff --git a/vendor/github.com/letsdebug/letsdebug/problem.go b/vendor/github.com/letsdebug/letsdebug/problem.go new file mode 100644 index 00000000..1a2689f0 --- /dev/null +++ b/vendor/github.com/letsdebug/letsdebug/problem.go @@ -0,0 +1,75 @@ +package letsdebug + +import ( + "fmt" + "strings" +) + +// SeverityLevel represents the priority of a reported problem +type SeverityLevel string + +// Problem represents an issue found by one of the checkers in this package. +// Explanation is a human-readable explanation of the issue. +// Detail is usually the underlying machine error. +type Problem struct { + Name string `json:"name"` + Explanation string `json:"explanation"` + Detail string `json:"detail"` + Severity SeverityLevel `json:"severity"` +} + +const ( + SeverityFatal SeverityLevel = "Fatal" // Represents a fatal error which will stop any further checks + SeverityError SeverityLevel = "Error" + SeverityWarning SeverityLevel = "Warning" + SeverityDebug SeverityLevel = "Debug" // Not to be shown by default +) + +func (p Problem) String() string { + return fmt.Sprintf("[%s] %s: %s", p.Name, p.Explanation, p.Detail) +} + +func (p Problem) IsZero() bool { + return p.Name == "" +} + +func (p Problem) DetailLines() []string { + return strings.Split(p.Detail, "\n") +} + +func hasFatalProblem(probs []Problem) bool { + for _, p := range probs { + if p.Severity == SeverityFatal { + return true + } + } + + return false +} + +func internalProblem(message string, level SeverityLevel) Problem { + return Problem{ + Name: "InternalProblem", + Explanation: "An internal error occurred while checking the domain", + Detail: message, + Severity: level, + } +} + +func dnsLookupFailed(name, rrType string, err error) Problem { + return Problem{ + Name: "DNSLookupFailed", + Explanation: fmt.Sprintf(`A fatal issue occurred during the DNS lookup process for %s/%s.`, name, rrType), + Detail: err.Error(), + Severity: SeverityFatal, + } +} + +func debugProblem(name, message, detail 
string) Problem { + return Problem{ + Name: name, + Explanation: message, + Detail: detail, + Severity: SeverityDebug, + } +} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 00000000..0f1d00e1 --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100644 index 00000000..ebf44703 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +set -eu + +client_configure() { + sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 
'server.crt' + ssl_key_file = 'server.key' + config + + echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null + + sudo service postgresql restart +} + +postgresql_install() { + xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages + postgresql-$PGVERSION + postgresql-server-dev-$PGVERSION + postgresql-contrib-$PGVERSION + packages +} + +postgresql_uninstall() { + sudo service postgresql stop + xargs sudo apt-get -y --purge remove <<-packages + libpq-dev + libpq5 + postgresql + postgresql-client-common + postgresql-common + packages + sudo rm -rf /var/lib/postgresql +} + +$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml new file mode 100644 index 00000000..3498c53d --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.yml @@ -0,0 +1,44 @@ +language: go + +go: + - 1.13.x + - 1.14.x + - master + +sudo: true + +env: + global: + - PGUSER=postgres + - PQGOSSLTESTS=1 + - PQSSLCERTTEST_PATH=$PWD/certs + - PGHOST=127.0.0.1 + matrix: + - PGVERSION=10 + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 + +before_install: + - ./.travis.sh postgresql_uninstall + - ./.travis.sh pgdg_repository + - ./.travis.sh postgresql_install + - ./.travis.sh postgresql_configure + - ./.travis.sh client_configure + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3 + +before_script: + - createdb pqgotest + - createuser -DRS pqgossltest + - createuser -DRS pqgosslcert + +script: + - > + goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' + - go vet ./... + - staticcheck -go 1.13 ./... + - golint ./... + - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... + - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... 
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md new file mode 100644 index 00000000..5773904a --- /dev/null +++ b/vendor/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md new file mode 100644 index 00000000..c972a86a --- /dev/null +++ b/vendor/github.com/lib/pq/README.md @@ -0,0 +1,30 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) + +## Install + + go get github.com/lib/pq + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. 
`bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support +* GSS (Kerberos) auth + +## Tests + +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. + +## Status + +This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 00000000..f0502111 --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 00000000..e4933e22 --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,756 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []string: + return (*StringArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]string: + return (*StringArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. 
+ ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. 
+func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. 
+func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. 
+ assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. +func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. 
+ if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. 
+func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) 
+ if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. 
+// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to 
parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 00000000..4b0a0a8f --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) 
+} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 00000000..f313c149 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1996 @@ +package pq + +import ( + "bufio" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. 
+// Most users should only use it through database/sql package from the standard +// library. +func (d *Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. 
+type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +// Handle driver-side settings in parsed connection string. 
+func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() 
{ + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. +func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + o := c.opts + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. 
+ panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. 
+type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. 
+ o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. 
However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. + if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res 
*rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' && res.colNames == nil { + res.result, res.tag = cn.parseComplete(r.string()) + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. 
+func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn 
*conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. + return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() 
+ cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. 
All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + case "krbsrvname": + return true + case "krbspn": + return true + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". 
+ if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. 
+ token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. + + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). 
+var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + 
w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. 
If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. 
Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", 
&major1, &major2, &minor) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := 
cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + 
+func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". 
+func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 00000000..09e2ea46 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,149 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION 
LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + _ = cn.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + c, err := dial(ctx, cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. 
+ { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 00000000..d7d47261 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,115 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. 
+ o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. 
+ if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 00000000..38d5bb69 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,307 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + driver.Result + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad() + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := 
ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad() + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) + case 'N': + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad() + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.bad = true + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.bad + ci.Unlock() + return b +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.Lock() + ci.Result = result + ci.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.Lock() + result := ci.Result + if result == nil { + return driver.RowsAffected(0) + } + ci.Unlock() + return result +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. 
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.isBad() { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.isBad() { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 00000000..b5718480 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,268 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. 
For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. 
The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. 
The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. 
+ + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. 
To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
+*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 00000000..c4dafe27 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,622 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case 
oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. + f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) 
+ default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. 
+ str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + if is2400Time { + t = t.Add(24 * time.Hour) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. 
+func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. 
+func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. 
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! 
(t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset %= 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
+func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. 
+func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 00000000..3d66ba7c --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,515 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". 
+// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics 
Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": 
"bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + 
"2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": 
"duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": 
"fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return 
err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. 
+func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.bad = true + *err = v + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. + if *err == driver.ErrBadConn { + cn.bad = true + } +} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 00000000..408ec01f --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). 
+type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 00000000..01dd8c72 --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,71 @@ +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. 
+func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 00000000..5c421fdb --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,858 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. 
+ Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. +func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. 
+// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. 
+// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. +func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. 
+ l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'S': + // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. 
If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). 
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. + b := &writeBuf{ + buf: []byte("Q\x00\x00\x00\x00"), + pos: 1, + } + b.string(q) + l.cn.send(b) + + return nil +} + +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: +// 1) "executed" is true; the query was executed to completion on the +// database server. If the query failed, err will be set to the error +// returned by the database, otherwise err will be nil. +// 2) If "executed" is false, the query could not be executed on the remote +// server. err will be non-nil. +// +// After a call to ExecSimpleQuery has returned an executed=false value, the +// connection has either been closed or will be closed shortly thereafter, and +// all subsequently executed queries will return an error. +func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { + if err = l.acquireSenderLock(); err != nil { + return false, err + } + defer l.releaseSenderLock() + + err = l.sendSimpleQuery(q) + if err != nil { + // We can't know what state the protocol is in, so we need to abandon + // this connection. + l.connectionLock.Lock() + // Set the error pointer if it hasn't been set already; see + // listenerConnMain. + if l.err == nil { + l.err = err + } + l.connectionLock.Unlock() + l.cn.c.Close() + return false, err + } + + // now we just wait for a reply.. + for { + m, ok := <-l.replyChan + if !ok { + // We lost the connection to server, don't bother waiting for a + // a response. err should have been set already. 
+ l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +// Close closes the connection. +func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. +func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +// ListenerEventType is an enumeration of listener event types. +type ListenerEventType int + +const ( + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. + ListenerEventConnected ListenerEventType = iota + + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. 
The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. + Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + dialer Dialer + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. 
After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. +func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + dialer: d, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. +func (l *Listener) NotificationChannel() <-chan *Notification { + return l.Notify +} + +// Listen starts listening for notifications on a channel. Calls to this +// function will block until an acknowledgement has been received from the +// server. 
Note that Listener automatically re-establishes the connection +// after connection loss, so this function may block indefinitely if the +// connection can not be re-established. +// +// Listen will only fail in three conditions: +// 1) The channel is already open. The returned error will be +// ErrChannelAlreadyOpen. +// 2) The query was executed on the remote server, but PostgreSQL returned an +// error message in response to the query. The returned error will be a +// pq.Error containing the information the server supplied. +// 3) Close is called on the Listener before the request could be completed. +// +// The channel name is case-sensitive. +func (l *Listener) Listen(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // The server allows you to issue a LISTEN on a channel which is already + // open, but it seems useful to be able to detect this case to spot for + // mistakes in application logic. If the application genuinely does't + // care, it can check the exported error and ignore it. + _, exists := l.channels[channel] + if exists { + return ErrChannelAlreadyOpen + } + + if l.cn != nil { + // If gotResponse is true but error is set, the query was executed on + // the remote server, but resulted in an error. This should be + // relatively rare, so it's fine if we just pass the error to our + // caller. However, if gotResponse is false, we could not complete the + // query on the remote server and our underlying connection is about + // to go away, so we only add relname to l.channels, and wait for + // resync() to take care of the rest. + gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. 
Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. +// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. 
+func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. 
+ for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 00000000..caaede24 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 00000000..ecc84c2c --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. 
+ +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric 
Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + 
T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + 
T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: 
"_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 00000000..c6aa5b9a --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. 
+func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 00000000..477216b6 --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. 
+func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. +func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return 
fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + 
mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 00000000..d9020845 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,175 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. 
+ tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. 
+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. 
+ // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 00000000..3b7c3a2a --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 00000000..5d2c763c --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. 
+func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 00000000..f4d8a7c2 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. +// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git 
a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 00000000..a5101920 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 00000000..2b691267 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. 
+func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 00000000..9a1b9e07 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. +func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/miekg/unbound/.travis.yml b/vendor/github.com/miekg/unbound/.travis.yml new file mode 100644 index 00000000..260000ad --- /dev/null +++ b/vendor/github.com/miekg/unbound/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.9 +before_install: + - sudo apt-get update -qq + - sudo apt-get install -qq libunbound-dev +script: + - go test -race -v -bench=. ./... diff --git a/vendor/github.com/miekg/unbound/README.md b/vendor/github.com/miekg/unbound/README.md new file mode 100644 index 00000000..52e67e1b --- /dev/null +++ b/vendor/github.com/miekg/unbound/README.md @@ -0,0 +1,14 @@ +# Unbound + +A wrapper for Unbound in Go. + +Unbound's `ub_result` has been extended with an slice of dns.RRs, this alleviates +the need to parse `ub_result.data` yourself. + +The website for Unbound is https://unbound.net/, where you can find further documentation. 
+ +Tested/compiled to work for versions: 1.4.22 and 1.6.0-3+deb9u1 (Debian Stretch). + +Note: using cgo means the executables will use shared libraries (OpenSSL, ldns and libunbound). + +The tutorials found here are the originals ones adapted to Go. diff --git a/vendor/github.com/miekg/unbound/dns.go b/vendor/github.com/miekg/unbound/dns.go new file mode 100644 index 00000000..04ce826a --- /dev/null +++ b/vendor/github.com/miekg/unbound/dns.go @@ -0,0 +1,87 @@ +package unbound + +import ( + "math/rand" + "sort" + + "github.com/miekg/dns" +) + +// AddTaRR calls AddTa, but allows to directly use an dns.RR. +// This method is not found in Unbound. +func (u *Unbound) AddTaRR(ta dns.RR) error { return u.AddTa(ta.String()) } + +// DataAddRR calls DataAdd, but allows to directly use an dns.RR. +// This method is not found in Unbound. +func (u *Unbound) DataAddRR(data dns.RR) error { return u.DataAdd(data.String()) } + +// DataRemoveRR calls DataRemove, but allows to directly use an dns.RR. +// This method is not found in Unbound. +func (u *Unbound) DataRemoveRR(data dns.RR) error { return u.DataRemove(data.String()) } + +// Copied from the standard library + +// byPriorityWeight sorts SRV records by ascending priority and weight. +type byPriorityWeight []*dns.SRV + +func (s byPriorityWeight) Len() int { return len(s) } +func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPriorityWeight) Less(i, j int) bool { + return s[i].Priority < s[j].Priority || + (s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight) +} + +// shuffleByWeight shuffles SRV records by weight using the algorithm +// described in RFC 2782. 
+func (addrs byPriorityWeight) shuffleByWeight() { + sum := 0 + for _, addr := range addrs { + sum += int(addr.Weight) + } + for sum > 0 && len(addrs) > 1 { + s := 0 + n := rand.Intn(sum + 1) + for i := range addrs { + s += int(addrs[i].Weight) + if s >= n { + if i > 0 { + t := addrs[i] + copy(addrs[1:i+1], addrs[0:i]) + addrs[0] = t + } + break + } + } + sum -= int(addrs[0].Weight) + addrs = addrs[1:] + } +} + +// sort reorders SRV records as specified in RFC 2782. +func (addrs byPriorityWeight) sort() { + sort.Sort(addrs) + i := 0 + for j := 1; j < len(addrs); j++ { + if addrs[i].Priority != addrs[j].Priority { + addrs[i:j].shuffleByWeight() + i = j + } + } + addrs[i:].shuffleByWeight() +} + +// byPref implements sort.Interface to sort MX records by preference +type byPref []*dns.MX + +func (s byPref) Len() int { return len(s) } +func (s byPref) Less(i, j int) bool { return s[i].Preference < s[j].Preference } +func (s byPref) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort reorders MX records as specified in RFC 5321. +func (s byPref) sort() { + for i := range s { + j := rand.Intn(i + 1) + s[i], s[j] = s[j], s[i] + } + sort.Sort(s) +} diff --git a/vendor/github.com/miekg/unbound/lookup.go b/vendor/github.com/miekg/unbound/lookup.go new file mode 100644 index 00000000..421c963a --- /dev/null +++ b/vendor/github.com/miekg/unbound/lookup.go @@ -0,0 +1,164 @@ +package unbound + +import ( + "net" + + "github.com/miekg/dns" +) + +// These are function are a re-implementation of the net.Lookup* ones +// They are adapted to the package unbound and the package dns. + +// LookupAddr performs a reverse lookup for the given address, returning a +// list of names mapping to that address. 
+func (u *Unbound) LookupAddr(addr string) (name []string, err error) { + reverse, err := dns.ReverseAddr(addr) + if err != nil { + return nil, err + } + r, err := u.Resolve(reverse, dns.TypePTR, dns.ClassINET) + if err != nil { + return nil, err + } + for _, rr := range r.Rr { + name = append(name, rr.(*dns.PTR).Ptr) + } + return +} + +// LookupCNAME returns the canonical DNS host for the given name. Callers +// that do not care about the canonical name can call LookupHost or +// LookupIP directly; both take care of resolving the canonical name as +// part of the lookup. +func (u *Unbound) LookupCNAME(name string) (cname string, err error) { + r, err := u.Resolve(name, dns.TypeA, dns.ClassINET) + // TODO(mg): if nothing found try AAAA? + return r.CanonName, err +} + +// LookupHost looks up the given host using Unbound. It returns +// an array of that host's addresses. +func (u *Unbound) LookupHost(host string) (addrs []string, err error) { + ipaddrs, err := u.LookupIP(host) + if err != nil { + return nil, err + } + for _, ip := range ipaddrs { + addrs = append(addrs, ip.String()) + } + return addrs, nil +} + +// LookupIP looks up host using Unbound. It returns an array of +// that host's IPv4 and IPv6 addresses. +// The A and AAAA lookups are performed in parallel. +func (u *Unbound) LookupIP(host string) (addrs []net.IP, err error) { + c := make(chan *ResultError) + u.ResolveAsync(host, dns.TypeA, dns.ClassINET, c) + u.ResolveAsync(host, dns.TypeAAAA, dns.ClassINET, c) + seen := 0 + // TODO(miek): timeout? +Wait: + for { + select { + case r := <-c: + for _, rr := range r.Rr { + if x, ok := rr.(*dns.A); ok { + addrs = append(addrs, x.A) + } + if x, ok := rr.(*dns.AAAA); ok { + addrs = append(addrs, x.AAAA) + } + } + seen++ + if seen == 2 { + break Wait + } + } + } + return +} + +// LookupMX returns the DNS MX records for the given domain name sorted by +// preference. 
+func (u *Unbound) LookupMX(name string) (mx []*dns.MX, err error) { + r, err := u.Resolve(name, dns.TypeMX, dns.ClassINET) + if err != nil { + return nil, err + } + for _, rr := range r.Rr { + mx = append(mx, rr.(*dns.MX)) + } + byPref(mx).sort() + return +} + +// LookupNS returns the DNS NS records for the given domain name. +func (u *Unbound) LookupNS(name string) (ns []*dns.NS, err error) { + r, err := u.Resolve(name, dns.TypeNS, dns.ClassINET) + if err != nil { + return nil, err + } + for _, rr := range r.Rr { + ns = append(ns, rr.(*dns.NS)) + } + return +} + +// LookupSRV tries to resolve an SRV query of the given service, protocol, +// and domain name. The proto is "tcp" or "udp". The returned records are +// sorted by priority and randomized by weight within a priority. +// +// LookupSRV constructs the DNS name to look up following RFC 2782. That +// is, it looks up _service._proto.name. To accommodate services publishing +// SRV records under non-standard names, if both service and proto are +// empty strings, LookupSRV looks up name directly. +func (u *Unbound) LookupSRV(service, proto, name string) (cname string, srv []*dns.SRV, err error) { + r := new(Result) + if service == "" && proto == "" { + r, err = u.Resolve(name, dns.TypeSRV, dns.ClassINET) + } else { + r, err = u.Resolve("_"+service+"._"+proto+"."+name, dns.TypeSRV, dns.ClassINET) + } + if err != nil { + return "", nil, err + } + for _, rr := range r.Rr { + srv = append(srv, rr.(*dns.SRV)) + } + byPriorityWeight(srv).sort() + return "", srv, err +} + +// LookupTXT returns the DNS TXT records for the given domain name. +func (u *Unbound) LookupTXT(name string) (txt []string, err error) { + r, err := u.Resolve(name, dns.TypeTXT, dns.ClassINET) + if err != nil { + return nil, err + } + for _, rr := range r.Rr { + txt = append(txt, rr.(*dns.TXT).Txt...) + } + return +} + +// LookupTLSA returns the DNS DANE records for the given domain service, protocol +// and domainname. 
+// +// LookupTLSA constructs the DNS name to look up following RFC 6698. That +// is, it looks up _port._proto.name. +func (u *Unbound) LookupTLSA(service, proto, name string) (tlsa []*dns.TLSA, err error) { + tlsaname, err := dns.TLSAName(name, service, proto) + if err != nil { + return nil, err + } + + r, err := u.Resolve(tlsaname, dns.TypeTLSA, dns.ClassINET) + if err != nil { + return nil, err + } + for _, rr := range r.Rr { + tlsa = append(tlsa, rr.(*dns.TLSA)) + } + return tlsa, nil +} diff --git a/vendor/github.com/miekg/unbound/unbound.go b/vendor/github.com/miekg/unbound/unbound.go new file mode 100644 index 00000000..add965d7 --- /dev/null +++ b/vendor/github.com/miekg/unbound/unbound.go @@ -0,0 +1,386 @@ +// Package unbound implements a wrapper for libunbound(3). +// Unbound is a DNSSEC aware resolver, see https://unbound.net/ +// for more information. It's up to the caller to configure +// Unbound with trust anchors. With these anchors a DNSSEC +// answer can be validated. +// +// The method's documentation can be found in libunbound(3). +// The names of the methods are in sync with the +// names used in unbound, but the underscores are removed and they +// are in camel-case, e.g. ub_ctx_resolv_conf becomes u.ResolvConf. +// Except for ub_ctx_create() and ub_ctx_delete(), +// which become: New() and Destroy() to be more in line with the standard +// Go practice. +// +// Basic use pattern: +// u := unbound.New() +// defer u.Destroy() +// u.ResolvConf("/etc/resolv.conf") +// u.AddTaFile("trustanchor") +// r, e := u.Resolve("miek.nl.", dns.TypeA, dns.ClassINET) +// +// The asynchronous functions are implemented using goroutines. This +// means the following functions are not useful in Go and therefor +// not implemented: ub_fd, ub_wait, ub_poll, ub_process and ub_cancel. +// +// Unbound's ub_result (named Result in the package) has been modified. +// An extra field has been added, 'Rr', which is a []dns.RR. 
+// +// The Lookup* functions of the net package are re-implemented in this package. +package unbound + +/* +#cgo LDFLAGS: -lunbound +#include +#include +#include + +#ifndef offsetof +#define offsetof(type, member) __builtin_offsetof (type, member) +#endif + +int array_elem_int(int *l, int i) { return l[i]; } +char * array_elem_char(char **l, int i) { if (l == NULL) return NULL; return l[i]; } +char * new_char_pointer() { char *p = NULL; return p; } +struct ub_result *new_ub_result() { + struct ub_result *r; + r = calloc(sizeof(struct ub_result), 1); + return r; +} +int ub_ttl(struct ub_result *r) { + int *p; + // Go to why_bogus add the pointer and then we will find the ttl, hopefully. + p = (int*) ((char*)r + offsetof(struct ub_result, why_bogus) + sizeof(char*)); + return (int)*p; +} +*/ +import "C" + +import ( + "encoding/binary" + "os" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/miekg/dns" +) + +type Unbound struct { + ctx *C.struct_ub_ctx + version [3]int +} + +// Results is Unbound's ub_result adapted for Go. 
+type Result struct { + Qname string // Text string, original question + Qtype uint16 // Type code asked for + Qclass uint16 // Class code asked for + Data [][]byte // Slice of rdata items formed from the reply + Rr []dns.RR // The RR encoded from Data, Qclass, Qtype, Qname and Ttl (not in Unbound) + CanonName string // Canonical name of result + Rcode int // Additional error code in case of no data + AnswerPacket *dns.Msg // Full answer packet + HaveData bool // True if there is data + NxDomain bool // True if the name does not exist + Secure bool // True if the result is secure + Bogus bool // True if a security failure happened + WhyBogus string // String with error when bogus + Ttl uint32 // TTL for the result in seconds (0 for unbound versions < 1.4.20) + Rtt time.Duration // Time the query took (not in Unbound) +} + +// UnboundError is an error returned from Unbound, it wraps both the +// return code and the error string as returned by ub_strerror. +type UnboundError struct { + Err string + code int +} + +// ResultError encapsulates a *Result and an error. This is used to +// communicate with unbound over a channel. +type ResultError struct { + *Result + Error error +} + +func (e *UnboundError) Error() string { + return e.Err +} + +func newError(i int) error { + if i == 0 { + return nil + } + e := new(UnboundError) + e.Err = errorString(i) + e.code = i + return e +} + +func errorString(i int) string { + return C.GoString(C.ub_strerror(C.int(i))) +} + +// unbound version from 1.4.20 (inclusive) and above fill in the Tll in the result +// check if we have such a version +func (u *Unbound) haveTtlFeature() bool { + if u.version[0] < 1 { + return false + } else if u.version[0] == 1 && u.version[1] < 4 { + return false + } else if u.version[0] == 1 && u.version[1] == 4 && u.version[2] <= 20 { + return false + } else { + return true + } +} + +// New wraps Unbound's ub_ctx_create. 
+func New() *Unbound { + u := new(Unbound) + u.ctx = C.ub_ctx_create() + u.version = u.Version() + return u +} + +// Destroy wraps Unbound's ub_ctx_delete. +func (u *Unbound) Destroy() { + C.ub_ctx_delete(u.ctx) +} + +// ResolvConf wraps Unbound's ub_ctx_resolvconf. +func (u *Unbound) ResolvConf(fname string) error { + cfname := C.CString(fname) + defer C.free(unsafe.Pointer(cfname)) + i := C.ub_ctx_resolvconf(u.ctx, cfname) + return newError(int(i)) +} + +// SetOption wraps Unbound's ub_ctx_set_option. +func (u *Unbound) SetOption(opt, val string) error { + copt := C.CString(opt) + defer C.free(unsafe.Pointer(copt)) + cval := C.CString(val) + defer C.free(unsafe.Pointer(cval)) + i := C.ub_ctx_set_option(u.ctx, copt, cval) + return newError(int(i)) +} + +// GetOption wraps Unbound's ub_ctx_get_option. +func (u *Unbound) GetOption(opt string) (string, error) { + copt := C.CString(opt) + defer C.free(unsafe.Pointer(copt)) + + cval := C.new_char_pointer() + defer C.free(unsafe.Pointer(cval)) + i := C.ub_ctx_get_option(u.ctx, C.CString(opt), &cval) + return C.GoString(cval), newError(int(i)) +} + +// Config wraps Unbound's ub_ctx_config. +func (u *Unbound) Config(fname string) error { + cfname := C.CString(fname) + defer C.free(unsafe.Pointer(cfname)) + i := C.ub_ctx_config(u.ctx, cfname) + return newError(int(i)) +} + +// SetFwd wraps Unbound's ub_ctx_set_fwd. +func (u *Unbound) SetFwd(addr string) error { + caddr := C.CString(addr) + defer C.free(unsafe.Pointer(caddr)) + i := C.ub_ctx_set_fwd(u.ctx, caddr) + return newError(int(i)) +} + +// Hosts wraps Unbound's ub_ctx_hosts. +func (u *Unbound) Hosts(fname string) error { + cfname := C.CString(fname) + defer C.free(unsafe.Pointer(cfname)) + i := C.ub_ctx_hosts(u.ctx, cfname) + return newError(int(i)) +} + +// Resolve wraps Unbound's ub_resolve. 
+func (u *Unbound) Resolve(name string, rrtype, rrclass uint16) (*Result, error) { + name = dns.Fqdn(name) + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + res := C.new_ub_result() + r := new(Result) + // Normally, we would call 'defer C.ub_resolve_free(res)' here, but + // that does not work (in Go 1.6.1), see + // https://github.com/miekg/unbound/issues/8 + // This is likely related to https://github.com/golang/go/issues/15921 + t := time.Now() + i := C.ub_resolve(u.ctx, cname, C.int(rrtype), C.int(rrclass), &res) + r.Rtt = time.Since(t) + err := newError(int(i)) + if err != nil { + C.ub_resolve_free(res) + return nil, err + } + + r.Qname = C.GoString(res.qname) + r.Qtype = uint16(res.qtype) + r.Qclass = uint16(res.qclass) + + r.CanonName = C.GoString(res.canonname) + r.Rcode = int(res.rcode) + r.AnswerPacket = new(dns.Msg) + r.AnswerPacket.Unpack(C.GoBytes(res.answer_packet, res.answer_len)) // Should always work + r.HaveData = res.havedata == 1 + r.NxDomain = res.nxdomain == 1 + r.Secure = res.secure == 1 + r.Bogus = res.bogus == 1 + r.WhyBogus = C.GoString(res.why_bogus) + if u.haveTtlFeature() { + r.Ttl = uint32(C.ub_ttl(res)) + } + + // Re-create the RRs + var h dns.RR_Header + h.Name = r.Qname + h.Rrtype = r.Qtype + h.Class = r.Qclass + h.Ttl = r.Ttl + + j := 0 + if r.HaveData { + r.Data = make([][]byte, 0) + r.Rr = make([]dns.RR, 0) + b := C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j))) + + // Create the RR; write out the header details and + // the rdata to a buffer, and unpack it again into an + // actual RR, for ever rr found by resolve + hdrBuf := make([]byte, len(h.Name)+11) + off, _ := dns.PackDomainName(h.Name, hdrBuf, 0, nil, false) + binary.BigEndian.PutUint16(hdrBuf[off:], h.Rrtype) + off += 2 + binary.BigEndian.PutUint16(hdrBuf[off:], h.Class) + off += 2 + binary.BigEndian.PutUint32(hdrBuf[off:], h.Ttl) + off += 4 + + for len(b) != 0 { + h.Rdlength = uint16(len(b)) + // 
Note: we are rewriting the rdata len so we do not + // increase off anymore. + binary.BigEndian.PutUint16(hdrBuf[off:], h.Rdlength) + rrBuf := append(hdrBuf, b...) + + rr, _, err := dns.UnpackRR(rrBuf, 0) + if err == nil { + r.Rr = append(r.Rr, rr) + } + + r.Data = append(r.Data, b) + j++ + b = C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j))) + } + } + C.ub_resolve_free(res) + return r, err +} + +// ResolveAsync does *not* wrap the Unbound function, instead +// it utilizes Go's goroutines and channels to implement the asynchronous behavior Unbound +// implements. As a result the function signature is different. +// The result (or an error) is returned on the channel c. +// Also the ub_cancel, ub_wait_, ub_fd, ub_process are not implemented. +func (u *Unbound) ResolveAsync(name string, rrtype, rrclass uint16, c chan *ResultError) { + go func() { + r, e := u.Resolve(name, rrtype, rrclass) + c <- &ResultError{r, e} + }() + return +} + +// AddTa wraps Unbound's ub_ctx_add_ta. +func (u *Unbound) AddTa(ta string) error { + cta := C.CString(ta) + i := C.ub_ctx_add_ta(u.ctx, cta) + return newError(int(i)) +} + +// AddTaFile wraps Unbound's ub_ctx_add_ta_file. +func (u *Unbound) AddTaFile(fname string) error { + cfname := C.CString(fname) + defer C.free(unsafe.Pointer(cfname)) + i := C.ub_ctx_add_ta_file(u.ctx, cfname) + return newError(int(i)) +} + +// TrustedKeys wraps Unbound's ub_ctx_trustedkeys. +func (u *Unbound) TrustedKeys(fname string) error { + cfname := C.CString(fname) + defer C.free(unsafe.Pointer(cfname)) + i := C.ub_ctx_trustedkeys(u.ctx, cfname) + return newError(int(i)) +} + +// ZoneAdd wraps Unbound's ub_ctx_zone_add. 
+func (u *Unbound) ZoneAdd(zone_name, zone_type string) error { + czone_name := C.CString(zone_name) + defer C.free(unsafe.Pointer(czone_name)) + czone_type := C.CString(zone_type) + defer C.free(unsafe.Pointer(czone_type)) + i := C.ub_ctx_zone_add(u.ctx, czone_name, czone_type) + return newError(int(i)) +} + +// ZoneRemove wraps Unbound's ub_ctx_zone_remove. +func (u *Unbound) ZoneRemove(zone_name string) error { + czone_name := C.CString(zone_name) + defer C.free(unsafe.Pointer(czone_name)) + i := C.ub_ctx_zone_remove(u.ctx, czone_name) + return newError(int(i)) +} + +// DataAdd wraps Unbound's ub_ctx_data_add. +func (u *Unbound) DataAdd(data string) error { + cdata := C.CString(data) + defer C.free(unsafe.Pointer(cdata)) + i := C.ub_ctx_data_add(u.ctx, cdata) + return newError(int(i)) +} + +// DataRemove wraps Unbound's ub_ctx_data_remove. +func (u *Unbound) DataRemove(data string) error { + cdata := C.CString(data) + defer C.free(unsafe.Pointer(cdata)) + i := C.ub_ctx_data_remove(u.ctx, cdata) + return newError(int(i)) +} + +// DebugOut wraps Unbound's ub_ctx_debugout. +func (u *Unbound) DebugOut(out *os.File) error { + cmode := C.CString("a+") + defer C.free(unsafe.Pointer(cmode)) + file := C.fdopen(C.int(out.Fd()), cmode) + i := C.ub_ctx_debugout(u.ctx, unsafe.Pointer(file)) + return newError(int(i)) +} + +// DebugLevel wraps Unbound's ub_ctx_data_level. +func (u *Unbound) DebugLevel(d int) error { + i := C.ub_ctx_debuglevel(u.ctx, C.int(d)) + return newError(int(i)) +} + +// Version wrap Ubounds's ub_version. 
Return the version of the Unbound +// library in as integers [major, minor, patch] +func (u *Unbound) Version() (version [3]int) { + // split the string on the dots + v := strings.SplitN(C.GoString(C.ub_version()), ".", 3) + if len(v) != 3 { + return + } + version[0], _ = strconv.Atoi(v[0]) + version[1], _ = strconv.Atoi(v[1]) + version[2], _ = strconv.Atoi(v[2]) + return +} diff --git a/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt b/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt new file mode 100644 index 00000000..079a934f --- /dev/null +++ b/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016-2020 Simone Carletti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go b/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go new file mode 100644 index 00000000..4b87105d --- /dev/null +++ b/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go @@ -0,0 +1,39 @@ +// Package publicsuffix is a drop-in replacement for the golang.org/x/net/publicsuffix +// based on the weppos/publicsuffix package. +package publicsuffix + +import ( + psl "github.com/weppos/publicsuffix-go/publicsuffix" +) + +// PublicSuffix returns the public suffix of the domain +// using a copy of the publicsuffix.org database packaged into this library. +// +// Note. To maintain compatibility with the golang.org/x/net/publicsuffix +// this method doesn't return an error. However, in case of error, +// the returned value is empty. +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + //d, err := psl.Parse(domain) + //if err != nil { + // return "", false + //} + // + //return d.Rule.Value, !d.Rule.Private + + rule := psl.DefaultList.Find(domain, nil) + publicSuffix = rule.Decompose(domain)[1] + icann = !rule.Private + + // x/net/publicsuffix sets icann to false when the default rule "*" is used + if rule.Value == "" && rule.Type == psl.WildcardType { + icann = false + } + + return +} + +// EffectiveTLDPlusOne returns the effective top level domain plus one more label. +// For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org". 
+func EffectiveTLDPlusOne(domain string) (string, error) { + return psl.Domain(domain) +} diff --git a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go new file mode 100644 index 00000000..c10e9427 --- /dev/null +++ b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go @@ -0,0 +1,544 @@ +//go:generate go run ../cmd/gen/gen.go + +// Package publicsuffix provides a domain name parser +// based on data from the public suffix list http://publicsuffix.org/. +// A public suffix is one under which Internet users can directly register names. +package publicsuffix + +import ( + "bufio" + "fmt" + "io" + "net/http/cookiejar" + "os" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // Version identifies the current library version. + // This is a pro forma convention given that Go dependencies + // tends to be fetched directly from the repo. + Version = "0.13.0" + + // NormalType represents a normal rule such as "com" + NormalType = 1 + // WildcardType represents a wildcard rule such as "*.com" + WildcardType = 2 + // ExceptionType represents an exception to a wildard rule + ExceptionType = 3 + + listTokenPrivateDomains = "===BEGIN PRIVATE DOMAINS===" + listTokenComment = "//" +) + +// DefaultList is the default List and it is used by Parse and Domain. +var DefaultList = NewList() + +// DefaultRule is the default Rule that represents "*". +var DefaultRule = MustNewRule("*") + +// DefaultParserOptions are the default options used to parse a Public Suffix list. +var DefaultParserOptions = &ParserOption{PrivateDomains: true, ASCIIEncoded: false} + +// DefaultFindOptions are the default options used to perform the lookup of rules in the list. +var DefaultFindOptions = &FindOptions{IgnorePrivate: false, DefaultRule: DefaultRule} + +// Rule represents a single rule in a Public Suffix List. 
+type Rule struct { + Type int + Value string + Length int + Private bool +} + +// ParserOption are the options you can use to customize the way a List +// is parsed from a file or a string. +type ParserOption struct { + // Set to false to skip the private domains when parsing. + // Default to true, which means the private domains are included. + PrivateDomains bool + + // Set to false if the input is encoded in U-labels (Unicode) + // as opposite to A-labels. + // Default to false, which means the list is containing Unicode domains. + // This is the default because the original PSL currently contains Unicode. + ASCIIEncoded bool +} + +// FindOptions are the options you can use to customize the way a Rule +// is searched within the list. +type FindOptions struct { + // Set to true to ignore the rules within the "Private" section of the Public Suffix List. + IgnorePrivate bool + + // The default rule to use when no rule matches the input. + // The format Public Suffix algorithm states that the rule "*" should be used when no other rule matches, + // but some consumers may have different needs. + DefaultRule *Rule +} + +// List represents a Public Suffix List. +type List struct { + // rules is kept private because you should not access rules directly + rules map[string]*Rule +} + +// NewList creates a new empty list. +func NewList() *List { + return &List{ + rules: map[string]*Rule{}, + } +} + +// NewListFromString parses a string that represents a Public Suffix source +// and returns a List initialized with the rules in the source. +func NewListFromString(src string, options *ParserOption) (*List, error) { + l := NewList() + _, err := l.LoadString(src, options) + return l, err +} + +// NewListFromFile parses a string that represents a Public Suffix source +// and returns a List initialized with the rules in the source. 
+func NewListFromFile(path string, options *ParserOption) (*List, error) { + l := NewList() + _, err := l.LoadFile(path, options) + return l, err +} + +// Load parses and loads a set of rules from an io.Reader into the current list. +func (l *List) Load(r io.Reader, options *ParserOption) ([]Rule, error) { + return l.parse(r, options) +} + +// LoadString parses and loads a set of rules from a String into the current list. +func (l *List) LoadString(src string, options *ParserOption) ([]Rule, error) { + r := strings.NewReader(src) + return l.parse(r, options) +} + +// LoadFile parses and loads a set of rules from a File into the current list. +func (l *List) LoadFile(path string, options *ParserOption) ([]Rule, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return l.parse(f, options) +} + +// AddRule adds a new rule to the list. +// +// The exact position of the rule into the list is unpredictable. +// The list may be optimized internally for lookups, therefore the algorithm +// will decide the best position for the new rule. +func (l *List) AddRule(r *Rule) error { + l.rules[r.Value] = r + return nil +} + +// Size returns the size of the list, which is the number of rules. 
+func (l *List) Size() int { + return len(l.rules) +} + +func (l *List) parse(r io.Reader, options *ParserOption) ([]Rule, error) { + if options == nil { + options = DefaultParserOptions + } + var rules []Rule + + scanner := bufio.NewScanner(r) + var section int // 1 == ICANN, 2 == PRIVATE + +Scanning: + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + switch { + + // skip blank lines + case line == "": + break + + // include private domains or stop scanner + case strings.Contains(line, listTokenPrivateDomains): + if !options.PrivateDomains { + break Scanning + } + section = 2 + + // skip comments + case strings.HasPrefix(line, listTokenComment): + break + + default: + var rule *Rule + var err error + + if options.ASCIIEncoded { + rule, err = NewRule(line) + } else { + rule, err = NewRuleUnicode(line) + } + if err != nil { + return []Rule{}, err + } + + rule.Private = (section == 2) + l.AddRule(rule) + rules = append(rules, *rule) + } + + } + + return rules, scanner.Err() +} + +// Find and returns the most appropriate rule for the domain name. +func (l *List) Find(name string, options *FindOptions) *Rule { + if options == nil { + options = DefaultFindOptions + } + + part := name + for { + rule, ok := l.rules[part] + + if ok && rule.Match(name) && !(options.IgnorePrivate && rule.Private) { + return rule + } + + i := strings.IndexRune(part, '.') + if i < 0 { + return options.DefaultRule + } + + part = part[i+1:] + } + +} + +// NewRule parses the rule content, creates and returns a Rule. +// +// The content of the rule MUST be encoded in ASCII (A-labels). 
+func NewRule(content string) (*Rule, error) { + var rule *Rule + var value string + + switch content[0:1] { + case "*": // wildcard + if content == "*" { + value = "" + } else { + value = content[2:] + } + rule = &Rule{Type: WildcardType, Value: value, Length: len(Labels(value)) + 1} + case "!": // exception + value = content[1:] + rule = &Rule{Type: ExceptionType, Value: value, Length: len(Labels(value))} + default: // normal + value = content + rule = &Rule{Type: NormalType, Value: value, Length: len(Labels(value))} + } + + return rule, nil +} + +// NewRuleUnicode is like NewRule, but expects the content to be encoded in Unicode (U-labels). +func NewRuleUnicode(content string) (*Rule, error) { + var err error + + content, err = ToASCII(content) + if err != nil { + return nil, err + } + + return NewRule(content) +} + +// MustNewRule is like NewRule, but panics if the content cannot be parsed. +func MustNewRule(content string) *Rule { + rule, err := NewRule(content) + if err != nil { + panic(err) + } + return rule +} + +// Match checks if the rule matches the name. +// +// A domain name is said to match a rule if and only if all of the following conditions are met: +// - When the domain and rule are split into corresponding labels, +// that the domain contains as many or more labels than the rule. +// - Beginning with the right-most labels of both the domain and the rule, +// and continuing for all labels in the rule, one finds that for every pair, +// either they are identical, or that the label from the rule is "*". 
//
// See https://publicsuffix.org/list/
func (r *Rule) Match(name string) bool {
	// NOTE(review): assumes name is already normalized (lowercase ASCII,
	// A-labels), as produced by normalize/ToASCII — confirm at call sites.
	left := strings.TrimSuffix(name, r.Value)

	// the name contains as many labels than the rule
	// this is a match, unless it's a wildcard
	// because the wildcard requires one more label
	if left == "" {
		return r.Type != WildcardType
	}

	// if there is one more label, the rule match
	// because either the rule is shorter than the domain
	// or the rule is a wildcard and there is one more label
	// (requiring the trailing "." rejects names that merely end with the
	// rule text without a label boundary, e.g. "notcom" vs rule "com")
	return left[len(left)-1:] == "."
}

// Decompose takes a name as input and decomposes it into a tuple of
// (TRD+SLD, TLD), according to the rule definition and type.
// Both elements are empty when the name cannot be decomposed (e.g. the
// name is the suffix itself). Callers are expected to pass a name that
// already matches the rule.
func (r *Rule) Decompose(name string) (result [2]string) {
	// The catch-all default rule treats only the last label as the suffix.
	if r == DefaultRule {
		i := strings.LastIndex(name, ".")
		if i < 0 {
			return
		}
		result[0], result[1] = name[:i], name[i+1:]
		return
	}
	switch r.Type {
	case NormalType:
		// Strip the suffix, then the separating dot.
		name = strings.TrimSuffix(name, r.Value)
		if len(name) == 0 {
			return
		}
		result[0], result[1] = name[:len(name)-1], r.Value
	case WildcardType:
		// NOTE: := deliberately shadows the parameter within this case.
		name := strings.TrimSuffix(name, r.Value)
		if len(name) == 0 {
			return
		}
		name = name[:len(name)-1]
		// The wildcard consumes one extra label, which becomes part of
		// the returned suffix.
		i := strings.LastIndex(name, ".")
		if i < 0 {
			return
		}
		result[0], result[1] = name[:i], name[i+1:]+"."+r.Value
	case ExceptionType:
		// An exception rule makes its left-most label registrable again:
		// the effective suffix is the rule value minus that first label.
		i := strings.IndexRune(r.Value, '.')
		if i < 0 {
			return
		}
		suffix := r.Value[i+1:]
		name = strings.TrimSuffix(name, suffix)
		if len(name) == 0 {
			return
		}
		result[0], result[1] = name[:len(name)-1], suffix
	}
	return
}

// Labels decomposes given domain name into labels,
// corresponding to the dot-separated tokens.
func Labels(name string) []string {
	return strings.Split(name, ".")
}

// DomainName represents a domain name.
type DomainName struct {
	TLD  string // the public suffix (may span several labels, e.g. "co.uk")
	SLD  string // the label immediately left of the TLD
	TRD  string // everything left of the SLD (may contain dots)
	Rule *Rule  // the rule used to decompose the name
}

// String joins the components of the domain name into a single string.
// Empty labels are skipped.
+// +// Examples: +// +// DomainName{"com", "example"}.String() +// // example.com +// DomainName{"com", "example", "www"}.String() +// // www.example.com +// +func (d *DomainName) String() string { + switch { + case d.TLD == "": + return "" + case d.SLD == "": + return d.TLD + case d.TRD == "": + return d.SLD + "." + d.TLD + default: + return d.TRD + "." + d.SLD + "." + d.TLD + } +} + +// Domain extract and return the domain name from the input +// using the default (Public Suffix) List. +// +// Examples: +// +// publicsuffix.Domain("example.com") +// // example.com +// publicsuffix.Domain("www.example.com") +// // example.com +// publicsuffix.Domain("www.example.co.uk") +// // example.co.uk +// +func Domain(name string) (string, error) { + return DomainFromListWithOptions(DefaultList, name, DefaultFindOptions) +} + +// Parse decomposes the name into TLD, SLD, TRD +// using the default (Public Suffix) List, +// and returns the result as a DomainName +// +// Examples: +// +// list := NewList() +// +// publicsuffix.Parse("example.com") +// // &DomainName{"com", "example"} +// publicsuffix.Parse("www.example.com") +// // &DomainName{"com", "example", "www"} +// publicsuffix.Parse("www.example.co.uk") +// // &DomainName{"co.uk", "example"} +// +func Parse(name string) (*DomainName, error) { + return ParseFromListWithOptions(DefaultList, name, DefaultFindOptions) +} + +// DomainFromListWithOptions extract and return the domain name from the input +// using the (Public Suffix) list passed as argument. 
+// +// Examples: +// +// list := NewList() +// +// publicsuffix.DomainFromListWithOptions(list, "example.com") +// // example.com +// publicsuffix.DomainFromListWithOptions(list, "www.example.com") +// // example.com +// publicsuffix.DomainFromListWithOptions(list, "www.example.co.uk") +// // example.co.uk +// +func DomainFromListWithOptions(l *List, name string, options *FindOptions) (string, error) { + dn, err := ParseFromListWithOptions(l, name, options) + if err != nil { + return "", err + } + return dn.SLD + "." + dn.TLD, nil +} + +// ParseFromListWithOptions decomposes the name into TLD, SLD, TRD +// using the (Public Suffix) list passed as argument, +// and returns the result as a DomainName +// +// Examples: +// +// list := NewList() +// +// publicsuffix.ParseFromListWithOptions(list, "example.com") +// // &DomainName{"com", "example"} +// publicsuffix.ParseFromListWithOptions(list, "www.example.com") +// // &DomainName{"com", "example", "www"} +// publicsuffix.ParseFromListWithOptions(list, "www.example.co.uk") +// // &DomainName{"co.uk", "example"} +// +func ParseFromListWithOptions(l *List, name string, options *FindOptions) (*DomainName, error) { + n, err := normalize(name) + if err != nil { + return nil, err + } + + r := l.Find(n, options) + if r == nil { + return nil, fmt.Errorf("no rule matching name %s", name) + } + + parts := r.Decompose(n) + left, tld := parts[0], parts[1] + if tld == "" { + return nil, fmt.Errorf("%s is a suffix", n) + } + + dn := &DomainName{ + Rule: r, + TLD: tld, + } + if i := strings.LastIndex(left, "."); i < 0 { + dn.SLD = left + } else { + dn.TRD = left[:i] + dn.SLD = left[i+1:] + } + return dn, nil +} + +func normalize(name string) (string, error) { + ret := strings.ToLower(name) + + if ret == "" { + return "", fmt.Errorf("name is blank") + } + if ret[0] == '.' { + return "", fmt.Errorf("name %s starts with a dot", ret) + } + + return ret, nil +} + +// ToASCII is a wrapper for idna.ToASCII. 
//
// This wrapper exists because idna.ToASCII backward-compatibility was broken twice in few months
// and I can't call this package directly anymore. The wrapper performs some terrible-but-necessary
// before-after replacements to make sure an already ASCII input always results in the same output
// even if passed through ToASCII.
//
// See golang/net@67957fd0b1, golang/net@f2499483f9, golang/net@78ebe5c8b6,
// and weppos/publicsuffix-go#66.
func ToASCII(s string) (string, error) {
	// .example.com should be .example.com
	// ..example.com should be ..example.com
	if strings.HasPrefix(s, ".") {
		// Find the index of the last dot in the leading run of dots, so
		// the whole run can be re-attached verbatim after conversion
		// (idna would otherwise alter or reject it).
		dotIndex := 0
		for i := 0; i < len(s); i++ {
			if s[i] == '.' {
				dotIndex = i
			} else {
				break
			}
		}
		out, err := idna.ToASCII(s[dotIndex+1:])
		out = s[:dotIndex+1] + out
		return out, err
	}

	return idna.ToASCII(s)
}

// ToUnicode is a wrapper for idna.ToUnicode.
//
// See ToASCII for more details about why this wrapper exists.
func ToUnicode(s string) (string, error) {
	return idna.ToUnicode(s)
}

// CookieJarList implements the cookiejar.PublicSuffixList interface.
var CookieJarList cookiejar.PublicSuffixList = cookiejarList{DefaultList}

// cookiejarList adapts a *List to net/http/cookiejar's PublicSuffixList.
type cookiejarList struct {
	List *List
}

// PublicSuffix implements cookiejar.PublicSuffixList.
// NOTE(review): dereferences the rule returned by Find unconditionally;
// assumes Find never returns nil here (a DefaultRule is supplied via the
// default options) — confirm before changing the defaults.
func (l cookiejarList) PublicSuffix(domain string) string {
	rule := l.List.Find(domain, nil)
	return rule.Decompose(domain)[1]
}

// String implements cookiejar.PublicSuffixList, reporting the version of
// the compiled-in default list.
func (cookiejarList) String() string {
	return defaultListVersion
}
diff --git a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go
new file mode 100644
index 00000000..bb20a8ba
--- /dev/null
+++ b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go
@@ -0,0 +1,8847 @@
+// This file is automatically generated
+// Run "go run cmd/gen/gen.go" to update the list.
+ +package publicsuffix + +const defaultListVersion = "PSL version e7e340 (Sat Feb 15 21:59:27 2020)" + +func DefaultRules() [8828]Rule { + return r +} + +var r = [8828]Rule{ + {1, "ac", 1, false}, + {1, "com.ac", 2, false}, + {1, "edu.ac", 2, false}, + {1, "gov.ac", 2, false}, + {1, "net.ac", 2, false}, + {1, "mil.ac", 2, false}, + {1, "org.ac", 2, false}, + {1, "ad", 1, false}, + {1, "nom.ad", 2, false}, + {1, "ae", 1, false}, + {1, "co.ae", 2, false}, + {1, "net.ae", 2, false}, + {1, "org.ae", 2, false}, + {1, "sch.ae", 2, false}, + {1, "ac.ae", 2, false}, + {1, "gov.ae", 2, false}, + {1, "mil.ae", 2, false}, + {1, "aero", 1, false}, + {1, "accident-investigation.aero", 2, false}, + {1, "accident-prevention.aero", 2, false}, + {1, "aerobatic.aero", 2, false}, + {1, "aeroclub.aero", 2, false}, + {1, "aerodrome.aero", 2, false}, + {1, "agents.aero", 2, false}, + {1, "aircraft.aero", 2, false}, + {1, "airline.aero", 2, false}, + {1, "airport.aero", 2, false}, + {1, "air-surveillance.aero", 2, false}, + {1, "airtraffic.aero", 2, false}, + {1, "air-traffic-control.aero", 2, false}, + {1, "ambulance.aero", 2, false}, + {1, "amusement.aero", 2, false}, + {1, "association.aero", 2, false}, + {1, "author.aero", 2, false}, + {1, "ballooning.aero", 2, false}, + {1, "broker.aero", 2, false}, + {1, "caa.aero", 2, false}, + {1, "cargo.aero", 2, false}, + {1, "catering.aero", 2, false}, + {1, "certification.aero", 2, false}, + {1, "championship.aero", 2, false}, + {1, "charter.aero", 2, false}, + {1, "civilaviation.aero", 2, false}, + {1, "club.aero", 2, false}, + {1, "conference.aero", 2, false}, + {1, "consultant.aero", 2, false}, + {1, "consulting.aero", 2, false}, + {1, "control.aero", 2, false}, + {1, "council.aero", 2, false}, + {1, "crew.aero", 2, false}, + {1, "design.aero", 2, false}, + {1, "dgca.aero", 2, false}, + {1, "educator.aero", 2, false}, + {1, "emergency.aero", 2, false}, + {1, "engine.aero", 2, false}, + {1, "engineer.aero", 2, false}, + {1, 
"entertainment.aero", 2, false}, + {1, "equipment.aero", 2, false}, + {1, "exchange.aero", 2, false}, + {1, "express.aero", 2, false}, + {1, "federation.aero", 2, false}, + {1, "flight.aero", 2, false}, + {1, "freight.aero", 2, false}, + {1, "fuel.aero", 2, false}, + {1, "gliding.aero", 2, false}, + {1, "government.aero", 2, false}, + {1, "groundhandling.aero", 2, false}, + {1, "group.aero", 2, false}, + {1, "hanggliding.aero", 2, false}, + {1, "homebuilt.aero", 2, false}, + {1, "insurance.aero", 2, false}, + {1, "journal.aero", 2, false}, + {1, "journalist.aero", 2, false}, + {1, "leasing.aero", 2, false}, + {1, "logistics.aero", 2, false}, + {1, "magazine.aero", 2, false}, + {1, "maintenance.aero", 2, false}, + {1, "media.aero", 2, false}, + {1, "microlight.aero", 2, false}, + {1, "modelling.aero", 2, false}, + {1, "navigation.aero", 2, false}, + {1, "parachuting.aero", 2, false}, + {1, "paragliding.aero", 2, false}, + {1, "passenger-association.aero", 2, false}, + {1, "pilot.aero", 2, false}, + {1, "press.aero", 2, false}, + {1, "production.aero", 2, false}, + {1, "recreation.aero", 2, false}, + {1, "repbody.aero", 2, false}, + {1, "res.aero", 2, false}, + {1, "research.aero", 2, false}, + {1, "rotorcraft.aero", 2, false}, + {1, "safety.aero", 2, false}, + {1, "scientist.aero", 2, false}, + {1, "services.aero", 2, false}, + {1, "show.aero", 2, false}, + {1, "skydiving.aero", 2, false}, + {1, "software.aero", 2, false}, + {1, "student.aero", 2, false}, + {1, "trader.aero", 2, false}, + {1, "trading.aero", 2, false}, + {1, "trainer.aero", 2, false}, + {1, "union.aero", 2, false}, + {1, "workinggroup.aero", 2, false}, + {1, "works.aero", 2, false}, + {1, "af", 1, false}, + {1, "gov.af", 2, false}, + {1, "com.af", 2, false}, + {1, "org.af", 2, false}, + {1, "net.af", 2, false}, + {1, "edu.af", 2, false}, + {1, "ag", 1, false}, + {1, "com.ag", 2, false}, + {1, "org.ag", 2, false}, + {1, "net.ag", 2, false}, + {1, "co.ag", 2, false}, + {1, "nom.ag", 2, false}, + {1, 
"ai", 1, false}, + {1, "off.ai", 2, false}, + {1, "com.ai", 2, false}, + {1, "net.ai", 2, false}, + {1, "org.ai", 2, false}, + {1, "al", 1, false}, + {1, "com.al", 2, false}, + {1, "edu.al", 2, false}, + {1, "gov.al", 2, false}, + {1, "mil.al", 2, false}, + {1, "net.al", 2, false}, + {1, "org.al", 2, false}, + {1, "am", 1, false}, + {1, "co.am", 2, false}, + {1, "com.am", 2, false}, + {1, "commune.am", 2, false}, + {1, "net.am", 2, false}, + {1, "org.am", 2, false}, + {1, "ao", 1, false}, + {1, "ed.ao", 2, false}, + {1, "gv.ao", 2, false}, + {1, "og.ao", 2, false}, + {1, "co.ao", 2, false}, + {1, "pb.ao", 2, false}, + {1, "it.ao", 2, false}, + {1, "aq", 1, false}, + {1, "ar", 1, false}, + {1, "com.ar", 2, false}, + {1, "edu.ar", 2, false}, + {1, "gob.ar", 2, false}, + {1, "gov.ar", 2, false}, + {1, "int.ar", 2, false}, + {1, "mil.ar", 2, false}, + {1, "musica.ar", 2, false}, + {1, "net.ar", 2, false}, + {1, "org.ar", 2, false}, + {1, "tur.ar", 2, false}, + {1, "arpa", 1, false}, + {1, "e164.arpa", 2, false}, + {1, "in-addr.arpa", 2, false}, + {1, "ip6.arpa", 2, false}, + {1, "iris.arpa", 2, false}, + {1, "uri.arpa", 2, false}, + {1, "urn.arpa", 2, false}, + {1, "as", 1, false}, + {1, "gov.as", 2, false}, + {1, "asia", 1, false}, + {1, "at", 1, false}, + {1, "ac.at", 2, false}, + {1, "co.at", 2, false}, + {1, "gv.at", 2, false}, + {1, "or.at", 2, false}, + {1, "au", 1, false}, + {1, "com.au", 2, false}, + {1, "net.au", 2, false}, + {1, "org.au", 2, false}, + {1, "edu.au", 2, false}, + {1, "gov.au", 2, false}, + {1, "asn.au", 2, false}, + {1, "id.au", 2, false}, + {1, "info.au", 2, false}, + {1, "conf.au", 2, false}, + {1, "oz.au", 2, false}, + {1, "act.au", 2, false}, + {1, "nsw.au", 2, false}, + {1, "nt.au", 2, false}, + {1, "qld.au", 2, false}, + {1, "sa.au", 2, false}, + {1, "tas.au", 2, false}, + {1, "vic.au", 2, false}, + {1, "wa.au", 2, false}, + {1, "act.edu.au", 3, false}, + {1, "catholic.edu.au", 3, false}, + {1, "nsw.edu.au", 3, false}, + {1, "nt.edu.au", 
3, false}, + {1, "qld.edu.au", 3, false}, + {1, "sa.edu.au", 3, false}, + {1, "tas.edu.au", 3, false}, + {1, "vic.edu.au", 3, false}, + {1, "wa.edu.au", 3, false}, + {1, "qld.gov.au", 3, false}, + {1, "sa.gov.au", 3, false}, + {1, "tas.gov.au", 3, false}, + {1, "vic.gov.au", 3, false}, + {1, "wa.gov.au", 3, false}, + {1, "education.tas.edu.au", 4, false}, + {1, "schools.nsw.edu.au", 4, false}, + {1, "aw", 1, false}, + {1, "com.aw", 2, false}, + {1, "ax", 1, false}, + {1, "az", 1, false}, + {1, "com.az", 2, false}, + {1, "net.az", 2, false}, + {1, "int.az", 2, false}, + {1, "gov.az", 2, false}, + {1, "org.az", 2, false}, + {1, "edu.az", 2, false}, + {1, "info.az", 2, false}, + {1, "pp.az", 2, false}, + {1, "mil.az", 2, false}, + {1, "name.az", 2, false}, + {1, "pro.az", 2, false}, + {1, "biz.az", 2, false}, + {1, "ba", 1, false}, + {1, "com.ba", 2, false}, + {1, "edu.ba", 2, false}, + {1, "gov.ba", 2, false}, + {1, "mil.ba", 2, false}, + {1, "net.ba", 2, false}, + {1, "org.ba", 2, false}, + {1, "bb", 1, false}, + {1, "biz.bb", 2, false}, + {1, "co.bb", 2, false}, + {1, "com.bb", 2, false}, + {1, "edu.bb", 2, false}, + {1, "gov.bb", 2, false}, + {1, "info.bb", 2, false}, + {1, "net.bb", 2, false}, + {1, "org.bb", 2, false}, + {1, "store.bb", 2, false}, + {1, "tv.bb", 2, false}, + {2, "bd", 2, false}, + {1, "be", 1, false}, + {1, "ac.be", 2, false}, + {1, "bf", 1, false}, + {1, "gov.bf", 2, false}, + {1, "bg", 1, false}, + {1, "a.bg", 2, false}, + {1, "b.bg", 2, false}, + {1, "c.bg", 2, false}, + {1, "d.bg", 2, false}, + {1, "e.bg", 2, false}, + {1, "f.bg", 2, false}, + {1, "g.bg", 2, false}, + {1, "h.bg", 2, false}, + {1, "i.bg", 2, false}, + {1, "j.bg", 2, false}, + {1, "k.bg", 2, false}, + {1, "l.bg", 2, false}, + {1, "m.bg", 2, false}, + {1, "n.bg", 2, false}, + {1, "o.bg", 2, false}, + {1, "p.bg", 2, false}, + {1, "q.bg", 2, false}, + {1, "r.bg", 2, false}, + {1, "s.bg", 2, false}, + {1, "t.bg", 2, false}, + {1, "u.bg", 2, false}, + {1, "v.bg", 2, false}, + {1, 
"w.bg", 2, false}, + {1, "x.bg", 2, false}, + {1, "y.bg", 2, false}, + {1, "z.bg", 2, false}, + {1, "0.bg", 2, false}, + {1, "1.bg", 2, false}, + {1, "2.bg", 2, false}, + {1, "3.bg", 2, false}, + {1, "4.bg", 2, false}, + {1, "5.bg", 2, false}, + {1, "6.bg", 2, false}, + {1, "7.bg", 2, false}, + {1, "8.bg", 2, false}, + {1, "9.bg", 2, false}, + {1, "bh", 1, false}, + {1, "com.bh", 2, false}, + {1, "edu.bh", 2, false}, + {1, "net.bh", 2, false}, + {1, "org.bh", 2, false}, + {1, "gov.bh", 2, false}, + {1, "bi", 1, false}, + {1, "co.bi", 2, false}, + {1, "com.bi", 2, false}, + {1, "edu.bi", 2, false}, + {1, "or.bi", 2, false}, + {1, "org.bi", 2, false}, + {1, "biz", 1, false}, + {1, "bj", 1, false}, + {1, "asso.bj", 2, false}, + {1, "barreau.bj", 2, false}, + {1, "gouv.bj", 2, false}, + {1, "bm", 1, false}, + {1, "com.bm", 2, false}, + {1, "edu.bm", 2, false}, + {1, "gov.bm", 2, false}, + {1, "net.bm", 2, false}, + {1, "org.bm", 2, false}, + {1, "bn", 1, false}, + {1, "com.bn", 2, false}, + {1, "edu.bn", 2, false}, + {1, "gov.bn", 2, false}, + {1, "net.bn", 2, false}, + {1, "org.bn", 2, false}, + {1, "bo", 1, false}, + {1, "com.bo", 2, false}, + {1, "edu.bo", 2, false}, + {1, "gob.bo", 2, false}, + {1, "int.bo", 2, false}, + {1, "org.bo", 2, false}, + {1, "net.bo", 2, false}, + {1, "mil.bo", 2, false}, + {1, "tv.bo", 2, false}, + {1, "web.bo", 2, false}, + {1, "academia.bo", 2, false}, + {1, "agro.bo", 2, false}, + {1, "arte.bo", 2, false}, + {1, "blog.bo", 2, false}, + {1, "bolivia.bo", 2, false}, + {1, "ciencia.bo", 2, false}, + {1, "cooperativa.bo", 2, false}, + {1, "democracia.bo", 2, false}, + {1, "deporte.bo", 2, false}, + {1, "ecologia.bo", 2, false}, + {1, "economia.bo", 2, false}, + {1, "empresa.bo", 2, false}, + {1, "indigena.bo", 2, false}, + {1, "industria.bo", 2, false}, + {1, "info.bo", 2, false}, + {1, "medicina.bo", 2, false}, + {1, "movimiento.bo", 2, false}, + {1, "musica.bo", 2, false}, + {1, "natural.bo", 2, false}, + {1, "nombre.bo", 2, false}, + 
{1, "noticias.bo", 2, false}, + {1, "patria.bo", 2, false}, + {1, "politica.bo", 2, false}, + {1, "profesional.bo", 2, false}, + {1, "plurinacional.bo", 2, false}, + {1, "pueblo.bo", 2, false}, + {1, "revista.bo", 2, false}, + {1, "salud.bo", 2, false}, + {1, "tecnologia.bo", 2, false}, + {1, "tksat.bo", 2, false}, + {1, "transporte.bo", 2, false}, + {1, "wiki.bo", 2, false}, + {1, "br", 1, false}, + {1, "9guacu.br", 2, false}, + {1, "abc.br", 2, false}, + {1, "adm.br", 2, false}, + {1, "adv.br", 2, false}, + {1, "agr.br", 2, false}, + {1, "aju.br", 2, false}, + {1, "am.br", 2, false}, + {1, "anani.br", 2, false}, + {1, "aparecida.br", 2, false}, + {1, "arq.br", 2, false}, + {1, "art.br", 2, false}, + {1, "ato.br", 2, false}, + {1, "b.br", 2, false}, + {1, "barueri.br", 2, false}, + {1, "belem.br", 2, false}, + {1, "bhz.br", 2, false}, + {1, "bio.br", 2, false}, + {1, "blog.br", 2, false}, + {1, "bmd.br", 2, false}, + {1, "boavista.br", 2, false}, + {1, "bsb.br", 2, false}, + {1, "campinagrande.br", 2, false}, + {1, "campinas.br", 2, false}, + {1, "caxias.br", 2, false}, + {1, "cim.br", 2, false}, + {1, "cng.br", 2, false}, + {1, "cnt.br", 2, false}, + {1, "com.br", 2, false}, + {1, "contagem.br", 2, false}, + {1, "coop.br", 2, false}, + {1, "cri.br", 2, false}, + {1, "cuiaba.br", 2, false}, + {1, "curitiba.br", 2, false}, + {1, "def.br", 2, false}, + {1, "ecn.br", 2, false}, + {1, "eco.br", 2, false}, + {1, "edu.br", 2, false}, + {1, "emp.br", 2, false}, + {1, "eng.br", 2, false}, + {1, "esp.br", 2, false}, + {1, "etc.br", 2, false}, + {1, "eti.br", 2, false}, + {1, "far.br", 2, false}, + {1, "feira.br", 2, false}, + {1, "flog.br", 2, false}, + {1, "floripa.br", 2, false}, + {1, "fm.br", 2, false}, + {1, "fnd.br", 2, false}, + {1, "fortal.br", 2, false}, + {1, "fot.br", 2, false}, + {1, "foz.br", 2, false}, + {1, "fst.br", 2, false}, + {1, "g12.br", 2, false}, + {1, "ggf.br", 2, false}, + {1, "goiania.br", 2, false}, + {1, "gov.br", 2, false}, + {1, "ac.gov.br", 
3, false}, + {1, "al.gov.br", 3, false}, + {1, "am.gov.br", 3, false}, + {1, "ap.gov.br", 3, false}, + {1, "ba.gov.br", 3, false}, + {1, "ce.gov.br", 3, false}, + {1, "df.gov.br", 3, false}, + {1, "es.gov.br", 3, false}, + {1, "go.gov.br", 3, false}, + {1, "ma.gov.br", 3, false}, + {1, "mg.gov.br", 3, false}, + {1, "ms.gov.br", 3, false}, + {1, "mt.gov.br", 3, false}, + {1, "pa.gov.br", 3, false}, + {1, "pb.gov.br", 3, false}, + {1, "pe.gov.br", 3, false}, + {1, "pi.gov.br", 3, false}, + {1, "pr.gov.br", 3, false}, + {1, "rj.gov.br", 3, false}, + {1, "rn.gov.br", 3, false}, + {1, "ro.gov.br", 3, false}, + {1, "rr.gov.br", 3, false}, + {1, "rs.gov.br", 3, false}, + {1, "sc.gov.br", 3, false}, + {1, "se.gov.br", 3, false}, + {1, "sp.gov.br", 3, false}, + {1, "to.gov.br", 3, false}, + {1, "gru.br", 2, false}, + {1, "imb.br", 2, false}, + {1, "ind.br", 2, false}, + {1, "inf.br", 2, false}, + {1, "jab.br", 2, false}, + {1, "jampa.br", 2, false}, + {1, "jdf.br", 2, false}, + {1, "joinville.br", 2, false}, + {1, "jor.br", 2, false}, + {1, "jus.br", 2, false}, + {1, "leg.br", 2, false}, + {1, "lel.br", 2, false}, + {1, "londrina.br", 2, false}, + {1, "macapa.br", 2, false}, + {1, "maceio.br", 2, false}, + {1, "manaus.br", 2, false}, + {1, "maringa.br", 2, false}, + {1, "mat.br", 2, false}, + {1, "med.br", 2, false}, + {1, "mil.br", 2, false}, + {1, "morena.br", 2, false}, + {1, "mp.br", 2, false}, + {1, "mus.br", 2, false}, + {1, "natal.br", 2, false}, + {1, "net.br", 2, false}, + {1, "niteroi.br", 2, false}, + {2, "nom.br", 3, false}, + {1, "not.br", 2, false}, + {1, "ntr.br", 2, false}, + {1, "odo.br", 2, false}, + {1, "ong.br", 2, false}, + {1, "org.br", 2, false}, + {1, "osasco.br", 2, false}, + {1, "palmas.br", 2, false}, + {1, "poa.br", 2, false}, + {1, "ppg.br", 2, false}, + {1, "pro.br", 2, false}, + {1, "psc.br", 2, false}, + {1, "psi.br", 2, false}, + {1, "pvh.br", 2, false}, + {1, "qsl.br", 2, false}, + {1, "radio.br", 2, false}, + {1, "rec.br", 2, false}, + {1, 
"recife.br", 2, false}, + {1, "ribeirao.br", 2, false}, + {1, "rio.br", 2, false}, + {1, "riobranco.br", 2, false}, + {1, "riopreto.br", 2, false}, + {1, "salvador.br", 2, false}, + {1, "sampa.br", 2, false}, + {1, "santamaria.br", 2, false}, + {1, "santoandre.br", 2, false}, + {1, "saobernardo.br", 2, false}, + {1, "saogonca.br", 2, false}, + {1, "sjc.br", 2, false}, + {1, "slg.br", 2, false}, + {1, "slz.br", 2, false}, + {1, "sorocaba.br", 2, false}, + {1, "srv.br", 2, false}, + {1, "taxi.br", 2, false}, + {1, "tc.br", 2, false}, + {1, "teo.br", 2, false}, + {1, "the.br", 2, false}, + {1, "tmp.br", 2, false}, + {1, "trd.br", 2, false}, + {1, "tur.br", 2, false}, + {1, "tv.br", 2, false}, + {1, "udi.br", 2, false}, + {1, "vet.br", 2, false}, + {1, "vix.br", 2, false}, + {1, "vlog.br", 2, false}, + {1, "wiki.br", 2, false}, + {1, "zlg.br", 2, false}, + {1, "bs", 1, false}, + {1, "com.bs", 2, false}, + {1, "net.bs", 2, false}, + {1, "org.bs", 2, false}, + {1, "edu.bs", 2, false}, + {1, "gov.bs", 2, false}, + {1, "bt", 1, false}, + {1, "com.bt", 2, false}, + {1, "edu.bt", 2, false}, + {1, "gov.bt", 2, false}, + {1, "net.bt", 2, false}, + {1, "org.bt", 2, false}, + {1, "bv", 1, false}, + {1, "bw", 1, false}, + {1, "co.bw", 2, false}, + {1, "org.bw", 2, false}, + {1, "by", 1, false}, + {1, "gov.by", 2, false}, + {1, "mil.by", 2, false}, + {1, "com.by", 2, false}, + {1, "of.by", 2, false}, + {1, "bz", 1, false}, + {1, "com.bz", 2, false}, + {1, "net.bz", 2, false}, + {1, "org.bz", 2, false}, + {1, "edu.bz", 2, false}, + {1, "gov.bz", 2, false}, + {1, "ca", 1, false}, + {1, "ab.ca", 2, false}, + {1, "bc.ca", 2, false}, + {1, "mb.ca", 2, false}, + {1, "nb.ca", 2, false}, + {1, "nf.ca", 2, false}, + {1, "nl.ca", 2, false}, + {1, "ns.ca", 2, false}, + {1, "nt.ca", 2, false}, + {1, "nu.ca", 2, false}, + {1, "on.ca", 2, false}, + {1, "pe.ca", 2, false}, + {1, "qc.ca", 2, false}, + {1, "sk.ca", 2, false}, + {1, "yk.ca", 2, false}, + {1, "gc.ca", 2, false}, + {1, "cat", 1, 
false}, + {1, "cc", 1, false}, + {1, "cd", 1, false}, + {1, "gov.cd", 2, false}, + {1, "cf", 1, false}, + {1, "cg", 1, false}, + {1, "ch", 1, false}, + {1, "ci", 1, false}, + {1, "org.ci", 2, false}, + {1, "or.ci", 2, false}, + {1, "com.ci", 2, false}, + {1, "co.ci", 2, false}, + {1, "edu.ci", 2, false}, + {1, "ed.ci", 2, false}, + {1, "ac.ci", 2, false}, + {1, "net.ci", 2, false}, + {1, "go.ci", 2, false}, + {1, "asso.ci", 2, false}, + {1, "xn--aroport-bya.ci", 2, false}, + {1, "int.ci", 2, false}, + {1, "presse.ci", 2, false}, + {1, "md.ci", 2, false}, + {1, "gouv.ci", 2, false}, + {2, "ck", 2, false}, + {3, "www.ck", 2, false}, + {1, "cl", 1, false}, + {1, "gov.cl", 2, false}, + {1, "gob.cl", 2, false}, + {1, "co.cl", 2, false}, + {1, "mil.cl", 2, false}, + {1, "cm", 1, false}, + {1, "co.cm", 2, false}, + {1, "com.cm", 2, false}, + {1, "gov.cm", 2, false}, + {1, "net.cm", 2, false}, + {1, "cn", 1, false}, + {1, "ac.cn", 2, false}, + {1, "com.cn", 2, false}, + {1, "edu.cn", 2, false}, + {1, "gov.cn", 2, false}, + {1, "net.cn", 2, false}, + {1, "org.cn", 2, false}, + {1, "mil.cn", 2, false}, + {1, "xn--55qx5d.cn", 2, false}, + {1, "xn--io0a7i.cn", 2, false}, + {1, "xn--od0alg.cn", 2, false}, + {1, "ah.cn", 2, false}, + {1, "bj.cn", 2, false}, + {1, "cq.cn", 2, false}, + {1, "fj.cn", 2, false}, + {1, "gd.cn", 2, false}, + {1, "gs.cn", 2, false}, + {1, "gz.cn", 2, false}, + {1, "gx.cn", 2, false}, + {1, "ha.cn", 2, false}, + {1, "hb.cn", 2, false}, + {1, "he.cn", 2, false}, + {1, "hi.cn", 2, false}, + {1, "hl.cn", 2, false}, + {1, "hn.cn", 2, false}, + {1, "jl.cn", 2, false}, + {1, "js.cn", 2, false}, + {1, "jx.cn", 2, false}, + {1, "ln.cn", 2, false}, + {1, "nm.cn", 2, false}, + {1, "nx.cn", 2, false}, + {1, "qh.cn", 2, false}, + {1, "sc.cn", 2, false}, + {1, "sd.cn", 2, false}, + {1, "sh.cn", 2, false}, + {1, "sn.cn", 2, false}, + {1, "sx.cn", 2, false}, + {1, "tj.cn", 2, false}, + {1, "xj.cn", 2, false}, + {1, "xz.cn", 2, false}, + {1, "yn.cn", 2, false}, + {1, 
"zj.cn", 2, false}, + {1, "hk.cn", 2, false}, + {1, "mo.cn", 2, false}, + {1, "tw.cn", 2, false}, + {1, "co", 1, false}, + {1, "arts.co", 2, false}, + {1, "com.co", 2, false}, + {1, "edu.co", 2, false}, + {1, "firm.co", 2, false}, + {1, "gov.co", 2, false}, + {1, "info.co", 2, false}, + {1, "int.co", 2, false}, + {1, "mil.co", 2, false}, + {1, "net.co", 2, false}, + {1, "nom.co", 2, false}, + {1, "org.co", 2, false}, + {1, "rec.co", 2, false}, + {1, "web.co", 2, false}, + {1, "com", 1, false}, + {1, "coop", 1, false}, + {1, "cr", 1, false}, + {1, "ac.cr", 2, false}, + {1, "co.cr", 2, false}, + {1, "ed.cr", 2, false}, + {1, "fi.cr", 2, false}, + {1, "go.cr", 2, false}, + {1, "or.cr", 2, false}, + {1, "sa.cr", 2, false}, + {1, "cu", 1, false}, + {1, "com.cu", 2, false}, + {1, "edu.cu", 2, false}, + {1, "org.cu", 2, false}, + {1, "net.cu", 2, false}, + {1, "gov.cu", 2, false}, + {1, "inf.cu", 2, false}, + {1, "cv", 1, false}, + {1, "cw", 1, false}, + {1, "com.cw", 2, false}, + {1, "edu.cw", 2, false}, + {1, "net.cw", 2, false}, + {1, "org.cw", 2, false}, + {1, "cx", 1, false}, + {1, "gov.cx", 2, false}, + {1, "cy", 1, false}, + {1, "ac.cy", 2, false}, + {1, "biz.cy", 2, false}, + {1, "com.cy", 2, false}, + {1, "ekloges.cy", 2, false}, + {1, "gov.cy", 2, false}, + {1, "ltd.cy", 2, false}, + {1, "name.cy", 2, false}, + {1, "net.cy", 2, false}, + {1, "org.cy", 2, false}, + {1, "parliament.cy", 2, false}, + {1, "press.cy", 2, false}, + {1, "pro.cy", 2, false}, + {1, "tm.cy", 2, false}, + {1, "cz", 1, false}, + {1, "de", 1, false}, + {1, "dj", 1, false}, + {1, "dk", 1, false}, + {1, "dm", 1, false}, + {1, "com.dm", 2, false}, + {1, "net.dm", 2, false}, + {1, "org.dm", 2, false}, + {1, "edu.dm", 2, false}, + {1, "gov.dm", 2, false}, + {1, "do", 1, false}, + {1, "art.do", 2, false}, + {1, "com.do", 2, false}, + {1, "edu.do", 2, false}, + {1, "gob.do", 2, false}, + {1, "gov.do", 2, false}, + {1, "mil.do", 2, false}, + {1, "net.do", 2, false}, + {1, "org.do", 2, false}, + {1, 
"sld.do", 2, false}, + {1, "web.do", 2, false}, + {1, "dz", 1, false}, + {1, "com.dz", 2, false}, + {1, "org.dz", 2, false}, + {1, "net.dz", 2, false}, + {1, "gov.dz", 2, false}, + {1, "edu.dz", 2, false}, + {1, "asso.dz", 2, false}, + {1, "pol.dz", 2, false}, + {1, "art.dz", 2, false}, + {1, "ec", 1, false}, + {1, "com.ec", 2, false}, + {1, "info.ec", 2, false}, + {1, "net.ec", 2, false}, + {1, "fin.ec", 2, false}, + {1, "k12.ec", 2, false}, + {1, "med.ec", 2, false}, + {1, "pro.ec", 2, false}, + {1, "org.ec", 2, false}, + {1, "edu.ec", 2, false}, + {1, "gov.ec", 2, false}, + {1, "gob.ec", 2, false}, + {1, "mil.ec", 2, false}, + {1, "edu", 1, false}, + {1, "ee", 1, false}, + {1, "edu.ee", 2, false}, + {1, "gov.ee", 2, false}, + {1, "riik.ee", 2, false}, + {1, "lib.ee", 2, false}, + {1, "med.ee", 2, false}, + {1, "com.ee", 2, false}, + {1, "pri.ee", 2, false}, + {1, "aip.ee", 2, false}, + {1, "org.ee", 2, false}, + {1, "fie.ee", 2, false}, + {1, "eg", 1, false}, + {1, "com.eg", 2, false}, + {1, "edu.eg", 2, false}, + {1, "eun.eg", 2, false}, + {1, "gov.eg", 2, false}, + {1, "mil.eg", 2, false}, + {1, "name.eg", 2, false}, + {1, "net.eg", 2, false}, + {1, "org.eg", 2, false}, + {1, "sci.eg", 2, false}, + {2, "er", 2, false}, + {1, "es", 1, false}, + {1, "com.es", 2, false}, + {1, "nom.es", 2, false}, + {1, "org.es", 2, false}, + {1, "gob.es", 2, false}, + {1, "edu.es", 2, false}, + {1, "et", 1, false}, + {1, "com.et", 2, false}, + {1, "gov.et", 2, false}, + {1, "org.et", 2, false}, + {1, "edu.et", 2, false}, + {1, "biz.et", 2, false}, + {1, "name.et", 2, false}, + {1, "info.et", 2, false}, + {1, "net.et", 2, false}, + {1, "eu", 1, false}, + {1, "fi", 1, false}, + {1, "aland.fi", 2, false}, + {1, "fj", 1, false}, + {1, "ac.fj", 2, false}, + {1, "biz.fj", 2, false}, + {1, "com.fj", 2, false}, + {1, "gov.fj", 2, false}, + {1, "info.fj", 2, false}, + {1, "mil.fj", 2, false}, + {1, "name.fj", 2, false}, + {1, "net.fj", 2, false}, + {1, "org.fj", 2, false}, + {1, 
"pro.fj", 2, false}, + {2, "fk", 2, false}, + {1, "fm", 1, false}, + {1, "fo", 1, false}, + {1, "fr", 1, false}, + {1, "asso.fr", 2, false}, + {1, "com.fr", 2, false}, + {1, "gouv.fr", 2, false}, + {1, "nom.fr", 2, false}, + {1, "prd.fr", 2, false}, + {1, "tm.fr", 2, false}, + {1, "aeroport.fr", 2, false}, + {1, "avocat.fr", 2, false}, + {1, "avoues.fr", 2, false}, + {1, "cci.fr", 2, false}, + {1, "chambagri.fr", 2, false}, + {1, "chirurgiens-dentistes.fr", 2, false}, + {1, "experts-comptables.fr", 2, false}, + {1, "geometre-expert.fr", 2, false}, + {1, "greta.fr", 2, false}, + {1, "huissier-justice.fr", 2, false}, + {1, "medecin.fr", 2, false}, + {1, "notaires.fr", 2, false}, + {1, "pharmacien.fr", 2, false}, + {1, "port.fr", 2, false}, + {1, "veterinaire.fr", 2, false}, + {1, "ga", 1, false}, + {1, "gb", 1, false}, + {1, "gd", 1, false}, + {1, "ge", 1, false}, + {1, "com.ge", 2, false}, + {1, "edu.ge", 2, false}, + {1, "gov.ge", 2, false}, + {1, "org.ge", 2, false}, + {1, "mil.ge", 2, false}, + {1, "net.ge", 2, false}, + {1, "pvt.ge", 2, false}, + {1, "gf", 1, false}, + {1, "gg", 1, false}, + {1, "co.gg", 2, false}, + {1, "net.gg", 2, false}, + {1, "org.gg", 2, false}, + {1, "gh", 1, false}, + {1, "com.gh", 2, false}, + {1, "edu.gh", 2, false}, + {1, "gov.gh", 2, false}, + {1, "org.gh", 2, false}, + {1, "mil.gh", 2, false}, + {1, "gi", 1, false}, + {1, "com.gi", 2, false}, + {1, "ltd.gi", 2, false}, + {1, "gov.gi", 2, false}, + {1, "mod.gi", 2, false}, + {1, "edu.gi", 2, false}, + {1, "org.gi", 2, false}, + {1, "gl", 1, false}, + {1, "co.gl", 2, false}, + {1, "com.gl", 2, false}, + {1, "edu.gl", 2, false}, + {1, "net.gl", 2, false}, + {1, "org.gl", 2, false}, + {1, "gm", 1, false}, + {1, "gn", 1, false}, + {1, "ac.gn", 2, false}, + {1, "com.gn", 2, false}, + {1, "edu.gn", 2, false}, + {1, "gov.gn", 2, false}, + {1, "org.gn", 2, false}, + {1, "net.gn", 2, false}, + {1, "gov", 1, false}, + {1, "gp", 1, false}, + {1, "com.gp", 2, false}, + {1, "net.gp", 2, false}, + 
{1, "mobi.gp", 2, false}, + {1, "edu.gp", 2, false}, + {1, "org.gp", 2, false}, + {1, "asso.gp", 2, false}, + {1, "gq", 1, false}, + {1, "gr", 1, false}, + {1, "com.gr", 2, false}, + {1, "edu.gr", 2, false}, + {1, "net.gr", 2, false}, + {1, "org.gr", 2, false}, + {1, "gov.gr", 2, false}, + {1, "gs", 1, false}, + {1, "gt", 1, false}, + {1, "com.gt", 2, false}, + {1, "edu.gt", 2, false}, + {1, "gob.gt", 2, false}, + {1, "ind.gt", 2, false}, + {1, "mil.gt", 2, false}, + {1, "net.gt", 2, false}, + {1, "org.gt", 2, false}, + {1, "gu", 1, false}, + {1, "com.gu", 2, false}, + {1, "edu.gu", 2, false}, + {1, "gov.gu", 2, false}, + {1, "guam.gu", 2, false}, + {1, "info.gu", 2, false}, + {1, "net.gu", 2, false}, + {1, "org.gu", 2, false}, + {1, "web.gu", 2, false}, + {1, "gw", 1, false}, + {1, "gy", 1, false}, + {1, "co.gy", 2, false}, + {1, "com.gy", 2, false}, + {1, "edu.gy", 2, false}, + {1, "gov.gy", 2, false}, + {1, "net.gy", 2, false}, + {1, "org.gy", 2, false}, + {1, "hk", 1, false}, + {1, "com.hk", 2, false}, + {1, "edu.hk", 2, false}, + {1, "gov.hk", 2, false}, + {1, "idv.hk", 2, false}, + {1, "net.hk", 2, false}, + {1, "org.hk", 2, false}, + {1, "xn--55qx5d.hk", 2, false}, + {1, "xn--wcvs22d.hk", 2, false}, + {1, "xn--lcvr32d.hk", 2, false}, + {1, "xn--mxtq1m.hk", 2, false}, + {1, "xn--gmqw5a.hk", 2, false}, + {1, "xn--ciqpn.hk", 2, false}, + {1, "xn--gmq050i.hk", 2, false}, + {1, "xn--zf0avx.hk", 2, false}, + {1, "xn--io0a7i.hk", 2, false}, + {1, "xn--mk0axi.hk", 2, false}, + {1, "xn--od0alg.hk", 2, false}, + {1, "xn--od0aq3b.hk", 2, false}, + {1, "xn--tn0ag.hk", 2, false}, + {1, "xn--uc0atv.hk", 2, false}, + {1, "xn--uc0ay4a.hk", 2, false}, + {1, "hm", 1, false}, + {1, "hn", 1, false}, + {1, "com.hn", 2, false}, + {1, "edu.hn", 2, false}, + {1, "org.hn", 2, false}, + {1, "net.hn", 2, false}, + {1, "mil.hn", 2, false}, + {1, "gob.hn", 2, false}, + {1, "hr", 1, false}, + {1, "iz.hr", 2, false}, + {1, "from.hr", 2, false}, + {1, "name.hr", 2, false}, + {1, "com.hr", 
2, false}, + {1, "ht", 1, false}, + {1, "com.ht", 2, false}, + {1, "shop.ht", 2, false}, + {1, "firm.ht", 2, false}, + {1, "info.ht", 2, false}, + {1, "adult.ht", 2, false}, + {1, "net.ht", 2, false}, + {1, "pro.ht", 2, false}, + {1, "org.ht", 2, false}, + {1, "med.ht", 2, false}, + {1, "art.ht", 2, false}, + {1, "coop.ht", 2, false}, + {1, "pol.ht", 2, false}, + {1, "asso.ht", 2, false}, + {1, "edu.ht", 2, false}, + {1, "rel.ht", 2, false}, + {1, "gouv.ht", 2, false}, + {1, "perso.ht", 2, false}, + {1, "hu", 1, false}, + {1, "co.hu", 2, false}, + {1, "info.hu", 2, false}, + {1, "org.hu", 2, false}, + {1, "priv.hu", 2, false}, + {1, "sport.hu", 2, false}, + {1, "tm.hu", 2, false}, + {1, "2000.hu", 2, false}, + {1, "agrar.hu", 2, false}, + {1, "bolt.hu", 2, false}, + {1, "casino.hu", 2, false}, + {1, "city.hu", 2, false}, + {1, "erotica.hu", 2, false}, + {1, "erotika.hu", 2, false}, + {1, "film.hu", 2, false}, + {1, "forum.hu", 2, false}, + {1, "games.hu", 2, false}, + {1, "hotel.hu", 2, false}, + {1, "ingatlan.hu", 2, false}, + {1, "jogasz.hu", 2, false}, + {1, "konyvelo.hu", 2, false}, + {1, "lakas.hu", 2, false}, + {1, "media.hu", 2, false}, + {1, "news.hu", 2, false}, + {1, "reklam.hu", 2, false}, + {1, "sex.hu", 2, false}, + {1, "shop.hu", 2, false}, + {1, "suli.hu", 2, false}, + {1, "szex.hu", 2, false}, + {1, "tozsde.hu", 2, false}, + {1, "utazas.hu", 2, false}, + {1, "video.hu", 2, false}, + {1, "id", 1, false}, + {1, "ac.id", 2, false}, + {1, "biz.id", 2, false}, + {1, "co.id", 2, false}, + {1, "desa.id", 2, false}, + {1, "go.id", 2, false}, + {1, "mil.id", 2, false}, + {1, "my.id", 2, false}, + {1, "net.id", 2, false}, + {1, "or.id", 2, false}, + {1, "ponpes.id", 2, false}, + {1, "sch.id", 2, false}, + {1, "web.id", 2, false}, + {1, "ie", 1, false}, + {1, "gov.ie", 2, false}, + {1, "il", 1, false}, + {1, "ac.il", 2, false}, + {1, "co.il", 2, false}, + {1, "gov.il", 2, false}, + {1, "idf.il", 2, false}, + {1, "k12.il", 2, false}, + {1, "muni.il", 2, false}, 
+ {1, "net.il", 2, false}, + {1, "org.il", 2, false}, + {1, "im", 1, false}, + {1, "ac.im", 2, false}, + {1, "co.im", 2, false}, + {1, "com.im", 2, false}, + {1, "ltd.co.im", 3, false}, + {1, "net.im", 2, false}, + {1, "org.im", 2, false}, + {1, "plc.co.im", 3, false}, + {1, "tt.im", 2, false}, + {1, "tv.im", 2, false}, + {1, "in", 1, false}, + {1, "co.in", 2, false}, + {1, "firm.in", 2, false}, + {1, "net.in", 2, false}, + {1, "org.in", 2, false}, + {1, "gen.in", 2, false}, + {1, "ind.in", 2, false}, + {1, "nic.in", 2, false}, + {1, "ac.in", 2, false}, + {1, "edu.in", 2, false}, + {1, "res.in", 2, false}, + {1, "gov.in", 2, false}, + {1, "mil.in", 2, false}, + {1, "info", 1, false}, + {1, "int", 1, false}, + {1, "eu.int", 2, false}, + {1, "io", 1, false}, + {1, "com.io", 2, false}, + {1, "iq", 1, false}, + {1, "gov.iq", 2, false}, + {1, "edu.iq", 2, false}, + {1, "mil.iq", 2, false}, + {1, "com.iq", 2, false}, + {1, "org.iq", 2, false}, + {1, "net.iq", 2, false}, + {1, "ir", 1, false}, + {1, "ac.ir", 2, false}, + {1, "co.ir", 2, false}, + {1, "gov.ir", 2, false}, + {1, "id.ir", 2, false}, + {1, "net.ir", 2, false}, + {1, "org.ir", 2, false}, + {1, "sch.ir", 2, false}, + {1, "xn--mgba3a4f16a.ir", 2, false}, + {1, "xn--mgba3a4fra.ir", 2, false}, + {1, "is", 1, false}, + {1, "net.is", 2, false}, + {1, "com.is", 2, false}, + {1, "edu.is", 2, false}, + {1, "gov.is", 2, false}, + {1, "org.is", 2, false}, + {1, "int.is", 2, false}, + {1, "it", 1, false}, + {1, "gov.it", 2, false}, + {1, "edu.it", 2, false}, + {1, "abr.it", 2, false}, + {1, "abruzzo.it", 2, false}, + {1, "aosta-valley.it", 2, false}, + {1, "aostavalley.it", 2, false}, + {1, "bas.it", 2, false}, + {1, "basilicata.it", 2, false}, + {1, "cal.it", 2, false}, + {1, "calabria.it", 2, false}, + {1, "cam.it", 2, false}, + {1, "campania.it", 2, false}, + {1, "emilia-romagna.it", 2, false}, + {1, "emiliaromagna.it", 2, false}, + {1, "emr.it", 2, false}, + {1, "friuli-v-giulia.it", 2, false}, + {1, 
"friuli-ve-giulia.it", 2, false}, + {1, "friuli-vegiulia.it", 2, false}, + {1, "friuli-venezia-giulia.it", 2, false}, + {1, "friuli-veneziagiulia.it", 2, false}, + {1, "friuli-vgiulia.it", 2, false}, + {1, "friuliv-giulia.it", 2, false}, + {1, "friulive-giulia.it", 2, false}, + {1, "friulivegiulia.it", 2, false}, + {1, "friulivenezia-giulia.it", 2, false}, + {1, "friuliveneziagiulia.it", 2, false}, + {1, "friulivgiulia.it", 2, false}, + {1, "fvg.it", 2, false}, + {1, "laz.it", 2, false}, + {1, "lazio.it", 2, false}, + {1, "lig.it", 2, false}, + {1, "liguria.it", 2, false}, + {1, "lom.it", 2, false}, + {1, "lombardia.it", 2, false}, + {1, "lombardy.it", 2, false}, + {1, "lucania.it", 2, false}, + {1, "mar.it", 2, false}, + {1, "marche.it", 2, false}, + {1, "mol.it", 2, false}, + {1, "molise.it", 2, false}, + {1, "piedmont.it", 2, false}, + {1, "piemonte.it", 2, false}, + {1, "pmn.it", 2, false}, + {1, "pug.it", 2, false}, + {1, "puglia.it", 2, false}, + {1, "sar.it", 2, false}, + {1, "sardegna.it", 2, false}, + {1, "sardinia.it", 2, false}, + {1, "sic.it", 2, false}, + {1, "sicilia.it", 2, false}, + {1, "sicily.it", 2, false}, + {1, "taa.it", 2, false}, + {1, "tos.it", 2, false}, + {1, "toscana.it", 2, false}, + {1, "trentin-sud-tirol.it", 2, false}, + {1, "xn--trentin-sd-tirol-rzb.it", 2, false}, + {1, "trentin-sudtirol.it", 2, false}, + {1, "xn--trentin-sdtirol-7vb.it", 2, false}, + {1, "trentin-sued-tirol.it", 2, false}, + {1, "trentin-suedtirol.it", 2, false}, + {1, "trentino-a-adige.it", 2, false}, + {1, "trentino-aadige.it", 2, false}, + {1, "trentino-alto-adige.it", 2, false}, + {1, "trentino-altoadige.it", 2, false}, + {1, "trentino-s-tirol.it", 2, false}, + {1, "trentino-stirol.it", 2, false}, + {1, "trentino-sud-tirol.it", 2, false}, + {1, "xn--trentino-sd-tirol-c3b.it", 2, false}, + {1, "trentino-sudtirol.it", 2, false}, + {1, "xn--trentino-sdtirol-szb.it", 2, false}, + {1, "trentino-sued-tirol.it", 2, false}, + {1, "trentino-suedtirol.it", 2, false}, + 
{1, "trentino.it", 2, false}, + {1, "trentinoa-adige.it", 2, false}, + {1, "trentinoaadige.it", 2, false}, + {1, "trentinoalto-adige.it", 2, false}, + {1, "trentinoaltoadige.it", 2, false}, + {1, "trentinos-tirol.it", 2, false}, + {1, "trentinostirol.it", 2, false}, + {1, "trentinosud-tirol.it", 2, false}, + {1, "xn--trentinosd-tirol-rzb.it", 2, false}, + {1, "trentinosudtirol.it", 2, false}, + {1, "xn--trentinosdtirol-7vb.it", 2, false}, + {1, "trentinosued-tirol.it", 2, false}, + {1, "trentinosuedtirol.it", 2, false}, + {1, "trentinsud-tirol.it", 2, false}, + {1, "xn--trentinsd-tirol-6vb.it", 2, false}, + {1, "trentinsudtirol.it", 2, false}, + {1, "xn--trentinsdtirol-nsb.it", 2, false}, + {1, "trentinsued-tirol.it", 2, false}, + {1, "trentinsuedtirol.it", 2, false}, + {1, "tuscany.it", 2, false}, + {1, "umb.it", 2, false}, + {1, "umbria.it", 2, false}, + {1, "val-d-aosta.it", 2, false}, + {1, "val-daosta.it", 2, false}, + {1, "vald-aosta.it", 2, false}, + {1, "valdaosta.it", 2, false}, + {1, "valle-aosta.it", 2, false}, + {1, "valle-d-aosta.it", 2, false}, + {1, "valle-daosta.it", 2, false}, + {1, "valleaosta.it", 2, false}, + {1, "valled-aosta.it", 2, false}, + {1, "valledaosta.it", 2, false}, + {1, "vallee-aoste.it", 2, false}, + {1, "xn--valle-aoste-ebb.it", 2, false}, + {1, "vallee-d-aoste.it", 2, false}, + {1, "xn--valle-d-aoste-ehb.it", 2, false}, + {1, "valleeaoste.it", 2, false}, + {1, "xn--valleaoste-e7a.it", 2, false}, + {1, "valleedaoste.it", 2, false}, + {1, "xn--valledaoste-ebb.it", 2, false}, + {1, "vao.it", 2, false}, + {1, "vda.it", 2, false}, + {1, "ven.it", 2, false}, + {1, "veneto.it", 2, false}, + {1, "ag.it", 2, false}, + {1, "agrigento.it", 2, false}, + {1, "al.it", 2, false}, + {1, "alessandria.it", 2, false}, + {1, "alto-adige.it", 2, false}, + {1, "altoadige.it", 2, false}, + {1, "an.it", 2, false}, + {1, "ancona.it", 2, false}, + {1, "andria-barletta-trani.it", 2, false}, + {1, "andria-trani-barletta.it", 2, false}, + {1, 
"andriabarlettatrani.it", 2, false}, + {1, "andriatranibarletta.it", 2, false}, + {1, "ao.it", 2, false}, + {1, "aosta.it", 2, false}, + {1, "aoste.it", 2, false}, + {1, "ap.it", 2, false}, + {1, "aq.it", 2, false}, + {1, "aquila.it", 2, false}, + {1, "ar.it", 2, false}, + {1, "arezzo.it", 2, false}, + {1, "ascoli-piceno.it", 2, false}, + {1, "ascolipiceno.it", 2, false}, + {1, "asti.it", 2, false}, + {1, "at.it", 2, false}, + {1, "av.it", 2, false}, + {1, "avellino.it", 2, false}, + {1, "ba.it", 2, false}, + {1, "balsan-sudtirol.it", 2, false}, + {1, "xn--balsan-sdtirol-nsb.it", 2, false}, + {1, "balsan-suedtirol.it", 2, false}, + {1, "balsan.it", 2, false}, + {1, "bari.it", 2, false}, + {1, "barletta-trani-andria.it", 2, false}, + {1, "barlettatraniandria.it", 2, false}, + {1, "belluno.it", 2, false}, + {1, "benevento.it", 2, false}, + {1, "bergamo.it", 2, false}, + {1, "bg.it", 2, false}, + {1, "bi.it", 2, false}, + {1, "biella.it", 2, false}, + {1, "bl.it", 2, false}, + {1, "bn.it", 2, false}, + {1, "bo.it", 2, false}, + {1, "bologna.it", 2, false}, + {1, "bolzano-altoadige.it", 2, false}, + {1, "bolzano.it", 2, false}, + {1, "bozen-sudtirol.it", 2, false}, + {1, "xn--bozen-sdtirol-2ob.it", 2, false}, + {1, "bozen-suedtirol.it", 2, false}, + {1, "bozen.it", 2, false}, + {1, "br.it", 2, false}, + {1, "brescia.it", 2, false}, + {1, "brindisi.it", 2, false}, + {1, "bs.it", 2, false}, + {1, "bt.it", 2, false}, + {1, "bulsan-sudtirol.it", 2, false}, + {1, "xn--bulsan-sdtirol-nsb.it", 2, false}, + {1, "bulsan-suedtirol.it", 2, false}, + {1, "bulsan.it", 2, false}, + {1, "bz.it", 2, false}, + {1, "ca.it", 2, false}, + {1, "cagliari.it", 2, false}, + {1, "caltanissetta.it", 2, false}, + {1, "campidano-medio.it", 2, false}, + {1, "campidanomedio.it", 2, false}, + {1, "campobasso.it", 2, false}, + {1, "carbonia-iglesias.it", 2, false}, + {1, "carboniaiglesias.it", 2, false}, + {1, "carrara-massa.it", 2, false}, + {1, "carraramassa.it", 2, false}, + {1, "caserta.it", 2, 
false}, + {1, "catania.it", 2, false}, + {1, "catanzaro.it", 2, false}, + {1, "cb.it", 2, false}, + {1, "ce.it", 2, false}, + {1, "cesena-forli.it", 2, false}, + {1, "xn--cesena-forl-mcb.it", 2, false}, + {1, "cesenaforli.it", 2, false}, + {1, "xn--cesenaforl-i8a.it", 2, false}, + {1, "ch.it", 2, false}, + {1, "chieti.it", 2, false}, + {1, "ci.it", 2, false}, + {1, "cl.it", 2, false}, + {1, "cn.it", 2, false}, + {1, "co.it", 2, false}, + {1, "como.it", 2, false}, + {1, "cosenza.it", 2, false}, + {1, "cr.it", 2, false}, + {1, "cremona.it", 2, false}, + {1, "crotone.it", 2, false}, + {1, "cs.it", 2, false}, + {1, "ct.it", 2, false}, + {1, "cuneo.it", 2, false}, + {1, "cz.it", 2, false}, + {1, "dell-ogliastra.it", 2, false}, + {1, "dellogliastra.it", 2, false}, + {1, "en.it", 2, false}, + {1, "enna.it", 2, false}, + {1, "fc.it", 2, false}, + {1, "fe.it", 2, false}, + {1, "fermo.it", 2, false}, + {1, "ferrara.it", 2, false}, + {1, "fg.it", 2, false}, + {1, "fi.it", 2, false}, + {1, "firenze.it", 2, false}, + {1, "florence.it", 2, false}, + {1, "fm.it", 2, false}, + {1, "foggia.it", 2, false}, + {1, "forli-cesena.it", 2, false}, + {1, "xn--forl-cesena-fcb.it", 2, false}, + {1, "forlicesena.it", 2, false}, + {1, "xn--forlcesena-c8a.it", 2, false}, + {1, "fr.it", 2, false}, + {1, "frosinone.it", 2, false}, + {1, "ge.it", 2, false}, + {1, "genoa.it", 2, false}, + {1, "genova.it", 2, false}, + {1, "go.it", 2, false}, + {1, "gorizia.it", 2, false}, + {1, "gr.it", 2, false}, + {1, "grosseto.it", 2, false}, + {1, "iglesias-carbonia.it", 2, false}, + {1, "iglesiascarbonia.it", 2, false}, + {1, "im.it", 2, false}, + {1, "imperia.it", 2, false}, + {1, "is.it", 2, false}, + {1, "isernia.it", 2, false}, + {1, "kr.it", 2, false}, + {1, "la-spezia.it", 2, false}, + {1, "laquila.it", 2, false}, + {1, "laspezia.it", 2, false}, + {1, "latina.it", 2, false}, + {1, "lc.it", 2, false}, + {1, "le.it", 2, false}, + {1, "lecce.it", 2, false}, + {1, "lecco.it", 2, false}, + {1, "li.it", 2, 
false}, + {1, "livorno.it", 2, false}, + {1, "lo.it", 2, false}, + {1, "lodi.it", 2, false}, + {1, "lt.it", 2, false}, + {1, "lu.it", 2, false}, + {1, "lucca.it", 2, false}, + {1, "macerata.it", 2, false}, + {1, "mantova.it", 2, false}, + {1, "massa-carrara.it", 2, false}, + {1, "massacarrara.it", 2, false}, + {1, "matera.it", 2, false}, + {1, "mb.it", 2, false}, + {1, "mc.it", 2, false}, + {1, "me.it", 2, false}, + {1, "medio-campidano.it", 2, false}, + {1, "mediocampidano.it", 2, false}, + {1, "messina.it", 2, false}, + {1, "mi.it", 2, false}, + {1, "milan.it", 2, false}, + {1, "milano.it", 2, false}, + {1, "mn.it", 2, false}, + {1, "mo.it", 2, false}, + {1, "modena.it", 2, false}, + {1, "monza-brianza.it", 2, false}, + {1, "monza-e-della-brianza.it", 2, false}, + {1, "monza.it", 2, false}, + {1, "monzabrianza.it", 2, false}, + {1, "monzaebrianza.it", 2, false}, + {1, "monzaedellabrianza.it", 2, false}, + {1, "ms.it", 2, false}, + {1, "mt.it", 2, false}, + {1, "na.it", 2, false}, + {1, "naples.it", 2, false}, + {1, "napoli.it", 2, false}, + {1, "no.it", 2, false}, + {1, "novara.it", 2, false}, + {1, "nu.it", 2, false}, + {1, "nuoro.it", 2, false}, + {1, "og.it", 2, false}, + {1, "ogliastra.it", 2, false}, + {1, "olbia-tempio.it", 2, false}, + {1, "olbiatempio.it", 2, false}, + {1, "or.it", 2, false}, + {1, "oristano.it", 2, false}, + {1, "ot.it", 2, false}, + {1, "pa.it", 2, false}, + {1, "padova.it", 2, false}, + {1, "padua.it", 2, false}, + {1, "palermo.it", 2, false}, + {1, "parma.it", 2, false}, + {1, "pavia.it", 2, false}, + {1, "pc.it", 2, false}, + {1, "pd.it", 2, false}, + {1, "pe.it", 2, false}, + {1, "perugia.it", 2, false}, + {1, "pesaro-urbino.it", 2, false}, + {1, "pesarourbino.it", 2, false}, + {1, "pescara.it", 2, false}, + {1, "pg.it", 2, false}, + {1, "pi.it", 2, false}, + {1, "piacenza.it", 2, false}, + {1, "pisa.it", 2, false}, + {1, "pistoia.it", 2, false}, + {1, "pn.it", 2, false}, + {1, "po.it", 2, false}, + {1, "pordenone.it", 2, false}, + 
{1, "potenza.it", 2, false}, + {1, "pr.it", 2, false}, + {1, "prato.it", 2, false}, + {1, "pt.it", 2, false}, + {1, "pu.it", 2, false}, + {1, "pv.it", 2, false}, + {1, "pz.it", 2, false}, + {1, "ra.it", 2, false}, + {1, "ragusa.it", 2, false}, + {1, "ravenna.it", 2, false}, + {1, "rc.it", 2, false}, + {1, "re.it", 2, false}, + {1, "reggio-calabria.it", 2, false}, + {1, "reggio-emilia.it", 2, false}, + {1, "reggiocalabria.it", 2, false}, + {1, "reggioemilia.it", 2, false}, + {1, "rg.it", 2, false}, + {1, "ri.it", 2, false}, + {1, "rieti.it", 2, false}, + {1, "rimini.it", 2, false}, + {1, "rm.it", 2, false}, + {1, "rn.it", 2, false}, + {1, "ro.it", 2, false}, + {1, "roma.it", 2, false}, + {1, "rome.it", 2, false}, + {1, "rovigo.it", 2, false}, + {1, "sa.it", 2, false}, + {1, "salerno.it", 2, false}, + {1, "sassari.it", 2, false}, + {1, "savona.it", 2, false}, + {1, "si.it", 2, false}, + {1, "siena.it", 2, false}, + {1, "siracusa.it", 2, false}, + {1, "so.it", 2, false}, + {1, "sondrio.it", 2, false}, + {1, "sp.it", 2, false}, + {1, "sr.it", 2, false}, + {1, "ss.it", 2, false}, + {1, "suedtirol.it", 2, false}, + {1, "xn--sdtirol-n2a.it", 2, false}, + {1, "sv.it", 2, false}, + {1, "ta.it", 2, false}, + {1, "taranto.it", 2, false}, + {1, "te.it", 2, false}, + {1, "tempio-olbia.it", 2, false}, + {1, "tempioolbia.it", 2, false}, + {1, "teramo.it", 2, false}, + {1, "terni.it", 2, false}, + {1, "tn.it", 2, false}, + {1, "to.it", 2, false}, + {1, "torino.it", 2, false}, + {1, "tp.it", 2, false}, + {1, "tr.it", 2, false}, + {1, "trani-andria-barletta.it", 2, false}, + {1, "trani-barletta-andria.it", 2, false}, + {1, "traniandriabarletta.it", 2, false}, + {1, "tranibarlettaandria.it", 2, false}, + {1, "trapani.it", 2, false}, + {1, "trento.it", 2, false}, + {1, "treviso.it", 2, false}, + {1, "trieste.it", 2, false}, + {1, "ts.it", 2, false}, + {1, "turin.it", 2, false}, + {1, "tv.it", 2, false}, + {1, "ud.it", 2, false}, + {1, "udine.it", 2, false}, + {1, "urbino-pesaro.it", 
2, false}, + {1, "urbinopesaro.it", 2, false}, + {1, "va.it", 2, false}, + {1, "varese.it", 2, false}, + {1, "vb.it", 2, false}, + {1, "vc.it", 2, false}, + {1, "ve.it", 2, false}, + {1, "venezia.it", 2, false}, + {1, "venice.it", 2, false}, + {1, "verbania.it", 2, false}, + {1, "vercelli.it", 2, false}, + {1, "verona.it", 2, false}, + {1, "vi.it", 2, false}, + {1, "vibo-valentia.it", 2, false}, + {1, "vibovalentia.it", 2, false}, + {1, "vicenza.it", 2, false}, + {1, "viterbo.it", 2, false}, + {1, "vr.it", 2, false}, + {1, "vs.it", 2, false}, + {1, "vt.it", 2, false}, + {1, "vv.it", 2, false}, + {1, "je", 1, false}, + {1, "co.je", 2, false}, + {1, "net.je", 2, false}, + {1, "org.je", 2, false}, + {2, "jm", 2, false}, + {1, "jo", 1, false}, + {1, "com.jo", 2, false}, + {1, "org.jo", 2, false}, + {1, "net.jo", 2, false}, + {1, "edu.jo", 2, false}, + {1, "sch.jo", 2, false}, + {1, "gov.jo", 2, false}, + {1, "mil.jo", 2, false}, + {1, "name.jo", 2, false}, + {1, "jobs", 1, false}, + {1, "jp", 1, false}, + {1, "ac.jp", 2, false}, + {1, "ad.jp", 2, false}, + {1, "co.jp", 2, false}, + {1, "ed.jp", 2, false}, + {1, "go.jp", 2, false}, + {1, "gr.jp", 2, false}, + {1, "lg.jp", 2, false}, + {1, "ne.jp", 2, false}, + {1, "or.jp", 2, false}, + {1, "aichi.jp", 2, false}, + {1, "akita.jp", 2, false}, + {1, "aomori.jp", 2, false}, + {1, "chiba.jp", 2, false}, + {1, "ehime.jp", 2, false}, + {1, "fukui.jp", 2, false}, + {1, "fukuoka.jp", 2, false}, + {1, "fukushima.jp", 2, false}, + {1, "gifu.jp", 2, false}, + {1, "gunma.jp", 2, false}, + {1, "hiroshima.jp", 2, false}, + {1, "hokkaido.jp", 2, false}, + {1, "hyogo.jp", 2, false}, + {1, "ibaraki.jp", 2, false}, + {1, "ishikawa.jp", 2, false}, + {1, "iwate.jp", 2, false}, + {1, "kagawa.jp", 2, false}, + {1, "kagoshima.jp", 2, false}, + {1, "kanagawa.jp", 2, false}, + {1, "kochi.jp", 2, false}, + {1, "kumamoto.jp", 2, false}, + {1, "kyoto.jp", 2, false}, + {1, "mie.jp", 2, false}, + {1, "miyagi.jp", 2, false}, + {1, "miyazaki.jp", 2, 
false}, + {1, "nagano.jp", 2, false}, + {1, "nagasaki.jp", 2, false}, + {1, "nara.jp", 2, false}, + {1, "niigata.jp", 2, false}, + {1, "oita.jp", 2, false}, + {1, "okayama.jp", 2, false}, + {1, "okinawa.jp", 2, false}, + {1, "osaka.jp", 2, false}, + {1, "saga.jp", 2, false}, + {1, "saitama.jp", 2, false}, + {1, "shiga.jp", 2, false}, + {1, "shimane.jp", 2, false}, + {1, "shizuoka.jp", 2, false}, + {1, "tochigi.jp", 2, false}, + {1, "tokushima.jp", 2, false}, + {1, "tokyo.jp", 2, false}, + {1, "tottori.jp", 2, false}, + {1, "toyama.jp", 2, false}, + {1, "wakayama.jp", 2, false}, + {1, "yamagata.jp", 2, false}, + {1, "yamaguchi.jp", 2, false}, + {1, "yamanashi.jp", 2, false}, + {1, "xn--4pvxs.jp", 2, false}, + {1, "xn--vgu402c.jp", 2, false}, + {1, "xn--c3s14m.jp", 2, false}, + {1, "xn--f6qx53a.jp", 2, false}, + {1, "xn--8pvr4u.jp", 2, false}, + {1, "xn--uist22h.jp", 2, false}, + {1, "xn--djrs72d6uy.jp", 2, false}, + {1, "xn--mkru45i.jp", 2, false}, + {1, "xn--0trq7p7nn.jp", 2, false}, + {1, "xn--8ltr62k.jp", 2, false}, + {1, "xn--2m4a15e.jp", 2, false}, + {1, "xn--efvn9s.jp", 2, false}, + {1, "xn--32vp30h.jp", 2, false}, + {1, "xn--4it797k.jp", 2, false}, + {1, "xn--1lqs71d.jp", 2, false}, + {1, "xn--5rtp49c.jp", 2, false}, + {1, "xn--5js045d.jp", 2, false}, + {1, "xn--ehqz56n.jp", 2, false}, + {1, "xn--1lqs03n.jp", 2, false}, + {1, "xn--qqqt11m.jp", 2, false}, + {1, "xn--kbrq7o.jp", 2, false}, + {1, "xn--pssu33l.jp", 2, false}, + {1, "xn--ntsq17g.jp", 2, false}, + {1, "xn--uisz3g.jp", 2, false}, + {1, "xn--6btw5a.jp", 2, false}, + {1, "xn--1ctwo.jp", 2, false}, + {1, "xn--6orx2r.jp", 2, false}, + {1, "xn--rht61e.jp", 2, false}, + {1, "xn--rht27z.jp", 2, false}, + {1, "xn--djty4k.jp", 2, false}, + {1, "xn--nit225k.jp", 2, false}, + {1, "xn--rht3d.jp", 2, false}, + {1, "xn--klty5x.jp", 2, false}, + {1, "xn--kltx9a.jp", 2, false}, + {1, "xn--kltp7d.jp", 2, false}, + {1, "xn--uuwu58a.jp", 2, false}, + {1, "xn--zbx025d.jp", 2, false}, + {1, "xn--ntso0iqx3a.jp", 2, 
false}, + {1, "xn--elqq16h.jp", 2, false}, + {1, "xn--4it168d.jp", 2, false}, + {1, "xn--klt787d.jp", 2, false}, + {1, "xn--rny31h.jp", 2, false}, + {1, "xn--7t0a264c.jp", 2, false}, + {1, "xn--5rtq34k.jp", 2, false}, + {1, "xn--k7yn95e.jp", 2, false}, + {1, "xn--tor131o.jp", 2, false}, + {1, "xn--d5qv7z876c.jp", 2, false}, + {2, "kawasaki.jp", 3, false}, + {2, "kitakyushu.jp", 3, false}, + {2, "kobe.jp", 3, false}, + {2, "nagoya.jp", 3, false}, + {2, "sapporo.jp", 3, false}, + {2, "sendai.jp", 3, false}, + {2, "yokohama.jp", 3, false}, + {3, "city.kawasaki.jp", 3, false}, + {3, "city.kitakyushu.jp", 3, false}, + {3, "city.kobe.jp", 3, false}, + {3, "city.nagoya.jp", 3, false}, + {3, "city.sapporo.jp", 3, false}, + {3, "city.sendai.jp", 3, false}, + {3, "city.yokohama.jp", 3, false}, + {1, "aisai.aichi.jp", 3, false}, + {1, "ama.aichi.jp", 3, false}, + {1, "anjo.aichi.jp", 3, false}, + {1, "asuke.aichi.jp", 3, false}, + {1, "chiryu.aichi.jp", 3, false}, + {1, "chita.aichi.jp", 3, false}, + {1, "fuso.aichi.jp", 3, false}, + {1, "gamagori.aichi.jp", 3, false}, + {1, "handa.aichi.jp", 3, false}, + {1, "hazu.aichi.jp", 3, false}, + {1, "hekinan.aichi.jp", 3, false}, + {1, "higashiura.aichi.jp", 3, false}, + {1, "ichinomiya.aichi.jp", 3, false}, + {1, "inazawa.aichi.jp", 3, false}, + {1, "inuyama.aichi.jp", 3, false}, + {1, "isshiki.aichi.jp", 3, false}, + {1, "iwakura.aichi.jp", 3, false}, + {1, "kanie.aichi.jp", 3, false}, + {1, "kariya.aichi.jp", 3, false}, + {1, "kasugai.aichi.jp", 3, false}, + {1, "kira.aichi.jp", 3, false}, + {1, "kiyosu.aichi.jp", 3, false}, + {1, "komaki.aichi.jp", 3, false}, + {1, "konan.aichi.jp", 3, false}, + {1, "kota.aichi.jp", 3, false}, + {1, "mihama.aichi.jp", 3, false}, + {1, "miyoshi.aichi.jp", 3, false}, + {1, "nishio.aichi.jp", 3, false}, + {1, "nisshin.aichi.jp", 3, false}, + {1, "obu.aichi.jp", 3, false}, + {1, "oguchi.aichi.jp", 3, false}, + {1, "oharu.aichi.jp", 3, false}, + {1, "okazaki.aichi.jp", 3, false}, + {1, 
"owariasahi.aichi.jp", 3, false}, + {1, "seto.aichi.jp", 3, false}, + {1, "shikatsu.aichi.jp", 3, false}, + {1, "shinshiro.aichi.jp", 3, false}, + {1, "shitara.aichi.jp", 3, false}, + {1, "tahara.aichi.jp", 3, false}, + {1, "takahama.aichi.jp", 3, false}, + {1, "tobishima.aichi.jp", 3, false}, + {1, "toei.aichi.jp", 3, false}, + {1, "togo.aichi.jp", 3, false}, + {1, "tokai.aichi.jp", 3, false}, + {1, "tokoname.aichi.jp", 3, false}, + {1, "toyoake.aichi.jp", 3, false}, + {1, "toyohashi.aichi.jp", 3, false}, + {1, "toyokawa.aichi.jp", 3, false}, + {1, "toyone.aichi.jp", 3, false}, + {1, "toyota.aichi.jp", 3, false}, + {1, "tsushima.aichi.jp", 3, false}, + {1, "yatomi.aichi.jp", 3, false}, + {1, "akita.akita.jp", 3, false}, + {1, "daisen.akita.jp", 3, false}, + {1, "fujisato.akita.jp", 3, false}, + {1, "gojome.akita.jp", 3, false}, + {1, "hachirogata.akita.jp", 3, false}, + {1, "happou.akita.jp", 3, false}, + {1, "higashinaruse.akita.jp", 3, false}, + {1, "honjo.akita.jp", 3, false}, + {1, "honjyo.akita.jp", 3, false}, + {1, "ikawa.akita.jp", 3, false}, + {1, "kamikoani.akita.jp", 3, false}, + {1, "kamioka.akita.jp", 3, false}, + {1, "katagami.akita.jp", 3, false}, + {1, "kazuno.akita.jp", 3, false}, + {1, "kitaakita.akita.jp", 3, false}, + {1, "kosaka.akita.jp", 3, false}, + {1, "kyowa.akita.jp", 3, false}, + {1, "misato.akita.jp", 3, false}, + {1, "mitane.akita.jp", 3, false}, + {1, "moriyoshi.akita.jp", 3, false}, + {1, "nikaho.akita.jp", 3, false}, + {1, "noshiro.akita.jp", 3, false}, + {1, "odate.akita.jp", 3, false}, + {1, "oga.akita.jp", 3, false}, + {1, "ogata.akita.jp", 3, false}, + {1, "semboku.akita.jp", 3, false}, + {1, "yokote.akita.jp", 3, false}, + {1, "yurihonjo.akita.jp", 3, false}, + {1, "aomori.aomori.jp", 3, false}, + {1, "gonohe.aomori.jp", 3, false}, + {1, "hachinohe.aomori.jp", 3, false}, + {1, "hashikami.aomori.jp", 3, false}, + {1, "hiranai.aomori.jp", 3, false}, + {1, "hirosaki.aomori.jp", 3, false}, + {1, "itayanagi.aomori.jp", 3, false}, + 
{1, "kuroishi.aomori.jp", 3, false}, + {1, "misawa.aomori.jp", 3, false}, + {1, "mutsu.aomori.jp", 3, false}, + {1, "nakadomari.aomori.jp", 3, false}, + {1, "noheji.aomori.jp", 3, false}, + {1, "oirase.aomori.jp", 3, false}, + {1, "owani.aomori.jp", 3, false}, + {1, "rokunohe.aomori.jp", 3, false}, + {1, "sannohe.aomori.jp", 3, false}, + {1, "shichinohe.aomori.jp", 3, false}, + {1, "shingo.aomori.jp", 3, false}, + {1, "takko.aomori.jp", 3, false}, + {1, "towada.aomori.jp", 3, false}, + {1, "tsugaru.aomori.jp", 3, false}, + {1, "tsuruta.aomori.jp", 3, false}, + {1, "abiko.chiba.jp", 3, false}, + {1, "asahi.chiba.jp", 3, false}, + {1, "chonan.chiba.jp", 3, false}, + {1, "chosei.chiba.jp", 3, false}, + {1, "choshi.chiba.jp", 3, false}, + {1, "chuo.chiba.jp", 3, false}, + {1, "funabashi.chiba.jp", 3, false}, + {1, "futtsu.chiba.jp", 3, false}, + {1, "hanamigawa.chiba.jp", 3, false}, + {1, "ichihara.chiba.jp", 3, false}, + {1, "ichikawa.chiba.jp", 3, false}, + {1, "ichinomiya.chiba.jp", 3, false}, + {1, "inzai.chiba.jp", 3, false}, + {1, "isumi.chiba.jp", 3, false}, + {1, "kamagaya.chiba.jp", 3, false}, + {1, "kamogawa.chiba.jp", 3, false}, + {1, "kashiwa.chiba.jp", 3, false}, + {1, "katori.chiba.jp", 3, false}, + {1, "katsuura.chiba.jp", 3, false}, + {1, "kimitsu.chiba.jp", 3, false}, + {1, "kisarazu.chiba.jp", 3, false}, + {1, "kozaki.chiba.jp", 3, false}, + {1, "kujukuri.chiba.jp", 3, false}, + {1, "kyonan.chiba.jp", 3, false}, + {1, "matsudo.chiba.jp", 3, false}, + {1, "midori.chiba.jp", 3, false}, + {1, "mihama.chiba.jp", 3, false}, + {1, "minamiboso.chiba.jp", 3, false}, + {1, "mobara.chiba.jp", 3, false}, + {1, "mutsuzawa.chiba.jp", 3, false}, + {1, "nagara.chiba.jp", 3, false}, + {1, "nagareyama.chiba.jp", 3, false}, + {1, "narashino.chiba.jp", 3, false}, + {1, "narita.chiba.jp", 3, false}, + {1, "noda.chiba.jp", 3, false}, + {1, "oamishirasato.chiba.jp", 3, false}, + {1, "omigawa.chiba.jp", 3, false}, + {1, "onjuku.chiba.jp", 3, false}, + {1, "otaki.chiba.jp", 
3, false}, + {1, "sakae.chiba.jp", 3, false}, + {1, "sakura.chiba.jp", 3, false}, + {1, "shimofusa.chiba.jp", 3, false}, + {1, "shirako.chiba.jp", 3, false}, + {1, "shiroi.chiba.jp", 3, false}, + {1, "shisui.chiba.jp", 3, false}, + {1, "sodegaura.chiba.jp", 3, false}, + {1, "sosa.chiba.jp", 3, false}, + {1, "tako.chiba.jp", 3, false}, + {1, "tateyama.chiba.jp", 3, false}, + {1, "togane.chiba.jp", 3, false}, + {1, "tohnosho.chiba.jp", 3, false}, + {1, "tomisato.chiba.jp", 3, false}, + {1, "urayasu.chiba.jp", 3, false}, + {1, "yachimata.chiba.jp", 3, false}, + {1, "yachiyo.chiba.jp", 3, false}, + {1, "yokaichiba.chiba.jp", 3, false}, + {1, "yokoshibahikari.chiba.jp", 3, false}, + {1, "yotsukaido.chiba.jp", 3, false}, + {1, "ainan.ehime.jp", 3, false}, + {1, "honai.ehime.jp", 3, false}, + {1, "ikata.ehime.jp", 3, false}, + {1, "imabari.ehime.jp", 3, false}, + {1, "iyo.ehime.jp", 3, false}, + {1, "kamijima.ehime.jp", 3, false}, + {1, "kihoku.ehime.jp", 3, false}, + {1, "kumakogen.ehime.jp", 3, false}, + {1, "masaki.ehime.jp", 3, false}, + {1, "matsuno.ehime.jp", 3, false}, + {1, "matsuyama.ehime.jp", 3, false}, + {1, "namikata.ehime.jp", 3, false}, + {1, "niihama.ehime.jp", 3, false}, + {1, "ozu.ehime.jp", 3, false}, + {1, "saijo.ehime.jp", 3, false}, + {1, "seiyo.ehime.jp", 3, false}, + {1, "shikokuchuo.ehime.jp", 3, false}, + {1, "tobe.ehime.jp", 3, false}, + {1, "toon.ehime.jp", 3, false}, + {1, "uchiko.ehime.jp", 3, false}, + {1, "uwajima.ehime.jp", 3, false}, + {1, "yawatahama.ehime.jp", 3, false}, + {1, "echizen.fukui.jp", 3, false}, + {1, "eiheiji.fukui.jp", 3, false}, + {1, "fukui.fukui.jp", 3, false}, + {1, "ikeda.fukui.jp", 3, false}, + {1, "katsuyama.fukui.jp", 3, false}, + {1, "mihama.fukui.jp", 3, false}, + {1, "minamiechizen.fukui.jp", 3, false}, + {1, "obama.fukui.jp", 3, false}, + {1, "ohi.fukui.jp", 3, false}, + {1, "ono.fukui.jp", 3, false}, + {1, "sabae.fukui.jp", 3, false}, + {1, "sakai.fukui.jp", 3, false}, + {1, "takahama.fukui.jp", 3, false}, + 
{1, "tsuruga.fukui.jp", 3, false}, + {1, "wakasa.fukui.jp", 3, false}, + {1, "ashiya.fukuoka.jp", 3, false}, + {1, "buzen.fukuoka.jp", 3, false}, + {1, "chikugo.fukuoka.jp", 3, false}, + {1, "chikuho.fukuoka.jp", 3, false}, + {1, "chikujo.fukuoka.jp", 3, false}, + {1, "chikushino.fukuoka.jp", 3, false}, + {1, "chikuzen.fukuoka.jp", 3, false}, + {1, "chuo.fukuoka.jp", 3, false}, + {1, "dazaifu.fukuoka.jp", 3, false}, + {1, "fukuchi.fukuoka.jp", 3, false}, + {1, "hakata.fukuoka.jp", 3, false}, + {1, "higashi.fukuoka.jp", 3, false}, + {1, "hirokawa.fukuoka.jp", 3, false}, + {1, "hisayama.fukuoka.jp", 3, false}, + {1, "iizuka.fukuoka.jp", 3, false}, + {1, "inatsuki.fukuoka.jp", 3, false}, + {1, "kaho.fukuoka.jp", 3, false}, + {1, "kasuga.fukuoka.jp", 3, false}, + {1, "kasuya.fukuoka.jp", 3, false}, + {1, "kawara.fukuoka.jp", 3, false}, + {1, "keisen.fukuoka.jp", 3, false}, + {1, "koga.fukuoka.jp", 3, false}, + {1, "kurate.fukuoka.jp", 3, false}, + {1, "kurogi.fukuoka.jp", 3, false}, + {1, "kurume.fukuoka.jp", 3, false}, + {1, "minami.fukuoka.jp", 3, false}, + {1, "miyako.fukuoka.jp", 3, false}, + {1, "miyama.fukuoka.jp", 3, false}, + {1, "miyawaka.fukuoka.jp", 3, false}, + {1, "mizumaki.fukuoka.jp", 3, false}, + {1, "munakata.fukuoka.jp", 3, false}, + {1, "nakagawa.fukuoka.jp", 3, false}, + {1, "nakama.fukuoka.jp", 3, false}, + {1, "nishi.fukuoka.jp", 3, false}, + {1, "nogata.fukuoka.jp", 3, false}, + {1, "ogori.fukuoka.jp", 3, false}, + {1, "okagaki.fukuoka.jp", 3, false}, + {1, "okawa.fukuoka.jp", 3, false}, + {1, "oki.fukuoka.jp", 3, false}, + {1, "omuta.fukuoka.jp", 3, false}, + {1, "onga.fukuoka.jp", 3, false}, + {1, "onojo.fukuoka.jp", 3, false}, + {1, "oto.fukuoka.jp", 3, false}, + {1, "saigawa.fukuoka.jp", 3, false}, + {1, "sasaguri.fukuoka.jp", 3, false}, + {1, "shingu.fukuoka.jp", 3, false}, + {1, "shinyoshitomi.fukuoka.jp", 3, false}, + {1, "shonai.fukuoka.jp", 3, false}, + {1, "soeda.fukuoka.jp", 3, false}, + {1, "sue.fukuoka.jp", 3, false}, + {1, 
"tachiarai.fukuoka.jp", 3, false}, + {1, "tagawa.fukuoka.jp", 3, false}, + {1, "takata.fukuoka.jp", 3, false}, + {1, "toho.fukuoka.jp", 3, false}, + {1, "toyotsu.fukuoka.jp", 3, false}, + {1, "tsuiki.fukuoka.jp", 3, false}, + {1, "ukiha.fukuoka.jp", 3, false}, + {1, "umi.fukuoka.jp", 3, false}, + {1, "usui.fukuoka.jp", 3, false}, + {1, "yamada.fukuoka.jp", 3, false}, + {1, "yame.fukuoka.jp", 3, false}, + {1, "yanagawa.fukuoka.jp", 3, false}, + {1, "yukuhashi.fukuoka.jp", 3, false}, + {1, "aizubange.fukushima.jp", 3, false}, + {1, "aizumisato.fukushima.jp", 3, false}, + {1, "aizuwakamatsu.fukushima.jp", 3, false}, + {1, "asakawa.fukushima.jp", 3, false}, + {1, "bandai.fukushima.jp", 3, false}, + {1, "date.fukushima.jp", 3, false}, + {1, "fukushima.fukushima.jp", 3, false}, + {1, "furudono.fukushima.jp", 3, false}, + {1, "futaba.fukushima.jp", 3, false}, + {1, "hanawa.fukushima.jp", 3, false}, + {1, "higashi.fukushima.jp", 3, false}, + {1, "hirata.fukushima.jp", 3, false}, + {1, "hirono.fukushima.jp", 3, false}, + {1, "iitate.fukushima.jp", 3, false}, + {1, "inawashiro.fukushima.jp", 3, false}, + {1, "ishikawa.fukushima.jp", 3, false}, + {1, "iwaki.fukushima.jp", 3, false}, + {1, "izumizaki.fukushima.jp", 3, false}, + {1, "kagamiishi.fukushima.jp", 3, false}, + {1, "kaneyama.fukushima.jp", 3, false}, + {1, "kawamata.fukushima.jp", 3, false}, + {1, "kitakata.fukushima.jp", 3, false}, + {1, "kitashiobara.fukushima.jp", 3, false}, + {1, "koori.fukushima.jp", 3, false}, + {1, "koriyama.fukushima.jp", 3, false}, + {1, "kunimi.fukushima.jp", 3, false}, + {1, "miharu.fukushima.jp", 3, false}, + {1, "mishima.fukushima.jp", 3, false}, + {1, "namie.fukushima.jp", 3, false}, + {1, "nango.fukushima.jp", 3, false}, + {1, "nishiaizu.fukushima.jp", 3, false}, + {1, "nishigo.fukushima.jp", 3, false}, + {1, "okuma.fukushima.jp", 3, false}, + {1, "omotego.fukushima.jp", 3, false}, + {1, "ono.fukushima.jp", 3, false}, + {1, "otama.fukushima.jp", 3, false}, + {1, 
"samegawa.fukushima.jp", 3, false}, + {1, "shimogo.fukushima.jp", 3, false}, + {1, "shirakawa.fukushima.jp", 3, false}, + {1, "showa.fukushima.jp", 3, false}, + {1, "soma.fukushima.jp", 3, false}, + {1, "sukagawa.fukushima.jp", 3, false}, + {1, "taishin.fukushima.jp", 3, false}, + {1, "tamakawa.fukushima.jp", 3, false}, + {1, "tanagura.fukushima.jp", 3, false}, + {1, "tenei.fukushima.jp", 3, false}, + {1, "yabuki.fukushima.jp", 3, false}, + {1, "yamato.fukushima.jp", 3, false}, + {1, "yamatsuri.fukushima.jp", 3, false}, + {1, "yanaizu.fukushima.jp", 3, false}, + {1, "yugawa.fukushima.jp", 3, false}, + {1, "anpachi.gifu.jp", 3, false}, + {1, "ena.gifu.jp", 3, false}, + {1, "gifu.gifu.jp", 3, false}, + {1, "ginan.gifu.jp", 3, false}, + {1, "godo.gifu.jp", 3, false}, + {1, "gujo.gifu.jp", 3, false}, + {1, "hashima.gifu.jp", 3, false}, + {1, "hichiso.gifu.jp", 3, false}, + {1, "hida.gifu.jp", 3, false}, + {1, "higashishirakawa.gifu.jp", 3, false}, + {1, "ibigawa.gifu.jp", 3, false}, + {1, "ikeda.gifu.jp", 3, false}, + {1, "kakamigahara.gifu.jp", 3, false}, + {1, "kani.gifu.jp", 3, false}, + {1, "kasahara.gifu.jp", 3, false}, + {1, "kasamatsu.gifu.jp", 3, false}, + {1, "kawaue.gifu.jp", 3, false}, + {1, "kitagata.gifu.jp", 3, false}, + {1, "mino.gifu.jp", 3, false}, + {1, "minokamo.gifu.jp", 3, false}, + {1, "mitake.gifu.jp", 3, false}, + {1, "mizunami.gifu.jp", 3, false}, + {1, "motosu.gifu.jp", 3, false}, + {1, "nakatsugawa.gifu.jp", 3, false}, + {1, "ogaki.gifu.jp", 3, false}, + {1, "sakahogi.gifu.jp", 3, false}, + {1, "seki.gifu.jp", 3, false}, + {1, "sekigahara.gifu.jp", 3, false}, + {1, "shirakawa.gifu.jp", 3, false}, + {1, "tajimi.gifu.jp", 3, false}, + {1, "takayama.gifu.jp", 3, false}, + {1, "tarui.gifu.jp", 3, false}, + {1, "toki.gifu.jp", 3, false}, + {1, "tomika.gifu.jp", 3, false}, + {1, "wanouchi.gifu.jp", 3, false}, + {1, "yamagata.gifu.jp", 3, false}, + {1, "yaotsu.gifu.jp", 3, false}, + {1, "yoro.gifu.jp", 3, false}, + {1, "annaka.gunma.jp", 3, false}, 
+ {1, "chiyoda.gunma.jp", 3, false}, + {1, "fujioka.gunma.jp", 3, false}, + {1, "higashiagatsuma.gunma.jp", 3, false}, + {1, "isesaki.gunma.jp", 3, false}, + {1, "itakura.gunma.jp", 3, false}, + {1, "kanna.gunma.jp", 3, false}, + {1, "kanra.gunma.jp", 3, false}, + {1, "katashina.gunma.jp", 3, false}, + {1, "kawaba.gunma.jp", 3, false}, + {1, "kiryu.gunma.jp", 3, false}, + {1, "kusatsu.gunma.jp", 3, false}, + {1, "maebashi.gunma.jp", 3, false}, + {1, "meiwa.gunma.jp", 3, false}, + {1, "midori.gunma.jp", 3, false}, + {1, "minakami.gunma.jp", 3, false}, + {1, "naganohara.gunma.jp", 3, false}, + {1, "nakanojo.gunma.jp", 3, false}, + {1, "nanmoku.gunma.jp", 3, false}, + {1, "numata.gunma.jp", 3, false}, + {1, "oizumi.gunma.jp", 3, false}, + {1, "ora.gunma.jp", 3, false}, + {1, "ota.gunma.jp", 3, false}, + {1, "shibukawa.gunma.jp", 3, false}, + {1, "shimonita.gunma.jp", 3, false}, + {1, "shinto.gunma.jp", 3, false}, + {1, "showa.gunma.jp", 3, false}, + {1, "takasaki.gunma.jp", 3, false}, + {1, "takayama.gunma.jp", 3, false}, + {1, "tamamura.gunma.jp", 3, false}, + {1, "tatebayashi.gunma.jp", 3, false}, + {1, "tomioka.gunma.jp", 3, false}, + {1, "tsukiyono.gunma.jp", 3, false}, + {1, "tsumagoi.gunma.jp", 3, false}, + {1, "ueno.gunma.jp", 3, false}, + {1, "yoshioka.gunma.jp", 3, false}, + {1, "asaminami.hiroshima.jp", 3, false}, + {1, "daiwa.hiroshima.jp", 3, false}, + {1, "etajima.hiroshima.jp", 3, false}, + {1, "fuchu.hiroshima.jp", 3, false}, + {1, "fukuyama.hiroshima.jp", 3, false}, + {1, "hatsukaichi.hiroshima.jp", 3, false}, + {1, "higashihiroshima.hiroshima.jp", 3, false}, + {1, "hongo.hiroshima.jp", 3, false}, + {1, "jinsekikogen.hiroshima.jp", 3, false}, + {1, "kaita.hiroshima.jp", 3, false}, + {1, "kui.hiroshima.jp", 3, false}, + {1, "kumano.hiroshima.jp", 3, false}, + {1, "kure.hiroshima.jp", 3, false}, + {1, "mihara.hiroshima.jp", 3, false}, + {1, "miyoshi.hiroshima.jp", 3, false}, + {1, "naka.hiroshima.jp", 3, false}, + {1, "onomichi.hiroshima.jp", 3, false}, 
+ {1, "osakikamijima.hiroshima.jp", 3, false}, + {1, "otake.hiroshima.jp", 3, false}, + {1, "saka.hiroshima.jp", 3, false}, + {1, "sera.hiroshima.jp", 3, false}, + {1, "seranishi.hiroshima.jp", 3, false}, + {1, "shinichi.hiroshima.jp", 3, false}, + {1, "shobara.hiroshima.jp", 3, false}, + {1, "takehara.hiroshima.jp", 3, false}, + {1, "abashiri.hokkaido.jp", 3, false}, + {1, "abira.hokkaido.jp", 3, false}, + {1, "aibetsu.hokkaido.jp", 3, false}, + {1, "akabira.hokkaido.jp", 3, false}, + {1, "akkeshi.hokkaido.jp", 3, false}, + {1, "asahikawa.hokkaido.jp", 3, false}, + {1, "ashibetsu.hokkaido.jp", 3, false}, + {1, "ashoro.hokkaido.jp", 3, false}, + {1, "assabu.hokkaido.jp", 3, false}, + {1, "atsuma.hokkaido.jp", 3, false}, + {1, "bibai.hokkaido.jp", 3, false}, + {1, "biei.hokkaido.jp", 3, false}, + {1, "bifuka.hokkaido.jp", 3, false}, + {1, "bihoro.hokkaido.jp", 3, false}, + {1, "biratori.hokkaido.jp", 3, false}, + {1, "chippubetsu.hokkaido.jp", 3, false}, + {1, "chitose.hokkaido.jp", 3, false}, + {1, "date.hokkaido.jp", 3, false}, + {1, "ebetsu.hokkaido.jp", 3, false}, + {1, "embetsu.hokkaido.jp", 3, false}, + {1, "eniwa.hokkaido.jp", 3, false}, + {1, "erimo.hokkaido.jp", 3, false}, + {1, "esan.hokkaido.jp", 3, false}, + {1, "esashi.hokkaido.jp", 3, false}, + {1, "fukagawa.hokkaido.jp", 3, false}, + {1, "fukushima.hokkaido.jp", 3, false}, + {1, "furano.hokkaido.jp", 3, false}, + {1, "furubira.hokkaido.jp", 3, false}, + {1, "haboro.hokkaido.jp", 3, false}, + {1, "hakodate.hokkaido.jp", 3, false}, + {1, "hamatonbetsu.hokkaido.jp", 3, false}, + {1, "hidaka.hokkaido.jp", 3, false}, + {1, "higashikagura.hokkaido.jp", 3, false}, + {1, "higashikawa.hokkaido.jp", 3, false}, + {1, "hiroo.hokkaido.jp", 3, false}, + {1, "hokuryu.hokkaido.jp", 3, false}, + {1, "hokuto.hokkaido.jp", 3, false}, + {1, "honbetsu.hokkaido.jp", 3, false}, + {1, "horokanai.hokkaido.jp", 3, false}, + {1, "horonobe.hokkaido.jp", 3, false}, + {1, "ikeda.hokkaido.jp", 3, false}, + {1, 
"imakane.hokkaido.jp", 3, false}, + {1, "ishikari.hokkaido.jp", 3, false}, + {1, "iwamizawa.hokkaido.jp", 3, false}, + {1, "iwanai.hokkaido.jp", 3, false}, + {1, "kamifurano.hokkaido.jp", 3, false}, + {1, "kamikawa.hokkaido.jp", 3, false}, + {1, "kamishihoro.hokkaido.jp", 3, false}, + {1, "kamisunagawa.hokkaido.jp", 3, false}, + {1, "kamoenai.hokkaido.jp", 3, false}, + {1, "kayabe.hokkaido.jp", 3, false}, + {1, "kembuchi.hokkaido.jp", 3, false}, + {1, "kikonai.hokkaido.jp", 3, false}, + {1, "kimobetsu.hokkaido.jp", 3, false}, + {1, "kitahiroshima.hokkaido.jp", 3, false}, + {1, "kitami.hokkaido.jp", 3, false}, + {1, "kiyosato.hokkaido.jp", 3, false}, + {1, "koshimizu.hokkaido.jp", 3, false}, + {1, "kunneppu.hokkaido.jp", 3, false}, + {1, "kuriyama.hokkaido.jp", 3, false}, + {1, "kuromatsunai.hokkaido.jp", 3, false}, + {1, "kushiro.hokkaido.jp", 3, false}, + {1, "kutchan.hokkaido.jp", 3, false}, + {1, "kyowa.hokkaido.jp", 3, false}, + {1, "mashike.hokkaido.jp", 3, false}, + {1, "matsumae.hokkaido.jp", 3, false}, + {1, "mikasa.hokkaido.jp", 3, false}, + {1, "minamifurano.hokkaido.jp", 3, false}, + {1, "mombetsu.hokkaido.jp", 3, false}, + {1, "moseushi.hokkaido.jp", 3, false}, + {1, "mukawa.hokkaido.jp", 3, false}, + {1, "muroran.hokkaido.jp", 3, false}, + {1, "naie.hokkaido.jp", 3, false}, + {1, "nakagawa.hokkaido.jp", 3, false}, + {1, "nakasatsunai.hokkaido.jp", 3, false}, + {1, "nakatombetsu.hokkaido.jp", 3, false}, + {1, "nanae.hokkaido.jp", 3, false}, + {1, "nanporo.hokkaido.jp", 3, false}, + {1, "nayoro.hokkaido.jp", 3, false}, + {1, "nemuro.hokkaido.jp", 3, false}, + {1, "niikappu.hokkaido.jp", 3, false}, + {1, "niki.hokkaido.jp", 3, false}, + {1, "nishiokoppe.hokkaido.jp", 3, false}, + {1, "noboribetsu.hokkaido.jp", 3, false}, + {1, "numata.hokkaido.jp", 3, false}, + {1, "obihiro.hokkaido.jp", 3, false}, + {1, "obira.hokkaido.jp", 3, false}, + {1, "oketo.hokkaido.jp", 3, false}, + {1, "okoppe.hokkaido.jp", 3, false}, + {1, "otaru.hokkaido.jp", 3, false}, + {1, 
"otobe.hokkaido.jp", 3, false}, + {1, "otofuke.hokkaido.jp", 3, false}, + {1, "otoineppu.hokkaido.jp", 3, false}, + {1, "oumu.hokkaido.jp", 3, false}, + {1, "ozora.hokkaido.jp", 3, false}, + {1, "pippu.hokkaido.jp", 3, false}, + {1, "rankoshi.hokkaido.jp", 3, false}, + {1, "rebun.hokkaido.jp", 3, false}, + {1, "rikubetsu.hokkaido.jp", 3, false}, + {1, "rishiri.hokkaido.jp", 3, false}, + {1, "rishirifuji.hokkaido.jp", 3, false}, + {1, "saroma.hokkaido.jp", 3, false}, + {1, "sarufutsu.hokkaido.jp", 3, false}, + {1, "shakotan.hokkaido.jp", 3, false}, + {1, "shari.hokkaido.jp", 3, false}, + {1, "shibecha.hokkaido.jp", 3, false}, + {1, "shibetsu.hokkaido.jp", 3, false}, + {1, "shikabe.hokkaido.jp", 3, false}, + {1, "shikaoi.hokkaido.jp", 3, false}, + {1, "shimamaki.hokkaido.jp", 3, false}, + {1, "shimizu.hokkaido.jp", 3, false}, + {1, "shimokawa.hokkaido.jp", 3, false}, + {1, "shinshinotsu.hokkaido.jp", 3, false}, + {1, "shintoku.hokkaido.jp", 3, false}, + {1, "shiranuka.hokkaido.jp", 3, false}, + {1, "shiraoi.hokkaido.jp", 3, false}, + {1, "shiriuchi.hokkaido.jp", 3, false}, + {1, "sobetsu.hokkaido.jp", 3, false}, + {1, "sunagawa.hokkaido.jp", 3, false}, + {1, "taiki.hokkaido.jp", 3, false}, + {1, "takasu.hokkaido.jp", 3, false}, + {1, "takikawa.hokkaido.jp", 3, false}, + {1, "takinoue.hokkaido.jp", 3, false}, + {1, "teshikaga.hokkaido.jp", 3, false}, + {1, "tobetsu.hokkaido.jp", 3, false}, + {1, "tohma.hokkaido.jp", 3, false}, + {1, "tomakomai.hokkaido.jp", 3, false}, + {1, "tomari.hokkaido.jp", 3, false}, + {1, "toya.hokkaido.jp", 3, false}, + {1, "toyako.hokkaido.jp", 3, false}, + {1, "toyotomi.hokkaido.jp", 3, false}, + {1, "toyoura.hokkaido.jp", 3, false}, + {1, "tsubetsu.hokkaido.jp", 3, false}, + {1, "tsukigata.hokkaido.jp", 3, false}, + {1, "urakawa.hokkaido.jp", 3, false}, + {1, "urausu.hokkaido.jp", 3, false}, + {1, "uryu.hokkaido.jp", 3, false}, + {1, "utashinai.hokkaido.jp", 3, false}, + {1, "wakkanai.hokkaido.jp", 3, false}, + {1, "wassamu.hokkaido.jp", 3, 
false}, + {1, "yakumo.hokkaido.jp", 3, false}, + {1, "yoichi.hokkaido.jp", 3, false}, + {1, "aioi.hyogo.jp", 3, false}, + {1, "akashi.hyogo.jp", 3, false}, + {1, "ako.hyogo.jp", 3, false}, + {1, "amagasaki.hyogo.jp", 3, false}, + {1, "aogaki.hyogo.jp", 3, false}, + {1, "asago.hyogo.jp", 3, false}, + {1, "ashiya.hyogo.jp", 3, false}, + {1, "awaji.hyogo.jp", 3, false}, + {1, "fukusaki.hyogo.jp", 3, false}, + {1, "goshiki.hyogo.jp", 3, false}, + {1, "harima.hyogo.jp", 3, false}, + {1, "himeji.hyogo.jp", 3, false}, + {1, "ichikawa.hyogo.jp", 3, false}, + {1, "inagawa.hyogo.jp", 3, false}, + {1, "itami.hyogo.jp", 3, false}, + {1, "kakogawa.hyogo.jp", 3, false}, + {1, "kamigori.hyogo.jp", 3, false}, + {1, "kamikawa.hyogo.jp", 3, false}, + {1, "kasai.hyogo.jp", 3, false}, + {1, "kasuga.hyogo.jp", 3, false}, + {1, "kawanishi.hyogo.jp", 3, false}, + {1, "miki.hyogo.jp", 3, false}, + {1, "minamiawaji.hyogo.jp", 3, false}, + {1, "nishinomiya.hyogo.jp", 3, false}, + {1, "nishiwaki.hyogo.jp", 3, false}, + {1, "ono.hyogo.jp", 3, false}, + {1, "sanda.hyogo.jp", 3, false}, + {1, "sannan.hyogo.jp", 3, false}, + {1, "sasayama.hyogo.jp", 3, false}, + {1, "sayo.hyogo.jp", 3, false}, + {1, "shingu.hyogo.jp", 3, false}, + {1, "shinonsen.hyogo.jp", 3, false}, + {1, "shiso.hyogo.jp", 3, false}, + {1, "sumoto.hyogo.jp", 3, false}, + {1, "taishi.hyogo.jp", 3, false}, + {1, "taka.hyogo.jp", 3, false}, + {1, "takarazuka.hyogo.jp", 3, false}, + {1, "takasago.hyogo.jp", 3, false}, + {1, "takino.hyogo.jp", 3, false}, + {1, "tamba.hyogo.jp", 3, false}, + {1, "tatsuno.hyogo.jp", 3, false}, + {1, "toyooka.hyogo.jp", 3, false}, + {1, "yabu.hyogo.jp", 3, false}, + {1, "yashiro.hyogo.jp", 3, false}, + {1, "yoka.hyogo.jp", 3, false}, + {1, "yokawa.hyogo.jp", 3, false}, + {1, "ami.ibaraki.jp", 3, false}, + {1, "asahi.ibaraki.jp", 3, false}, + {1, "bando.ibaraki.jp", 3, false}, + {1, "chikusei.ibaraki.jp", 3, false}, + {1, "daigo.ibaraki.jp", 3, false}, + {1, "fujishiro.ibaraki.jp", 3, false}, + {1, 
"hitachi.ibaraki.jp", 3, false}, + {1, "hitachinaka.ibaraki.jp", 3, false}, + {1, "hitachiomiya.ibaraki.jp", 3, false}, + {1, "hitachiota.ibaraki.jp", 3, false}, + {1, "ibaraki.ibaraki.jp", 3, false}, + {1, "ina.ibaraki.jp", 3, false}, + {1, "inashiki.ibaraki.jp", 3, false}, + {1, "itako.ibaraki.jp", 3, false}, + {1, "iwama.ibaraki.jp", 3, false}, + {1, "joso.ibaraki.jp", 3, false}, + {1, "kamisu.ibaraki.jp", 3, false}, + {1, "kasama.ibaraki.jp", 3, false}, + {1, "kashima.ibaraki.jp", 3, false}, + {1, "kasumigaura.ibaraki.jp", 3, false}, + {1, "koga.ibaraki.jp", 3, false}, + {1, "miho.ibaraki.jp", 3, false}, + {1, "mito.ibaraki.jp", 3, false}, + {1, "moriya.ibaraki.jp", 3, false}, + {1, "naka.ibaraki.jp", 3, false}, + {1, "namegata.ibaraki.jp", 3, false}, + {1, "oarai.ibaraki.jp", 3, false}, + {1, "ogawa.ibaraki.jp", 3, false}, + {1, "omitama.ibaraki.jp", 3, false}, + {1, "ryugasaki.ibaraki.jp", 3, false}, + {1, "sakai.ibaraki.jp", 3, false}, + {1, "sakuragawa.ibaraki.jp", 3, false}, + {1, "shimodate.ibaraki.jp", 3, false}, + {1, "shimotsuma.ibaraki.jp", 3, false}, + {1, "shirosato.ibaraki.jp", 3, false}, + {1, "sowa.ibaraki.jp", 3, false}, + {1, "suifu.ibaraki.jp", 3, false}, + {1, "takahagi.ibaraki.jp", 3, false}, + {1, "tamatsukuri.ibaraki.jp", 3, false}, + {1, "tokai.ibaraki.jp", 3, false}, + {1, "tomobe.ibaraki.jp", 3, false}, + {1, "tone.ibaraki.jp", 3, false}, + {1, "toride.ibaraki.jp", 3, false}, + {1, "tsuchiura.ibaraki.jp", 3, false}, + {1, "tsukuba.ibaraki.jp", 3, false}, + {1, "uchihara.ibaraki.jp", 3, false}, + {1, "ushiku.ibaraki.jp", 3, false}, + {1, "yachiyo.ibaraki.jp", 3, false}, + {1, "yamagata.ibaraki.jp", 3, false}, + {1, "yawara.ibaraki.jp", 3, false}, + {1, "yuki.ibaraki.jp", 3, false}, + {1, "anamizu.ishikawa.jp", 3, false}, + {1, "hakui.ishikawa.jp", 3, false}, + {1, "hakusan.ishikawa.jp", 3, false}, + {1, "kaga.ishikawa.jp", 3, false}, + {1, "kahoku.ishikawa.jp", 3, false}, + {1, "kanazawa.ishikawa.jp", 3, false}, + {1, 
"kawakita.ishikawa.jp", 3, false}, + {1, "komatsu.ishikawa.jp", 3, false}, + {1, "nakanoto.ishikawa.jp", 3, false}, + {1, "nanao.ishikawa.jp", 3, false}, + {1, "nomi.ishikawa.jp", 3, false}, + {1, "nonoichi.ishikawa.jp", 3, false}, + {1, "noto.ishikawa.jp", 3, false}, + {1, "shika.ishikawa.jp", 3, false}, + {1, "suzu.ishikawa.jp", 3, false}, + {1, "tsubata.ishikawa.jp", 3, false}, + {1, "tsurugi.ishikawa.jp", 3, false}, + {1, "uchinada.ishikawa.jp", 3, false}, + {1, "wajima.ishikawa.jp", 3, false}, + {1, "fudai.iwate.jp", 3, false}, + {1, "fujisawa.iwate.jp", 3, false}, + {1, "hanamaki.iwate.jp", 3, false}, + {1, "hiraizumi.iwate.jp", 3, false}, + {1, "hirono.iwate.jp", 3, false}, + {1, "ichinohe.iwate.jp", 3, false}, + {1, "ichinoseki.iwate.jp", 3, false}, + {1, "iwaizumi.iwate.jp", 3, false}, + {1, "iwate.iwate.jp", 3, false}, + {1, "joboji.iwate.jp", 3, false}, + {1, "kamaishi.iwate.jp", 3, false}, + {1, "kanegasaki.iwate.jp", 3, false}, + {1, "karumai.iwate.jp", 3, false}, + {1, "kawai.iwate.jp", 3, false}, + {1, "kitakami.iwate.jp", 3, false}, + {1, "kuji.iwate.jp", 3, false}, + {1, "kunohe.iwate.jp", 3, false}, + {1, "kuzumaki.iwate.jp", 3, false}, + {1, "miyako.iwate.jp", 3, false}, + {1, "mizusawa.iwate.jp", 3, false}, + {1, "morioka.iwate.jp", 3, false}, + {1, "ninohe.iwate.jp", 3, false}, + {1, "noda.iwate.jp", 3, false}, + {1, "ofunato.iwate.jp", 3, false}, + {1, "oshu.iwate.jp", 3, false}, + {1, "otsuchi.iwate.jp", 3, false}, + {1, "rikuzentakata.iwate.jp", 3, false}, + {1, "shiwa.iwate.jp", 3, false}, + {1, "shizukuishi.iwate.jp", 3, false}, + {1, "sumita.iwate.jp", 3, false}, + {1, "tanohata.iwate.jp", 3, false}, + {1, "tono.iwate.jp", 3, false}, + {1, "yahaba.iwate.jp", 3, false}, + {1, "yamada.iwate.jp", 3, false}, + {1, "ayagawa.kagawa.jp", 3, false}, + {1, "higashikagawa.kagawa.jp", 3, false}, + {1, "kanonji.kagawa.jp", 3, false}, + {1, "kotohira.kagawa.jp", 3, false}, + {1, "manno.kagawa.jp", 3, false}, + {1, "marugame.kagawa.jp", 3, false}, + 
{1, "mitoyo.kagawa.jp", 3, false}, + {1, "naoshima.kagawa.jp", 3, false}, + {1, "sanuki.kagawa.jp", 3, false}, + {1, "tadotsu.kagawa.jp", 3, false}, + {1, "takamatsu.kagawa.jp", 3, false}, + {1, "tonosho.kagawa.jp", 3, false}, + {1, "uchinomi.kagawa.jp", 3, false}, + {1, "utazu.kagawa.jp", 3, false}, + {1, "zentsuji.kagawa.jp", 3, false}, + {1, "akune.kagoshima.jp", 3, false}, + {1, "amami.kagoshima.jp", 3, false}, + {1, "hioki.kagoshima.jp", 3, false}, + {1, "isa.kagoshima.jp", 3, false}, + {1, "isen.kagoshima.jp", 3, false}, + {1, "izumi.kagoshima.jp", 3, false}, + {1, "kagoshima.kagoshima.jp", 3, false}, + {1, "kanoya.kagoshima.jp", 3, false}, + {1, "kawanabe.kagoshima.jp", 3, false}, + {1, "kinko.kagoshima.jp", 3, false}, + {1, "kouyama.kagoshima.jp", 3, false}, + {1, "makurazaki.kagoshima.jp", 3, false}, + {1, "matsumoto.kagoshima.jp", 3, false}, + {1, "minamitane.kagoshima.jp", 3, false}, + {1, "nakatane.kagoshima.jp", 3, false}, + {1, "nishinoomote.kagoshima.jp", 3, false}, + {1, "satsumasendai.kagoshima.jp", 3, false}, + {1, "soo.kagoshima.jp", 3, false}, + {1, "tarumizu.kagoshima.jp", 3, false}, + {1, "yusui.kagoshima.jp", 3, false}, + {1, "aikawa.kanagawa.jp", 3, false}, + {1, "atsugi.kanagawa.jp", 3, false}, + {1, "ayase.kanagawa.jp", 3, false}, + {1, "chigasaki.kanagawa.jp", 3, false}, + {1, "ebina.kanagawa.jp", 3, false}, + {1, "fujisawa.kanagawa.jp", 3, false}, + {1, "hadano.kanagawa.jp", 3, false}, + {1, "hakone.kanagawa.jp", 3, false}, + {1, "hiratsuka.kanagawa.jp", 3, false}, + {1, "isehara.kanagawa.jp", 3, false}, + {1, "kaisei.kanagawa.jp", 3, false}, + {1, "kamakura.kanagawa.jp", 3, false}, + {1, "kiyokawa.kanagawa.jp", 3, false}, + {1, "matsuda.kanagawa.jp", 3, false}, + {1, "minamiashigara.kanagawa.jp", 3, false}, + {1, "miura.kanagawa.jp", 3, false}, + {1, "nakai.kanagawa.jp", 3, false}, + {1, "ninomiya.kanagawa.jp", 3, false}, + {1, "odawara.kanagawa.jp", 3, false}, + {1, "oi.kanagawa.jp", 3, false}, + {1, "oiso.kanagawa.jp", 3, false}, + 
{1, "sagamihara.kanagawa.jp", 3, false}, + {1, "samukawa.kanagawa.jp", 3, false}, + {1, "tsukui.kanagawa.jp", 3, false}, + {1, "yamakita.kanagawa.jp", 3, false}, + {1, "yamato.kanagawa.jp", 3, false}, + {1, "yokosuka.kanagawa.jp", 3, false}, + {1, "yugawara.kanagawa.jp", 3, false}, + {1, "zama.kanagawa.jp", 3, false}, + {1, "zushi.kanagawa.jp", 3, false}, + {1, "aki.kochi.jp", 3, false}, + {1, "geisei.kochi.jp", 3, false}, + {1, "hidaka.kochi.jp", 3, false}, + {1, "higashitsuno.kochi.jp", 3, false}, + {1, "ino.kochi.jp", 3, false}, + {1, "kagami.kochi.jp", 3, false}, + {1, "kami.kochi.jp", 3, false}, + {1, "kitagawa.kochi.jp", 3, false}, + {1, "kochi.kochi.jp", 3, false}, + {1, "mihara.kochi.jp", 3, false}, + {1, "motoyama.kochi.jp", 3, false}, + {1, "muroto.kochi.jp", 3, false}, + {1, "nahari.kochi.jp", 3, false}, + {1, "nakamura.kochi.jp", 3, false}, + {1, "nankoku.kochi.jp", 3, false}, + {1, "nishitosa.kochi.jp", 3, false}, + {1, "niyodogawa.kochi.jp", 3, false}, + {1, "ochi.kochi.jp", 3, false}, + {1, "okawa.kochi.jp", 3, false}, + {1, "otoyo.kochi.jp", 3, false}, + {1, "otsuki.kochi.jp", 3, false}, + {1, "sakawa.kochi.jp", 3, false}, + {1, "sukumo.kochi.jp", 3, false}, + {1, "susaki.kochi.jp", 3, false}, + {1, "tosa.kochi.jp", 3, false}, + {1, "tosashimizu.kochi.jp", 3, false}, + {1, "toyo.kochi.jp", 3, false}, + {1, "tsuno.kochi.jp", 3, false}, + {1, "umaji.kochi.jp", 3, false}, + {1, "yasuda.kochi.jp", 3, false}, + {1, "yusuhara.kochi.jp", 3, false}, + {1, "amakusa.kumamoto.jp", 3, false}, + {1, "arao.kumamoto.jp", 3, false}, + {1, "aso.kumamoto.jp", 3, false}, + {1, "choyo.kumamoto.jp", 3, false}, + {1, "gyokuto.kumamoto.jp", 3, false}, + {1, "kamiamakusa.kumamoto.jp", 3, false}, + {1, "kikuchi.kumamoto.jp", 3, false}, + {1, "kumamoto.kumamoto.jp", 3, false}, + {1, "mashiki.kumamoto.jp", 3, false}, + {1, "mifune.kumamoto.jp", 3, false}, + {1, "minamata.kumamoto.jp", 3, false}, + {1, "minamioguni.kumamoto.jp", 3, false}, + {1, "nagasu.kumamoto.jp", 3, 
false}, + {1, "nishihara.kumamoto.jp", 3, false}, + {1, "oguni.kumamoto.jp", 3, false}, + {1, "ozu.kumamoto.jp", 3, false}, + {1, "sumoto.kumamoto.jp", 3, false}, + {1, "takamori.kumamoto.jp", 3, false}, + {1, "uki.kumamoto.jp", 3, false}, + {1, "uto.kumamoto.jp", 3, false}, + {1, "yamaga.kumamoto.jp", 3, false}, + {1, "yamato.kumamoto.jp", 3, false}, + {1, "yatsushiro.kumamoto.jp", 3, false}, + {1, "ayabe.kyoto.jp", 3, false}, + {1, "fukuchiyama.kyoto.jp", 3, false}, + {1, "higashiyama.kyoto.jp", 3, false}, + {1, "ide.kyoto.jp", 3, false}, + {1, "ine.kyoto.jp", 3, false}, + {1, "joyo.kyoto.jp", 3, false}, + {1, "kameoka.kyoto.jp", 3, false}, + {1, "kamo.kyoto.jp", 3, false}, + {1, "kita.kyoto.jp", 3, false}, + {1, "kizu.kyoto.jp", 3, false}, + {1, "kumiyama.kyoto.jp", 3, false}, + {1, "kyotamba.kyoto.jp", 3, false}, + {1, "kyotanabe.kyoto.jp", 3, false}, + {1, "kyotango.kyoto.jp", 3, false}, + {1, "maizuru.kyoto.jp", 3, false}, + {1, "minami.kyoto.jp", 3, false}, + {1, "minamiyamashiro.kyoto.jp", 3, false}, + {1, "miyazu.kyoto.jp", 3, false}, + {1, "muko.kyoto.jp", 3, false}, + {1, "nagaokakyo.kyoto.jp", 3, false}, + {1, "nakagyo.kyoto.jp", 3, false}, + {1, "nantan.kyoto.jp", 3, false}, + {1, "oyamazaki.kyoto.jp", 3, false}, + {1, "sakyo.kyoto.jp", 3, false}, + {1, "seika.kyoto.jp", 3, false}, + {1, "tanabe.kyoto.jp", 3, false}, + {1, "uji.kyoto.jp", 3, false}, + {1, "ujitawara.kyoto.jp", 3, false}, + {1, "wazuka.kyoto.jp", 3, false}, + {1, "yamashina.kyoto.jp", 3, false}, + {1, "yawata.kyoto.jp", 3, false}, + {1, "asahi.mie.jp", 3, false}, + {1, "inabe.mie.jp", 3, false}, + {1, "ise.mie.jp", 3, false}, + {1, "kameyama.mie.jp", 3, false}, + {1, "kawagoe.mie.jp", 3, false}, + {1, "kiho.mie.jp", 3, false}, + {1, "kisosaki.mie.jp", 3, false}, + {1, "kiwa.mie.jp", 3, false}, + {1, "komono.mie.jp", 3, false}, + {1, "kumano.mie.jp", 3, false}, + {1, "kuwana.mie.jp", 3, false}, + {1, "matsusaka.mie.jp", 3, false}, + {1, "meiwa.mie.jp", 3, false}, + {1, "mihama.mie.jp", 
3, false}, + {1, "minamiise.mie.jp", 3, false}, + {1, "misugi.mie.jp", 3, false}, + {1, "miyama.mie.jp", 3, false}, + {1, "nabari.mie.jp", 3, false}, + {1, "shima.mie.jp", 3, false}, + {1, "suzuka.mie.jp", 3, false}, + {1, "tado.mie.jp", 3, false}, + {1, "taiki.mie.jp", 3, false}, + {1, "taki.mie.jp", 3, false}, + {1, "tamaki.mie.jp", 3, false}, + {1, "toba.mie.jp", 3, false}, + {1, "tsu.mie.jp", 3, false}, + {1, "udono.mie.jp", 3, false}, + {1, "ureshino.mie.jp", 3, false}, + {1, "watarai.mie.jp", 3, false}, + {1, "yokkaichi.mie.jp", 3, false}, + {1, "furukawa.miyagi.jp", 3, false}, + {1, "higashimatsushima.miyagi.jp", 3, false}, + {1, "ishinomaki.miyagi.jp", 3, false}, + {1, "iwanuma.miyagi.jp", 3, false}, + {1, "kakuda.miyagi.jp", 3, false}, + {1, "kami.miyagi.jp", 3, false}, + {1, "kawasaki.miyagi.jp", 3, false}, + {1, "marumori.miyagi.jp", 3, false}, + {1, "matsushima.miyagi.jp", 3, false}, + {1, "minamisanriku.miyagi.jp", 3, false}, + {1, "misato.miyagi.jp", 3, false}, + {1, "murata.miyagi.jp", 3, false}, + {1, "natori.miyagi.jp", 3, false}, + {1, "ogawara.miyagi.jp", 3, false}, + {1, "ohira.miyagi.jp", 3, false}, + {1, "onagawa.miyagi.jp", 3, false}, + {1, "osaki.miyagi.jp", 3, false}, + {1, "rifu.miyagi.jp", 3, false}, + {1, "semine.miyagi.jp", 3, false}, + {1, "shibata.miyagi.jp", 3, false}, + {1, "shichikashuku.miyagi.jp", 3, false}, + {1, "shikama.miyagi.jp", 3, false}, + {1, "shiogama.miyagi.jp", 3, false}, + {1, "shiroishi.miyagi.jp", 3, false}, + {1, "tagajo.miyagi.jp", 3, false}, + {1, "taiwa.miyagi.jp", 3, false}, + {1, "tome.miyagi.jp", 3, false}, + {1, "tomiya.miyagi.jp", 3, false}, + {1, "wakuya.miyagi.jp", 3, false}, + {1, "watari.miyagi.jp", 3, false}, + {1, "yamamoto.miyagi.jp", 3, false}, + {1, "zao.miyagi.jp", 3, false}, + {1, "aya.miyazaki.jp", 3, false}, + {1, "ebino.miyazaki.jp", 3, false}, + {1, "gokase.miyazaki.jp", 3, false}, + {1, "hyuga.miyazaki.jp", 3, false}, + {1, "kadogawa.miyazaki.jp", 3, false}, + {1, "kawaminami.miyazaki.jp", 
3, false}, + {1, "kijo.miyazaki.jp", 3, false}, + {1, "kitagawa.miyazaki.jp", 3, false}, + {1, "kitakata.miyazaki.jp", 3, false}, + {1, "kitaura.miyazaki.jp", 3, false}, + {1, "kobayashi.miyazaki.jp", 3, false}, + {1, "kunitomi.miyazaki.jp", 3, false}, + {1, "kushima.miyazaki.jp", 3, false}, + {1, "mimata.miyazaki.jp", 3, false}, + {1, "miyakonojo.miyazaki.jp", 3, false}, + {1, "miyazaki.miyazaki.jp", 3, false}, + {1, "morotsuka.miyazaki.jp", 3, false}, + {1, "nichinan.miyazaki.jp", 3, false}, + {1, "nishimera.miyazaki.jp", 3, false}, + {1, "nobeoka.miyazaki.jp", 3, false}, + {1, "saito.miyazaki.jp", 3, false}, + {1, "shiiba.miyazaki.jp", 3, false}, + {1, "shintomi.miyazaki.jp", 3, false}, + {1, "takaharu.miyazaki.jp", 3, false}, + {1, "takanabe.miyazaki.jp", 3, false}, + {1, "takazaki.miyazaki.jp", 3, false}, + {1, "tsuno.miyazaki.jp", 3, false}, + {1, "achi.nagano.jp", 3, false}, + {1, "agematsu.nagano.jp", 3, false}, + {1, "anan.nagano.jp", 3, false}, + {1, "aoki.nagano.jp", 3, false}, + {1, "asahi.nagano.jp", 3, false}, + {1, "azumino.nagano.jp", 3, false}, + {1, "chikuhoku.nagano.jp", 3, false}, + {1, "chikuma.nagano.jp", 3, false}, + {1, "chino.nagano.jp", 3, false}, + {1, "fujimi.nagano.jp", 3, false}, + {1, "hakuba.nagano.jp", 3, false}, + {1, "hara.nagano.jp", 3, false}, + {1, "hiraya.nagano.jp", 3, false}, + {1, "iida.nagano.jp", 3, false}, + {1, "iijima.nagano.jp", 3, false}, + {1, "iiyama.nagano.jp", 3, false}, + {1, "iizuna.nagano.jp", 3, false}, + {1, "ikeda.nagano.jp", 3, false}, + {1, "ikusaka.nagano.jp", 3, false}, + {1, "ina.nagano.jp", 3, false}, + {1, "karuizawa.nagano.jp", 3, false}, + {1, "kawakami.nagano.jp", 3, false}, + {1, "kiso.nagano.jp", 3, false}, + {1, "kisofukushima.nagano.jp", 3, false}, + {1, "kitaaiki.nagano.jp", 3, false}, + {1, "komagane.nagano.jp", 3, false}, + {1, "komoro.nagano.jp", 3, false}, + {1, "matsukawa.nagano.jp", 3, false}, + {1, "matsumoto.nagano.jp", 3, false}, + {1, "miasa.nagano.jp", 3, false}, + {1, 
"minamiaiki.nagano.jp", 3, false}, + {1, "minamimaki.nagano.jp", 3, false}, + {1, "minamiminowa.nagano.jp", 3, false}, + {1, "minowa.nagano.jp", 3, false}, + {1, "miyada.nagano.jp", 3, false}, + {1, "miyota.nagano.jp", 3, false}, + {1, "mochizuki.nagano.jp", 3, false}, + {1, "nagano.nagano.jp", 3, false}, + {1, "nagawa.nagano.jp", 3, false}, + {1, "nagiso.nagano.jp", 3, false}, + {1, "nakagawa.nagano.jp", 3, false}, + {1, "nakano.nagano.jp", 3, false}, + {1, "nozawaonsen.nagano.jp", 3, false}, + {1, "obuse.nagano.jp", 3, false}, + {1, "ogawa.nagano.jp", 3, false}, + {1, "okaya.nagano.jp", 3, false}, + {1, "omachi.nagano.jp", 3, false}, + {1, "omi.nagano.jp", 3, false}, + {1, "ookuwa.nagano.jp", 3, false}, + {1, "ooshika.nagano.jp", 3, false}, + {1, "otaki.nagano.jp", 3, false}, + {1, "otari.nagano.jp", 3, false}, + {1, "sakae.nagano.jp", 3, false}, + {1, "sakaki.nagano.jp", 3, false}, + {1, "saku.nagano.jp", 3, false}, + {1, "sakuho.nagano.jp", 3, false}, + {1, "shimosuwa.nagano.jp", 3, false}, + {1, "shinanomachi.nagano.jp", 3, false}, + {1, "shiojiri.nagano.jp", 3, false}, + {1, "suwa.nagano.jp", 3, false}, + {1, "suzaka.nagano.jp", 3, false}, + {1, "takagi.nagano.jp", 3, false}, + {1, "takamori.nagano.jp", 3, false}, + {1, "takayama.nagano.jp", 3, false}, + {1, "tateshina.nagano.jp", 3, false}, + {1, "tatsuno.nagano.jp", 3, false}, + {1, "togakushi.nagano.jp", 3, false}, + {1, "togura.nagano.jp", 3, false}, + {1, "tomi.nagano.jp", 3, false}, + {1, "ueda.nagano.jp", 3, false}, + {1, "wada.nagano.jp", 3, false}, + {1, "yamagata.nagano.jp", 3, false}, + {1, "yamanouchi.nagano.jp", 3, false}, + {1, "yasaka.nagano.jp", 3, false}, + {1, "yasuoka.nagano.jp", 3, false}, + {1, "chijiwa.nagasaki.jp", 3, false}, + {1, "futsu.nagasaki.jp", 3, false}, + {1, "goto.nagasaki.jp", 3, false}, + {1, "hasami.nagasaki.jp", 3, false}, + {1, "hirado.nagasaki.jp", 3, false}, + {1, "iki.nagasaki.jp", 3, false}, + {1, "isahaya.nagasaki.jp", 3, false}, + {1, "kawatana.nagasaki.jp", 3, 
false}, + {1, "kuchinotsu.nagasaki.jp", 3, false}, + {1, "matsuura.nagasaki.jp", 3, false}, + {1, "nagasaki.nagasaki.jp", 3, false}, + {1, "obama.nagasaki.jp", 3, false}, + {1, "omura.nagasaki.jp", 3, false}, + {1, "oseto.nagasaki.jp", 3, false}, + {1, "saikai.nagasaki.jp", 3, false}, + {1, "sasebo.nagasaki.jp", 3, false}, + {1, "seihi.nagasaki.jp", 3, false}, + {1, "shimabara.nagasaki.jp", 3, false}, + {1, "shinkamigoto.nagasaki.jp", 3, false}, + {1, "togitsu.nagasaki.jp", 3, false}, + {1, "tsushima.nagasaki.jp", 3, false}, + {1, "unzen.nagasaki.jp", 3, false}, + {1, "ando.nara.jp", 3, false}, + {1, "gose.nara.jp", 3, false}, + {1, "heguri.nara.jp", 3, false}, + {1, "higashiyoshino.nara.jp", 3, false}, + {1, "ikaruga.nara.jp", 3, false}, + {1, "ikoma.nara.jp", 3, false}, + {1, "kamikitayama.nara.jp", 3, false}, + {1, "kanmaki.nara.jp", 3, false}, + {1, "kashiba.nara.jp", 3, false}, + {1, "kashihara.nara.jp", 3, false}, + {1, "katsuragi.nara.jp", 3, false}, + {1, "kawai.nara.jp", 3, false}, + {1, "kawakami.nara.jp", 3, false}, + {1, "kawanishi.nara.jp", 3, false}, + {1, "koryo.nara.jp", 3, false}, + {1, "kurotaki.nara.jp", 3, false}, + {1, "mitsue.nara.jp", 3, false}, + {1, "miyake.nara.jp", 3, false}, + {1, "nara.nara.jp", 3, false}, + {1, "nosegawa.nara.jp", 3, false}, + {1, "oji.nara.jp", 3, false}, + {1, "ouda.nara.jp", 3, false}, + {1, "oyodo.nara.jp", 3, false}, + {1, "sakurai.nara.jp", 3, false}, + {1, "sango.nara.jp", 3, false}, + {1, "shimoichi.nara.jp", 3, false}, + {1, "shimokitayama.nara.jp", 3, false}, + {1, "shinjo.nara.jp", 3, false}, + {1, "soni.nara.jp", 3, false}, + {1, "takatori.nara.jp", 3, false}, + {1, "tawaramoto.nara.jp", 3, false}, + {1, "tenkawa.nara.jp", 3, false}, + {1, "tenri.nara.jp", 3, false}, + {1, "uda.nara.jp", 3, false}, + {1, "yamatokoriyama.nara.jp", 3, false}, + {1, "yamatotakada.nara.jp", 3, false}, + {1, "yamazoe.nara.jp", 3, false}, + {1, "yoshino.nara.jp", 3, false}, + {1, "aga.niigata.jp", 3, false}, + {1, 
"agano.niigata.jp", 3, false}, + {1, "gosen.niigata.jp", 3, false}, + {1, "itoigawa.niigata.jp", 3, false}, + {1, "izumozaki.niigata.jp", 3, false}, + {1, "joetsu.niigata.jp", 3, false}, + {1, "kamo.niigata.jp", 3, false}, + {1, "kariwa.niigata.jp", 3, false}, + {1, "kashiwazaki.niigata.jp", 3, false}, + {1, "minamiuonuma.niigata.jp", 3, false}, + {1, "mitsuke.niigata.jp", 3, false}, + {1, "muika.niigata.jp", 3, false}, + {1, "murakami.niigata.jp", 3, false}, + {1, "myoko.niigata.jp", 3, false}, + {1, "nagaoka.niigata.jp", 3, false}, + {1, "niigata.niigata.jp", 3, false}, + {1, "ojiya.niigata.jp", 3, false}, + {1, "omi.niigata.jp", 3, false}, + {1, "sado.niigata.jp", 3, false}, + {1, "sanjo.niigata.jp", 3, false}, + {1, "seiro.niigata.jp", 3, false}, + {1, "seirou.niigata.jp", 3, false}, + {1, "sekikawa.niigata.jp", 3, false}, + {1, "shibata.niigata.jp", 3, false}, + {1, "tagami.niigata.jp", 3, false}, + {1, "tainai.niigata.jp", 3, false}, + {1, "tochio.niigata.jp", 3, false}, + {1, "tokamachi.niigata.jp", 3, false}, + {1, "tsubame.niigata.jp", 3, false}, + {1, "tsunan.niigata.jp", 3, false}, + {1, "uonuma.niigata.jp", 3, false}, + {1, "yahiko.niigata.jp", 3, false}, + {1, "yoita.niigata.jp", 3, false}, + {1, "yuzawa.niigata.jp", 3, false}, + {1, "beppu.oita.jp", 3, false}, + {1, "bungoono.oita.jp", 3, false}, + {1, "bungotakada.oita.jp", 3, false}, + {1, "hasama.oita.jp", 3, false}, + {1, "hiji.oita.jp", 3, false}, + {1, "himeshima.oita.jp", 3, false}, + {1, "hita.oita.jp", 3, false}, + {1, "kamitsue.oita.jp", 3, false}, + {1, "kokonoe.oita.jp", 3, false}, + {1, "kuju.oita.jp", 3, false}, + {1, "kunisaki.oita.jp", 3, false}, + {1, "kusu.oita.jp", 3, false}, + {1, "oita.oita.jp", 3, false}, + {1, "saiki.oita.jp", 3, false}, + {1, "taketa.oita.jp", 3, false}, + {1, "tsukumi.oita.jp", 3, false}, + {1, "usa.oita.jp", 3, false}, + {1, "usuki.oita.jp", 3, false}, + {1, "yufu.oita.jp", 3, false}, + {1, "akaiwa.okayama.jp", 3, false}, + {1, "asakuchi.okayama.jp", 3, 
false}, + {1, "bizen.okayama.jp", 3, false}, + {1, "hayashima.okayama.jp", 3, false}, + {1, "ibara.okayama.jp", 3, false}, + {1, "kagamino.okayama.jp", 3, false}, + {1, "kasaoka.okayama.jp", 3, false}, + {1, "kibichuo.okayama.jp", 3, false}, + {1, "kumenan.okayama.jp", 3, false}, + {1, "kurashiki.okayama.jp", 3, false}, + {1, "maniwa.okayama.jp", 3, false}, + {1, "misaki.okayama.jp", 3, false}, + {1, "nagi.okayama.jp", 3, false}, + {1, "niimi.okayama.jp", 3, false}, + {1, "nishiawakura.okayama.jp", 3, false}, + {1, "okayama.okayama.jp", 3, false}, + {1, "satosho.okayama.jp", 3, false}, + {1, "setouchi.okayama.jp", 3, false}, + {1, "shinjo.okayama.jp", 3, false}, + {1, "shoo.okayama.jp", 3, false}, + {1, "soja.okayama.jp", 3, false}, + {1, "takahashi.okayama.jp", 3, false}, + {1, "tamano.okayama.jp", 3, false}, + {1, "tsuyama.okayama.jp", 3, false}, + {1, "wake.okayama.jp", 3, false}, + {1, "yakage.okayama.jp", 3, false}, + {1, "aguni.okinawa.jp", 3, false}, + {1, "ginowan.okinawa.jp", 3, false}, + {1, "ginoza.okinawa.jp", 3, false}, + {1, "gushikami.okinawa.jp", 3, false}, + {1, "haebaru.okinawa.jp", 3, false}, + {1, "higashi.okinawa.jp", 3, false}, + {1, "hirara.okinawa.jp", 3, false}, + {1, "iheya.okinawa.jp", 3, false}, + {1, "ishigaki.okinawa.jp", 3, false}, + {1, "ishikawa.okinawa.jp", 3, false}, + {1, "itoman.okinawa.jp", 3, false}, + {1, "izena.okinawa.jp", 3, false}, + {1, "kadena.okinawa.jp", 3, false}, + {1, "kin.okinawa.jp", 3, false}, + {1, "kitadaito.okinawa.jp", 3, false}, + {1, "kitanakagusuku.okinawa.jp", 3, false}, + {1, "kumejima.okinawa.jp", 3, false}, + {1, "kunigami.okinawa.jp", 3, false}, + {1, "minamidaito.okinawa.jp", 3, false}, + {1, "motobu.okinawa.jp", 3, false}, + {1, "nago.okinawa.jp", 3, false}, + {1, "naha.okinawa.jp", 3, false}, + {1, "nakagusuku.okinawa.jp", 3, false}, + {1, "nakijin.okinawa.jp", 3, false}, + {1, "nanjo.okinawa.jp", 3, false}, + {1, "nishihara.okinawa.jp", 3, false}, + {1, "ogimi.okinawa.jp", 3, false}, + {1, 
"okinawa.okinawa.jp", 3, false}, + {1, "onna.okinawa.jp", 3, false}, + {1, "shimoji.okinawa.jp", 3, false}, + {1, "taketomi.okinawa.jp", 3, false}, + {1, "tarama.okinawa.jp", 3, false}, + {1, "tokashiki.okinawa.jp", 3, false}, + {1, "tomigusuku.okinawa.jp", 3, false}, + {1, "tonaki.okinawa.jp", 3, false}, + {1, "urasoe.okinawa.jp", 3, false}, + {1, "uruma.okinawa.jp", 3, false}, + {1, "yaese.okinawa.jp", 3, false}, + {1, "yomitan.okinawa.jp", 3, false}, + {1, "yonabaru.okinawa.jp", 3, false}, + {1, "yonaguni.okinawa.jp", 3, false}, + {1, "zamami.okinawa.jp", 3, false}, + {1, "abeno.osaka.jp", 3, false}, + {1, "chihayaakasaka.osaka.jp", 3, false}, + {1, "chuo.osaka.jp", 3, false}, + {1, "daito.osaka.jp", 3, false}, + {1, "fujiidera.osaka.jp", 3, false}, + {1, "habikino.osaka.jp", 3, false}, + {1, "hannan.osaka.jp", 3, false}, + {1, "higashiosaka.osaka.jp", 3, false}, + {1, "higashisumiyoshi.osaka.jp", 3, false}, + {1, "higashiyodogawa.osaka.jp", 3, false}, + {1, "hirakata.osaka.jp", 3, false}, + {1, "ibaraki.osaka.jp", 3, false}, + {1, "ikeda.osaka.jp", 3, false}, + {1, "izumi.osaka.jp", 3, false}, + {1, "izumiotsu.osaka.jp", 3, false}, + {1, "izumisano.osaka.jp", 3, false}, + {1, "kadoma.osaka.jp", 3, false}, + {1, "kaizuka.osaka.jp", 3, false}, + {1, "kanan.osaka.jp", 3, false}, + {1, "kashiwara.osaka.jp", 3, false}, + {1, "katano.osaka.jp", 3, false}, + {1, "kawachinagano.osaka.jp", 3, false}, + {1, "kishiwada.osaka.jp", 3, false}, + {1, "kita.osaka.jp", 3, false}, + {1, "kumatori.osaka.jp", 3, false}, + {1, "matsubara.osaka.jp", 3, false}, + {1, "minato.osaka.jp", 3, false}, + {1, "minoh.osaka.jp", 3, false}, + {1, "misaki.osaka.jp", 3, false}, + {1, "moriguchi.osaka.jp", 3, false}, + {1, "neyagawa.osaka.jp", 3, false}, + {1, "nishi.osaka.jp", 3, false}, + {1, "nose.osaka.jp", 3, false}, + {1, "osakasayama.osaka.jp", 3, false}, + {1, "sakai.osaka.jp", 3, false}, + {1, "sayama.osaka.jp", 3, false}, + {1, "sennan.osaka.jp", 3, false}, + {1, "settsu.osaka.jp", 3, 
false}, + {1, "shijonawate.osaka.jp", 3, false}, + {1, "shimamoto.osaka.jp", 3, false}, + {1, "suita.osaka.jp", 3, false}, + {1, "tadaoka.osaka.jp", 3, false}, + {1, "taishi.osaka.jp", 3, false}, + {1, "tajiri.osaka.jp", 3, false}, + {1, "takaishi.osaka.jp", 3, false}, + {1, "takatsuki.osaka.jp", 3, false}, + {1, "tondabayashi.osaka.jp", 3, false}, + {1, "toyonaka.osaka.jp", 3, false}, + {1, "toyono.osaka.jp", 3, false}, + {1, "yao.osaka.jp", 3, false}, + {1, "ariake.saga.jp", 3, false}, + {1, "arita.saga.jp", 3, false}, + {1, "fukudomi.saga.jp", 3, false}, + {1, "genkai.saga.jp", 3, false}, + {1, "hamatama.saga.jp", 3, false}, + {1, "hizen.saga.jp", 3, false}, + {1, "imari.saga.jp", 3, false}, + {1, "kamimine.saga.jp", 3, false}, + {1, "kanzaki.saga.jp", 3, false}, + {1, "karatsu.saga.jp", 3, false}, + {1, "kashima.saga.jp", 3, false}, + {1, "kitagata.saga.jp", 3, false}, + {1, "kitahata.saga.jp", 3, false}, + {1, "kiyama.saga.jp", 3, false}, + {1, "kouhoku.saga.jp", 3, false}, + {1, "kyuragi.saga.jp", 3, false}, + {1, "nishiarita.saga.jp", 3, false}, + {1, "ogi.saga.jp", 3, false}, + {1, "omachi.saga.jp", 3, false}, + {1, "ouchi.saga.jp", 3, false}, + {1, "saga.saga.jp", 3, false}, + {1, "shiroishi.saga.jp", 3, false}, + {1, "taku.saga.jp", 3, false}, + {1, "tara.saga.jp", 3, false}, + {1, "tosu.saga.jp", 3, false}, + {1, "yoshinogari.saga.jp", 3, false}, + {1, "arakawa.saitama.jp", 3, false}, + {1, "asaka.saitama.jp", 3, false}, + {1, "chichibu.saitama.jp", 3, false}, + {1, "fujimi.saitama.jp", 3, false}, + {1, "fujimino.saitama.jp", 3, false}, + {1, "fukaya.saitama.jp", 3, false}, + {1, "hanno.saitama.jp", 3, false}, + {1, "hanyu.saitama.jp", 3, false}, + {1, "hasuda.saitama.jp", 3, false}, + {1, "hatogaya.saitama.jp", 3, false}, + {1, "hatoyama.saitama.jp", 3, false}, + {1, "hidaka.saitama.jp", 3, false}, + {1, "higashichichibu.saitama.jp", 3, false}, + {1, "higashimatsuyama.saitama.jp", 3, false}, + {1, "honjo.saitama.jp", 3, false}, + {1, "ina.saitama.jp", 
3, false}, + {1, "iruma.saitama.jp", 3, false}, + {1, "iwatsuki.saitama.jp", 3, false}, + {1, "kamiizumi.saitama.jp", 3, false}, + {1, "kamikawa.saitama.jp", 3, false}, + {1, "kamisato.saitama.jp", 3, false}, + {1, "kasukabe.saitama.jp", 3, false}, + {1, "kawagoe.saitama.jp", 3, false}, + {1, "kawaguchi.saitama.jp", 3, false}, + {1, "kawajima.saitama.jp", 3, false}, + {1, "kazo.saitama.jp", 3, false}, + {1, "kitamoto.saitama.jp", 3, false}, + {1, "koshigaya.saitama.jp", 3, false}, + {1, "kounosu.saitama.jp", 3, false}, + {1, "kuki.saitama.jp", 3, false}, + {1, "kumagaya.saitama.jp", 3, false}, + {1, "matsubushi.saitama.jp", 3, false}, + {1, "minano.saitama.jp", 3, false}, + {1, "misato.saitama.jp", 3, false}, + {1, "miyashiro.saitama.jp", 3, false}, + {1, "miyoshi.saitama.jp", 3, false}, + {1, "moroyama.saitama.jp", 3, false}, + {1, "nagatoro.saitama.jp", 3, false}, + {1, "namegawa.saitama.jp", 3, false}, + {1, "niiza.saitama.jp", 3, false}, + {1, "ogano.saitama.jp", 3, false}, + {1, "ogawa.saitama.jp", 3, false}, + {1, "ogose.saitama.jp", 3, false}, + {1, "okegawa.saitama.jp", 3, false}, + {1, "omiya.saitama.jp", 3, false}, + {1, "otaki.saitama.jp", 3, false}, + {1, "ranzan.saitama.jp", 3, false}, + {1, "ryokami.saitama.jp", 3, false}, + {1, "saitama.saitama.jp", 3, false}, + {1, "sakado.saitama.jp", 3, false}, + {1, "satte.saitama.jp", 3, false}, + {1, "sayama.saitama.jp", 3, false}, + {1, "shiki.saitama.jp", 3, false}, + {1, "shiraoka.saitama.jp", 3, false}, + {1, "soka.saitama.jp", 3, false}, + {1, "sugito.saitama.jp", 3, false}, + {1, "toda.saitama.jp", 3, false}, + {1, "tokigawa.saitama.jp", 3, false}, + {1, "tokorozawa.saitama.jp", 3, false}, + {1, "tsurugashima.saitama.jp", 3, false}, + {1, "urawa.saitama.jp", 3, false}, + {1, "warabi.saitama.jp", 3, false}, + {1, "yashio.saitama.jp", 3, false}, + {1, "yokoze.saitama.jp", 3, false}, + {1, "yono.saitama.jp", 3, false}, + {1, "yorii.saitama.jp", 3, false}, + {1, "yoshida.saitama.jp", 3, false}, + {1, 
"yoshikawa.saitama.jp", 3, false}, + {1, "yoshimi.saitama.jp", 3, false}, + {1, "aisho.shiga.jp", 3, false}, + {1, "gamo.shiga.jp", 3, false}, + {1, "higashiomi.shiga.jp", 3, false}, + {1, "hikone.shiga.jp", 3, false}, + {1, "koka.shiga.jp", 3, false}, + {1, "konan.shiga.jp", 3, false}, + {1, "kosei.shiga.jp", 3, false}, + {1, "koto.shiga.jp", 3, false}, + {1, "kusatsu.shiga.jp", 3, false}, + {1, "maibara.shiga.jp", 3, false}, + {1, "moriyama.shiga.jp", 3, false}, + {1, "nagahama.shiga.jp", 3, false}, + {1, "nishiazai.shiga.jp", 3, false}, + {1, "notogawa.shiga.jp", 3, false}, + {1, "omihachiman.shiga.jp", 3, false}, + {1, "otsu.shiga.jp", 3, false}, + {1, "ritto.shiga.jp", 3, false}, + {1, "ryuoh.shiga.jp", 3, false}, + {1, "takashima.shiga.jp", 3, false}, + {1, "takatsuki.shiga.jp", 3, false}, + {1, "torahime.shiga.jp", 3, false}, + {1, "toyosato.shiga.jp", 3, false}, + {1, "yasu.shiga.jp", 3, false}, + {1, "akagi.shimane.jp", 3, false}, + {1, "ama.shimane.jp", 3, false}, + {1, "gotsu.shimane.jp", 3, false}, + {1, "hamada.shimane.jp", 3, false}, + {1, "higashiizumo.shimane.jp", 3, false}, + {1, "hikawa.shimane.jp", 3, false}, + {1, "hikimi.shimane.jp", 3, false}, + {1, "izumo.shimane.jp", 3, false}, + {1, "kakinoki.shimane.jp", 3, false}, + {1, "masuda.shimane.jp", 3, false}, + {1, "matsue.shimane.jp", 3, false}, + {1, "misato.shimane.jp", 3, false}, + {1, "nishinoshima.shimane.jp", 3, false}, + {1, "ohda.shimane.jp", 3, false}, + {1, "okinoshima.shimane.jp", 3, false}, + {1, "okuizumo.shimane.jp", 3, false}, + {1, "shimane.shimane.jp", 3, false}, + {1, "tamayu.shimane.jp", 3, false}, + {1, "tsuwano.shimane.jp", 3, false}, + {1, "unnan.shimane.jp", 3, false}, + {1, "yakumo.shimane.jp", 3, false}, + {1, "yasugi.shimane.jp", 3, false}, + {1, "yatsuka.shimane.jp", 3, false}, + {1, "arai.shizuoka.jp", 3, false}, + {1, "atami.shizuoka.jp", 3, false}, + {1, "fuji.shizuoka.jp", 3, false}, + {1, "fujieda.shizuoka.jp", 3, false}, + {1, "fujikawa.shizuoka.jp", 3, false}, + 
{1, "fujinomiya.shizuoka.jp", 3, false}, + {1, "fukuroi.shizuoka.jp", 3, false}, + {1, "gotemba.shizuoka.jp", 3, false}, + {1, "haibara.shizuoka.jp", 3, false}, + {1, "hamamatsu.shizuoka.jp", 3, false}, + {1, "higashiizu.shizuoka.jp", 3, false}, + {1, "ito.shizuoka.jp", 3, false}, + {1, "iwata.shizuoka.jp", 3, false}, + {1, "izu.shizuoka.jp", 3, false}, + {1, "izunokuni.shizuoka.jp", 3, false}, + {1, "kakegawa.shizuoka.jp", 3, false}, + {1, "kannami.shizuoka.jp", 3, false}, + {1, "kawanehon.shizuoka.jp", 3, false}, + {1, "kawazu.shizuoka.jp", 3, false}, + {1, "kikugawa.shizuoka.jp", 3, false}, + {1, "kosai.shizuoka.jp", 3, false}, + {1, "makinohara.shizuoka.jp", 3, false}, + {1, "matsuzaki.shizuoka.jp", 3, false}, + {1, "minamiizu.shizuoka.jp", 3, false}, + {1, "mishima.shizuoka.jp", 3, false}, + {1, "morimachi.shizuoka.jp", 3, false}, + {1, "nishiizu.shizuoka.jp", 3, false}, + {1, "numazu.shizuoka.jp", 3, false}, + {1, "omaezaki.shizuoka.jp", 3, false}, + {1, "shimada.shizuoka.jp", 3, false}, + {1, "shimizu.shizuoka.jp", 3, false}, + {1, "shimoda.shizuoka.jp", 3, false}, + {1, "shizuoka.shizuoka.jp", 3, false}, + {1, "susono.shizuoka.jp", 3, false}, + {1, "yaizu.shizuoka.jp", 3, false}, + {1, "yoshida.shizuoka.jp", 3, false}, + {1, "ashikaga.tochigi.jp", 3, false}, + {1, "bato.tochigi.jp", 3, false}, + {1, "haga.tochigi.jp", 3, false}, + {1, "ichikai.tochigi.jp", 3, false}, + {1, "iwafune.tochigi.jp", 3, false}, + {1, "kaminokawa.tochigi.jp", 3, false}, + {1, "kanuma.tochigi.jp", 3, false}, + {1, "karasuyama.tochigi.jp", 3, false}, + {1, "kuroiso.tochigi.jp", 3, false}, + {1, "mashiko.tochigi.jp", 3, false}, + {1, "mibu.tochigi.jp", 3, false}, + {1, "moka.tochigi.jp", 3, false}, + {1, "motegi.tochigi.jp", 3, false}, + {1, "nasu.tochigi.jp", 3, false}, + {1, "nasushiobara.tochigi.jp", 3, false}, + {1, "nikko.tochigi.jp", 3, false}, + {1, "nishikata.tochigi.jp", 3, false}, + {1, "nogi.tochigi.jp", 3, false}, + {1, "ohira.tochigi.jp", 3, false}, + {1, 
"ohtawara.tochigi.jp", 3, false}, + {1, "oyama.tochigi.jp", 3, false}, + {1, "sakura.tochigi.jp", 3, false}, + {1, "sano.tochigi.jp", 3, false}, + {1, "shimotsuke.tochigi.jp", 3, false}, + {1, "shioya.tochigi.jp", 3, false}, + {1, "takanezawa.tochigi.jp", 3, false}, + {1, "tochigi.tochigi.jp", 3, false}, + {1, "tsuga.tochigi.jp", 3, false}, + {1, "ujiie.tochigi.jp", 3, false}, + {1, "utsunomiya.tochigi.jp", 3, false}, + {1, "yaita.tochigi.jp", 3, false}, + {1, "aizumi.tokushima.jp", 3, false}, + {1, "anan.tokushima.jp", 3, false}, + {1, "ichiba.tokushima.jp", 3, false}, + {1, "itano.tokushima.jp", 3, false}, + {1, "kainan.tokushima.jp", 3, false}, + {1, "komatsushima.tokushima.jp", 3, false}, + {1, "matsushige.tokushima.jp", 3, false}, + {1, "mima.tokushima.jp", 3, false}, + {1, "minami.tokushima.jp", 3, false}, + {1, "miyoshi.tokushima.jp", 3, false}, + {1, "mugi.tokushima.jp", 3, false}, + {1, "nakagawa.tokushima.jp", 3, false}, + {1, "naruto.tokushima.jp", 3, false}, + {1, "sanagochi.tokushima.jp", 3, false}, + {1, "shishikui.tokushima.jp", 3, false}, + {1, "tokushima.tokushima.jp", 3, false}, + {1, "wajiki.tokushima.jp", 3, false}, + {1, "adachi.tokyo.jp", 3, false}, + {1, "akiruno.tokyo.jp", 3, false}, + {1, "akishima.tokyo.jp", 3, false}, + {1, "aogashima.tokyo.jp", 3, false}, + {1, "arakawa.tokyo.jp", 3, false}, + {1, "bunkyo.tokyo.jp", 3, false}, + {1, "chiyoda.tokyo.jp", 3, false}, + {1, "chofu.tokyo.jp", 3, false}, + {1, "chuo.tokyo.jp", 3, false}, + {1, "edogawa.tokyo.jp", 3, false}, + {1, "fuchu.tokyo.jp", 3, false}, + {1, "fussa.tokyo.jp", 3, false}, + {1, "hachijo.tokyo.jp", 3, false}, + {1, "hachioji.tokyo.jp", 3, false}, + {1, "hamura.tokyo.jp", 3, false}, + {1, "higashikurume.tokyo.jp", 3, false}, + {1, "higashimurayama.tokyo.jp", 3, false}, + {1, "higashiyamato.tokyo.jp", 3, false}, + {1, "hino.tokyo.jp", 3, false}, + {1, "hinode.tokyo.jp", 3, false}, + {1, "hinohara.tokyo.jp", 3, false}, + {1, "inagi.tokyo.jp", 3, false}, + {1, 
"itabashi.tokyo.jp", 3, false}, + {1, "katsushika.tokyo.jp", 3, false}, + {1, "kita.tokyo.jp", 3, false}, + {1, "kiyose.tokyo.jp", 3, false}, + {1, "kodaira.tokyo.jp", 3, false}, + {1, "koganei.tokyo.jp", 3, false}, + {1, "kokubunji.tokyo.jp", 3, false}, + {1, "komae.tokyo.jp", 3, false}, + {1, "koto.tokyo.jp", 3, false}, + {1, "kouzushima.tokyo.jp", 3, false}, + {1, "kunitachi.tokyo.jp", 3, false}, + {1, "machida.tokyo.jp", 3, false}, + {1, "meguro.tokyo.jp", 3, false}, + {1, "minato.tokyo.jp", 3, false}, + {1, "mitaka.tokyo.jp", 3, false}, + {1, "mizuho.tokyo.jp", 3, false}, + {1, "musashimurayama.tokyo.jp", 3, false}, + {1, "musashino.tokyo.jp", 3, false}, + {1, "nakano.tokyo.jp", 3, false}, + {1, "nerima.tokyo.jp", 3, false}, + {1, "ogasawara.tokyo.jp", 3, false}, + {1, "okutama.tokyo.jp", 3, false}, + {1, "ome.tokyo.jp", 3, false}, + {1, "oshima.tokyo.jp", 3, false}, + {1, "ota.tokyo.jp", 3, false}, + {1, "setagaya.tokyo.jp", 3, false}, + {1, "shibuya.tokyo.jp", 3, false}, + {1, "shinagawa.tokyo.jp", 3, false}, + {1, "shinjuku.tokyo.jp", 3, false}, + {1, "suginami.tokyo.jp", 3, false}, + {1, "sumida.tokyo.jp", 3, false}, + {1, "tachikawa.tokyo.jp", 3, false}, + {1, "taito.tokyo.jp", 3, false}, + {1, "tama.tokyo.jp", 3, false}, + {1, "toshima.tokyo.jp", 3, false}, + {1, "chizu.tottori.jp", 3, false}, + {1, "hino.tottori.jp", 3, false}, + {1, "kawahara.tottori.jp", 3, false}, + {1, "koge.tottori.jp", 3, false}, + {1, "kotoura.tottori.jp", 3, false}, + {1, "misasa.tottori.jp", 3, false}, + {1, "nanbu.tottori.jp", 3, false}, + {1, "nichinan.tottori.jp", 3, false}, + {1, "sakaiminato.tottori.jp", 3, false}, + {1, "tottori.tottori.jp", 3, false}, + {1, "wakasa.tottori.jp", 3, false}, + {1, "yazu.tottori.jp", 3, false}, + {1, "yonago.tottori.jp", 3, false}, + {1, "asahi.toyama.jp", 3, false}, + {1, "fuchu.toyama.jp", 3, false}, + {1, "fukumitsu.toyama.jp", 3, false}, + {1, "funahashi.toyama.jp", 3, false}, + {1, "himi.toyama.jp", 3, false}, + {1, "imizu.toyama.jp", 
3, false}, + {1, "inami.toyama.jp", 3, false}, + {1, "johana.toyama.jp", 3, false}, + {1, "kamiichi.toyama.jp", 3, false}, + {1, "kurobe.toyama.jp", 3, false}, + {1, "nakaniikawa.toyama.jp", 3, false}, + {1, "namerikawa.toyama.jp", 3, false}, + {1, "nanto.toyama.jp", 3, false}, + {1, "nyuzen.toyama.jp", 3, false}, + {1, "oyabe.toyama.jp", 3, false}, + {1, "taira.toyama.jp", 3, false}, + {1, "takaoka.toyama.jp", 3, false}, + {1, "tateyama.toyama.jp", 3, false}, + {1, "toga.toyama.jp", 3, false}, + {1, "tonami.toyama.jp", 3, false}, + {1, "toyama.toyama.jp", 3, false}, + {1, "unazuki.toyama.jp", 3, false}, + {1, "uozu.toyama.jp", 3, false}, + {1, "yamada.toyama.jp", 3, false}, + {1, "arida.wakayama.jp", 3, false}, + {1, "aridagawa.wakayama.jp", 3, false}, + {1, "gobo.wakayama.jp", 3, false}, + {1, "hashimoto.wakayama.jp", 3, false}, + {1, "hidaka.wakayama.jp", 3, false}, + {1, "hirogawa.wakayama.jp", 3, false}, + {1, "inami.wakayama.jp", 3, false}, + {1, "iwade.wakayama.jp", 3, false}, + {1, "kainan.wakayama.jp", 3, false}, + {1, "kamitonda.wakayama.jp", 3, false}, + {1, "katsuragi.wakayama.jp", 3, false}, + {1, "kimino.wakayama.jp", 3, false}, + {1, "kinokawa.wakayama.jp", 3, false}, + {1, "kitayama.wakayama.jp", 3, false}, + {1, "koya.wakayama.jp", 3, false}, + {1, "koza.wakayama.jp", 3, false}, + {1, "kozagawa.wakayama.jp", 3, false}, + {1, "kudoyama.wakayama.jp", 3, false}, + {1, "kushimoto.wakayama.jp", 3, false}, + {1, "mihama.wakayama.jp", 3, false}, + {1, "misato.wakayama.jp", 3, false}, + {1, "nachikatsuura.wakayama.jp", 3, false}, + {1, "shingu.wakayama.jp", 3, false}, + {1, "shirahama.wakayama.jp", 3, false}, + {1, "taiji.wakayama.jp", 3, false}, + {1, "tanabe.wakayama.jp", 3, false}, + {1, "wakayama.wakayama.jp", 3, false}, + {1, "yuasa.wakayama.jp", 3, false}, + {1, "yura.wakayama.jp", 3, false}, + {1, "asahi.yamagata.jp", 3, false}, + {1, "funagata.yamagata.jp", 3, false}, + {1, "higashine.yamagata.jp", 3, false}, + {1, "iide.yamagata.jp", 3, false}, + 
{1, "kahoku.yamagata.jp", 3, false}, + {1, "kaminoyama.yamagata.jp", 3, false}, + {1, "kaneyama.yamagata.jp", 3, false}, + {1, "kawanishi.yamagata.jp", 3, false}, + {1, "mamurogawa.yamagata.jp", 3, false}, + {1, "mikawa.yamagata.jp", 3, false}, + {1, "murayama.yamagata.jp", 3, false}, + {1, "nagai.yamagata.jp", 3, false}, + {1, "nakayama.yamagata.jp", 3, false}, + {1, "nanyo.yamagata.jp", 3, false}, + {1, "nishikawa.yamagata.jp", 3, false}, + {1, "obanazawa.yamagata.jp", 3, false}, + {1, "oe.yamagata.jp", 3, false}, + {1, "oguni.yamagata.jp", 3, false}, + {1, "ohkura.yamagata.jp", 3, false}, + {1, "oishida.yamagata.jp", 3, false}, + {1, "sagae.yamagata.jp", 3, false}, + {1, "sakata.yamagata.jp", 3, false}, + {1, "sakegawa.yamagata.jp", 3, false}, + {1, "shinjo.yamagata.jp", 3, false}, + {1, "shirataka.yamagata.jp", 3, false}, + {1, "shonai.yamagata.jp", 3, false}, + {1, "takahata.yamagata.jp", 3, false}, + {1, "tendo.yamagata.jp", 3, false}, + {1, "tozawa.yamagata.jp", 3, false}, + {1, "tsuruoka.yamagata.jp", 3, false}, + {1, "yamagata.yamagata.jp", 3, false}, + {1, "yamanobe.yamagata.jp", 3, false}, + {1, "yonezawa.yamagata.jp", 3, false}, + {1, "yuza.yamagata.jp", 3, false}, + {1, "abu.yamaguchi.jp", 3, false}, + {1, "hagi.yamaguchi.jp", 3, false}, + {1, "hikari.yamaguchi.jp", 3, false}, + {1, "hofu.yamaguchi.jp", 3, false}, + {1, "iwakuni.yamaguchi.jp", 3, false}, + {1, "kudamatsu.yamaguchi.jp", 3, false}, + {1, "mitou.yamaguchi.jp", 3, false}, + {1, "nagato.yamaguchi.jp", 3, false}, + {1, "oshima.yamaguchi.jp", 3, false}, + {1, "shimonoseki.yamaguchi.jp", 3, false}, + {1, "shunan.yamaguchi.jp", 3, false}, + {1, "tabuse.yamaguchi.jp", 3, false}, + {1, "tokuyama.yamaguchi.jp", 3, false}, + {1, "toyota.yamaguchi.jp", 3, false}, + {1, "ube.yamaguchi.jp", 3, false}, + {1, "yuu.yamaguchi.jp", 3, false}, + {1, "chuo.yamanashi.jp", 3, false}, + {1, "doshi.yamanashi.jp", 3, false}, + {1, "fuefuki.yamanashi.jp", 3, false}, + {1, "fujikawa.yamanashi.jp", 3, false}, + {1, 
"fujikawaguchiko.yamanashi.jp", 3, false}, + {1, "fujiyoshida.yamanashi.jp", 3, false}, + {1, "hayakawa.yamanashi.jp", 3, false}, + {1, "hokuto.yamanashi.jp", 3, false}, + {1, "ichikawamisato.yamanashi.jp", 3, false}, + {1, "kai.yamanashi.jp", 3, false}, + {1, "kofu.yamanashi.jp", 3, false}, + {1, "koshu.yamanashi.jp", 3, false}, + {1, "kosuge.yamanashi.jp", 3, false}, + {1, "minami-alps.yamanashi.jp", 3, false}, + {1, "minobu.yamanashi.jp", 3, false}, + {1, "nakamichi.yamanashi.jp", 3, false}, + {1, "nanbu.yamanashi.jp", 3, false}, + {1, "narusawa.yamanashi.jp", 3, false}, + {1, "nirasaki.yamanashi.jp", 3, false}, + {1, "nishikatsura.yamanashi.jp", 3, false}, + {1, "oshino.yamanashi.jp", 3, false}, + {1, "otsuki.yamanashi.jp", 3, false}, + {1, "showa.yamanashi.jp", 3, false}, + {1, "tabayama.yamanashi.jp", 3, false}, + {1, "tsuru.yamanashi.jp", 3, false}, + {1, "uenohara.yamanashi.jp", 3, false}, + {1, "yamanakako.yamanashi.jp", 3, false}, + {1, "yamanashi.yamanashi.jp", 3, false}, + {1, "ke", 1, false}, + {1, "ac.ke", 2, false}, + {1, "co.ke", 2, false}, + {1, "go.ke", 2, false}, + {1, "info.ke", 2, false}, + {1, "me.ke", 2, false}, + {1, "mobi.ke", 2, false}, + {1, "ne.ke", 2, false}, + {1, "or.ke", 2, false}, + {1, "sc.ke", 2, false}, + {1, "kg", 1, false}, + {1, "org.kg", 2, false}, + {1, "net.kg", 2, false}, + {1, "com.kg", 2, false}, + {1, "edu.kg", 2, false}, + {1, "gov.kg", 2, false}, + {1, "mil.kg", 2, false}, + {2, "kh", 2, false}, + {1, "ki", 1, false}, + {1, "edu.ki", 2, false}, + {1, "biz.ki", 2, false}, + {1, "net.ki", 2, false}, + {1, "org.ki", 2, false}, + {1, "gov.ki", 2, false}, + {1, "info.ki", 2, false}, + {1, "com.ki", 2, false}, + {1, "km", 1, false}, + {1, "org.km", 2, false}, + {1, "nom.km", 2, false}, + {1, "gov.km", 2, false}, + {1, "prd.km", 2, false}, + {1, "tm.km", 2, false}, + {1, "edu.km", 2, false}, + {1, "mil.km", 2, false}, + {1, "ass.km", 2, false}, + {1, "com.km", 2, false}, + {1, "coop.km", 2, false}, + {1, "asso.km", 2, 
false}, + {1, "presse.km", 2, false}, + {1, "medecin.km", 2, false}, + {1, "notaires.km", 2, false}, + {1, "pharmaciens.km", 2, false}, + {1, "veterinaire.km", 2, false}, + {1, "gouv.km", 2, false}, + {1, "kn", 1, false}, + {1, "net.kn", 2, false}, + {1, "org.kn", 2, false}, + {1, "edu.kn", 2, false}, + {1, "gov.kn", 2, false}, + {1, "kp", 1, false}, + {1, "com.kp", 2, false}, + {1, "edu.kp", 2, false}, + {1, "gov.kp", 2, false}, + {1, "org.kp", 2, false}, + {1, "rep.kp", 2, false}, + {1, "tra.kp", 2, false}, + {1, "kr", 1, false}, + {1, "ac.kr", 2, false}, + {1, "co.kr", 2, false}, + {1, "es.kr", 2, false}, + {1, "go.kr", 2, false}, + {1, "hs.kr", 2, false}, + {1, "kg.kr", 2, false}, + {1, "mil.kr", 2, false}, + {1, "ms.kr", 2, false}, + {1, "ne.kr", 2, false}, + {1, "or.kr", 2, false}, + {1, "pe.kr", 2, false}, + {1, "re.kr", 2, false}, + {1, "sc.kr", 2, false}, + {1, "busan.kr", 2, false}, + {1, "chungbuk.kr", 2, false}, + {1, "chungnam.kr", 2, false}, + {1, "daegu.kr", 2, false}, + {1, "daejeon.kr", 2, false}, + {1, "gangwon.kr", 2, false}, + {1, "gwangju.kr", 2, false}, + {1, "gyeongbuk.kr", 2, false}, + {1, "gyeonggi.kr", 2, false}, + {1, "gyeongnam.kr", 2, false}, + {1, "incheon.kr", 2, false}, + {1, "jeju.kr", 2, false}, + {1, "jeonbuk.kr", 2, false}, + {1, "jeonnam.kr", 2, false}, + {1, "seoul.kr", 2, false}, + {1, "ulsan.kr", 2, false}, + {1, "kw", 1, false}, + {1, "com.kw", 2, false}, + {1, "edu.kw", 2, false}, + {1, "emb.kw", 2, false}, + {1, "gov.kw", 2, false}, + {1, "ind.kw", 2, false}, + {1, "net.kw", 2, false}, + {1, "org.kw", 2, false}, + {1, "ky", 1, false}, + {1, "edu.ky", 2, false}, + {1, "gov.ky", 2, false}, + {1, "com.ky", 2, false}, + {1, "org.ky", 2, false}, + {1, "net.ky", 2, false}, + {1, "kz", 1, false}, + {1, "org.kz", 2, false}, + {1, "edu.kz", 2, false}, + {1, "net.kz", 2, false}, + {1, "gov.kz", 2, false}, + {1, "mil.kz", 2, false}, + {1, "com.kz", 2, false}, + {1, "la", 1, false}, + {1, "int.la", 2, false}, + {1, "net.la", 2, 
false}, + {1, "info.la", 2, false}, + {1, "edu.la", 2, false}, + {1, "gov.la", 2, false}, + {1, "per.la", 2, false}, + {1, "com.la", 2, false}, + {1, "org.la", 2, false}, + {1, "lb", 1, false}, + {1, "com.lb", 2, false}, + {1, "edu.lb", 2, false}, + {1, "gov.lb", 2, false}, + {1, "net.lb", 2, false}, + {1, "org.lb", 2, false}, + {1, "lc", 1, false}, + {1, "com.lc", 2, false}, + {1, "net.lc", 2, false}, + {1, "co.lc", 2, false}, + {1, "org.lc", 2, false}, + {1, "edu.lc", 2, false}, + {1, "gov.lc", 2, false}, + {1, "li", 1, false}, + {1, "lk", 1, false}, + {1, "gov.lk", 2, false}, + {1, "sch.lk", 2, false}, + {1, "net.lk", 2, false}, + {1, "int.lk", 2, false}, + {1, "com.lk", 2, false}, + {1, "org.lk", 2, false}, + {1, "edu.lk", 2, false}, + {1, "ngo.lk", 2, false}, + {1, "soc.lk", 2, false}, + {1, "web.lk", 2, false}, + {1, "ltd.lk", 2, false}, + {1, "assn.lk", 2, false}, + {1, "grp.lk", 2, false}, + {1, "hotel.lk", 2, false}, + {1, "ac.lk", 2, false}, + {1, "lr", 1, false}, + {1, "com.lr", 2, false}, + {1, "edu.lr", 2, false}, + {1, "gov.lr", 2, false}, + {1, "org.lr", 2, false}, + {1, "net.lr", 2, false}, + {1, "ls", 1, false}, + {1, "ac.ls", 2, false}, + {1, "biz.ls", 2, false}, + {1, "co.ls", 2, false}, + {1, "edu.ls", 2, false}, + {1, "gov.ls", 2, false}, + {1, "info.ls", 2, false}, + {1, "net.ls", 2, false}, + {1, "org.ls", 2, false}, + {1, "sc.ls", 2, false}, + {1, "lt", 1, false}, + {1, "gov.lt", 2, false}, + {1, "lu", 1, false}, + {1, "lv", 1, false}, + {1, "com.lv", 2, false}, + {1, "edu.lv", 2, false}, + {1, "gov.lv", 2, false}, + {1, "org.lv", 2, false}, + {1, "mil.lv", 2, false}, + {1, "id.lv", 2, false}, + {1, "net.lv", 2, false}, + {1, "asn.lv", 2, false}, + {1, "conf.lv", 2, false}, + {1, "ly", 1, false}, + {1, "com.ly", 2, false}, + {1, "net.ly", 2, false}, + {1, "gov.ly", 2, false}, + {1, "plc.ly", 2, false}, + {1, "edu.ly", 2, false}, + {1, "sch.ly", 2, false}, + {1, "med.ly", 2, false}, + {1, "org.ly", 2, false}, + {1, "id.ly", 2, false}, + {1, 
"ma", 1, false}, + {1, "co.ma", 2, false}, + {1, "net.ma", 2, false}, + {1, "gov.ma", 2, false}, + {1, "org.ma", 2, false}, + {1, "ac.ma", 2, false}, + {1, "press.ma", 2, false}, + {1, "mc", 1, false}, + {1, "tm.mc", 2, false}, + {1, "asso.mc", 2, false}, + {1, "md", 1, false}, + {1, "me", 1, false}, + {1, "co.me", 2, false}, + {1, "net.me", 2, false}, + {1, "org.me", 2, false}, + {1, "edu.me", 2, false}, + {1, "ac.me", 2, false}, + {1, "gov.me", 2, false}, + {1, "its.me", 2, false}, + {1, "priv.me", 2, false}, + {1, "mg", 1, false}, + {1, "org.mg", 2, false}, + {1, "nom.mg", 2, false}, + {1, "gov.mg", 2, false}, + {1, "prd.mg", 2, false}, + {1, "tm.mg", 2, false}, + {1, "edu.mg", 2, false}, + {1, "mil.mg", 2, false}, + {1, "com.mg", 2, false}, + {1, "co.mg", 2, false}, + {1, "mh", 1, false}, + {1, "mil", 1, false}, + {1, "mk", 1, false}, + {1, "com.mk", 2, false}, + {1, "org.mk", 2, false}, + {1, "net.mk", 2, false}, + {1, "edu.mk", 2, false}, + {1, "gov.mk", 2, false}, + {1, "inf.mk", 2, false}, + {1, "name.mk", 2, false}, + {1, "ml", 1, false}, + {1, "com.ml", 2, false}, + {1, "edu.ml", 2, false}, + {1, "gouv.ml", 2, false}, + {1, "gov.ml", 2, false}, + {1, "net.ml", 2, false}, + {1, "org.ml", 2, false}, + {1, "presse.ml", 2, false}, + {2, "mm", 2, false}, + {1, "mn", 1, false}, + {1, "gov.mn", 2, false}, + {1, "edu.mn", 2, false}, + {1, "org.mn", 2, false}, + {1, "mo", 1, false}, + {1, "com.mo", 2, false}, + {1, "net.mo", 2, false}, + {1, "org.mo", 2, false}, + {1, "edu.mo", 2, false}, + {1, "gov.mo", 2, false}, + {1, "mobi", 1, false}, + {1, "mp", 1, false}, + {1, "mq", 1, false}, + {1, "mr", 1, false}, + {1, "gov.mr", 2, false}, + {1, "ms", 1, false}, + {1, "com.ms", 2, false}, + {1, "edu.ms", 2, false}, + {1, "gov.ms", 2, false}, + {1, "net.ms", 2, false}, + {1, "org.ms", 2, false}, + {1, "mt", 1, false}, + {1, "com.mt", 2, false}, + {1, "edu.mt", 2, false}, + {1, "net.mt", 2, false}, + {1, "org.mt", 2, false}, + {1, "mu", 1, false}, + {1, "com.mu", 2, 
false}, + {1, "net.mu", 2, false}, + {1, "org.mu", 2, false}, + {1, "gov.mu", 2, false}, + {1, "ac.mu", 2, false}, + {1, "co.mu", 2, false}, + {1, "or.mu", 2, false}, + {1, "museum", 1, false}, + {1, "academy.museum", 2, false}, + {1, "agriculture.museum", 2, false}, + {1, "air.museum", 2, false}, + {1, "airguard.museum", 2, false}, + {1, "alabama.museum", 2, false}, + {1, "alaska.museum", 2, false}, + {1, "amber.museum", 2, false}, + {1, "ambulance.museum", 2, false}, + {1, "american.museum", 2, false}, + {1, "americana.museum", 2, false}, + {1, "americanantiques.museum", 2, false}, + {1, "americanart.museum", 2, false}, + {1, "amsterdam.museum", 2, false}, + {1, "and.museum", 2, false}, + {1, "annefrank.museum", 2, false}, + {1, "anthro.museum", 2, false}, + {1, "anthropology.museum", 2, false}, + {1, "antiques.museum", 2, false}, + {1, "aquarium.museum", 2, false}, + {1, "arboretum.museum", 2, false}, + {1, "archaeological.museum", 2, false}, + {1, "archaeology.museum", 2, false}, + {1, "architecture.museum", 2, false}, + {1, "art.museum", 2, false}, + {1, "artanddesign.museum", 2, false}, + {1, "artcenter.museum", 2, false}, + {1, "artdeco.museum", 2, false}, + {1, "arteducation.museum", 2, false}, + {1, "artgallery.museum", 2, false}, + {1, "arts.museum", 2, false}, + {1, "artsandcrafts.museum", 2, false}, + {1, "asmatart.museum", 2, false}, + {1, "assassination.museum", 2, false}, + {1, "assisi.museum", 2, false}, + {1, "association.museum", 2, false}, + {1, "astronomy.museum", 2, false}, + {1, "atlanta.museum", 2, false}, + {1, "austin.museum", 2, false}, + {1, "australia.museum", 2, false}, + {1, "automotive.museum", 2, false}, + {1, "aviation.museum", 2, false}, + {1, "axis.museum", 2, false}, + {1, "badajoz.museum", 2, false}, + {1, "baghdad.museum", 2, false}, + {1, "bahn.museum", 2, false}, + {1, "bale.museum", 2, false}, + {1, "baltimore.museum", 2, false}, + {1, "barcelona.museum", 2, false}, + {1, "baseball.museum", 2, false}, + {1, "basel.museum", 
2, false}, + {1, "baths.museum", 2, false}, + {1, "bauern.museum", 2, false}, + {1, "beauxarts.museum", 2, false}, + {1, "beeldengeluid.museum", 2, false}, + {1, "bellevue.museum", 2, false}, + {1, "bergbau.museum", 2, false}, + {1, "berkeley.museum", 2, false}, + {1, "berlin.museum", 2, false}, + {1, "bern.museum", 2, false}, + {1, "bible.museum", 2, false}, + {1, "bilbao.museum", 2, false}, + {1, "bill.museum", 2, false}, + {1, "birdart.museum", 2, false}, + {1, "birthplace.museum", 2, false}, + {1, "bonn.museum", 2, false}, + {1, "boston.museum", 2, false}, + {1, "botanical.museum", 2, false}, + {1, "botanicalgarden.museum", 2, false}, + {1, "botanicgarden.museum", 2, false}, + {1, "botany.museum", 2, false}, + {1, "brandywinevalley.museum", 2, false}, + {1, "brasil.museum", 2, false}, + {1, "bristol.museum", 2, false}, + {1, "british.museum", 2, false}, + {1, "britishcolumbia.museum", 2, false}, + {1, "broadcast.museum", 2, false}, + {1, "brunel.museum", 2, false}, + {1, "brussel.museum", 2, false}, + {1, "brussels.museum", 2, false}, + {1, "bruxelles.museum", 2, false}, + {1, "building.museum", 2, false}, + {1, "burghof.museum", 2, false}, + {1, "bus.museum", 2, false}, + {1, "bushey.museum", 2, false}, + {1, "cadaques.museum", 2, false}, + {1, "california.museum", 2, false}, + {1, "cambridge.museum", 2, false}, + {1, "can.museum", 2, false}, + {1, "canada.museum", 2, false}, + {1, "capebreton.museum", 2, false}, + {1, "carrier.museum", 2, false}, + {1, "cartoonart.museum", 2, false}, + {1, "casadelamoneda.museum", 2, false}, + {1, "castle.museum", 2, false}, + {1, "castres.museum", 2, false}, + {1, "celtic.museum", 2, false}, + {1, "center.museum", 2, false}, + {1, "chattanooga.museum", 2, false}, + {1, "cheltenham.museum", 2, false}, + {1, "chesapeakebay.museum", 2, false}, + {1, "chicago.museum", 2, false}, + {1, "children.museum", 2, false}, + {1, "childrens.museum", 2, false}, + {1, "childrensgarden.museum", 2, false}, + {1, "chiropractic.museum", 2, 
false}, + {1, "chocolate.museum", 2, false}, + {1, "christiansburg.museum", 2, false}, + {1, "cincinnati.museum", 2, false}, + {1, "cinema.museum", 2, false}, + {1, "circus.museum", 2, false}, + {1, "civilisation.museum", 2, false}, + {1, "civilization.museum", 2, false}, + {1, "civilwar.museum", 2, false}, + {1, "clinton.museum", 2, false}, + {1, "clock.museum", 2, false}, + {1, "coal.museum", 2, false}, + {1, "coastaldefence.museum", 2, false}, + {1, "cody.museum", 2, false}, + {1, "coldwar.museum", 2, false}, + {1, "collection.museum", 2, false}, + {1, "colonialwilliamsburg.museum", 2, false}, + {1, "coloradoplateau.museum", 2, false}, + {1, "columbia.museum", 2, false}, + {1, "columbus.museum", 2, false}, + {1, "communication.museum", 2, false}, + {1, "communications.museum", 2, false}, + {1, "community.museum", 2, false}, + {1, "computer.museum", 2, false}, + {1, "computerhistory.museum", 2, false}, + {1, "xn--comunicaes-v6a2o.museum", 2, false}, + {1, "contemporary.museum", 2, false}, + {1, "contemporaryart.museum", 2, false}, + {1, "convent.museum", 2, false}, + {1, "copenhagen.museum", 2, false}, + {1, "corporation.museum", 2, false}, + {1, "xn--correios-e-telecomunicaes-ghc29a.museum", 2, false}, + {1, "corvette.museum", 2, false}, + {1, "costume.museum", 2, false}, + {1, "countryestate.museum", 2, false}, + {1, "county.museum", 2, false}, + {1, "crafts.museum", 2, false}, + {1, "cranbrook.museum", 2, false}, + {1, "creation.museum", 2, false}, + {1, "cultural.museum", 2, false}, + {1, "culturalcenter.museum", 2, false}, + {1, "culture.museum", 2, false}, + {1, "cyber.museum", 2, false}, + {1, "cymru.museum", 2, false}, + {1, "dali.museum", 2, false}, + {1, "dallas.museum", 2, false}, + {1, "database.museum", 2, false}, + {1, "ddr.museum", 2, false}, + {1, "decorativearts.museum", 2, false}, + {1, "delaware.museum", 2, false}, + {1, "delmenhorst.museum", 2, false}, + {1, "denmark.museum", 2, false}, + {1, "depot.museum", 2, false}, + {1, "design.museum", 
2, false}, + {1, "detroit.museum", 2, false}, + {1, "dinosaur.museum", 2, false}, + {1, "discovery.museum", 2, false}, + {1, "dolls.museum", 2, false}, + {1, "donostia.museum", 2, false}, + {1, "durham.museum", 2, false}, + {1, "eastafrica.museum", 2, false}, + {1, "eastcoast.museum", 2, false}, + {1, "education.museum", 2, false}, + {1, "educational.museum", 2, false}, + {1, "egyptian.museum", 2, false}, + {1, "eisenbahn.museum", 2, false}, + {1, "elburg.museum", 2, false}, + {1, "elvendrell.museum", 2, false}, + {1, "embroidery.museum", 2, false}, + {1, "encyclopedic.museum", 2, false}, + {1, "england.museum", 2, false}, + {1, "entomology.museum", 2, false}, + {1, "environment.museum", 2, false}, + {1, "environmentalconservation.museum", 2, false}, + {1, "epilepsy.museum", 2, false}, + {1, "essex.museum", 2, false}, + {1, "estate.museum", 2, false}, + {1, "ethnology.museum", 2, false}, + {1, "exeter.museum", 2, false}, + {1, "exhibition.museum", 2, false}, + {1, "family.museum", 2, false}, + {1, "farm.museum", 2, false}, + {1, "farmequipment.museum", 2, false}, + {1, "farmers.museum", 2, false}, + {1, "farmstead.museum", 2, false}, + {1, "field.museum", 2, false}, + {1, "figueres.museum", 2, false}, + {1, "filatelia.museum", 2, false}, + {1, "film.museum", 2, false}, + {1, "fineart.museum", 2, false}, + {1, "finearts.museum", 2, false}, + {1, "finland.museum", 2, false}, + {1, "flanders.museum", 2, false}, + {1, "florida.museum", 2, false}, + {1, "force.museum", 2, false}, + {1, "fortmissoula.museum", 2, false}, + {1, "fortworth.museum", 2, false}, + {1, "foundation.museum", 2, false}, + {1, "francaise.museum", 2, false}, + {1, "frankfurt.museum", 2, false}, + {1, "franziskaner.museum", 2, false}, + {1, "freemasonry.museum", 2, false}, + {1, "freiburg.museum", 2, false}, + {1, "fribourg.museum", 2, false}, + {1, "frog.museum", 2, false}, + {1, "fundacio.museum", 2, false}, + {1, "furniture.museum", 2, false}, + {1, "gallery.museum", 2, false}, + {1, 
"garden.museum", 2, false}, + {1, "gateway.museum", 2, false}, + {1, "geelvinck.museum", 2, false}, + {1, "gemological.museum", 2, false}, + {1, "geology.museum", 2, false}, + {1, "georgia.museum", 2, false}, + {1, "giessen.museum", 2, false}, + {1, "glas.museum", 2, false}, + {1, "glass.museum", 2, false}, + {1, "gorge.museum", 2, false}, + {1, "grandrapids.museum", 2, false}, + {1, "graz.museum", 2, false}, + {1, "guernsey.museum", 2, false}, + {1, "halloffame.museum", 2, false}, + {1, "hamburg.museum", 2, false}, + {1, "handson.museum", 2, false}, + {1, "harvestcelebration.museum", 2, false}, + {1, "hawaii.museum", 2, false}, + {1, "health.museum", 2, false}, + {1, "heimatunduhren.museum", 2, false}, + {1, "hellas.museum", 2, false}, + {1, "helsinki.museum", 2, false}, + {1, "hembygdsforbund.museum", 2, false}, + {1, "heritage.museum", 2, false}, + {1, "histoire.museum", 2, false}, + {1, "historical.museum", 2, false}, + {1, "historicalsociety.museum", 2, false}, + {1, "historichouses.museum", 2, false}, + {1, "historisch.museum", 2, false}, + {1, "historisches.museum", 2, false}, + {1, "history.museum", 2, false}, + {1, "historyofscience.museum", 2, false}, + {1, "horology.museum", 2, false}, + {1, "house.museum", 2, false}, + {1, "humanities.museum", 2, false}, + {1, "illustration.museum", 2, false}, + {1, "imageandsound.museum", 2, false}, + {1, "indian.museum", 2, false}, + {1, "indiana.museum", 2, false}, + {1, "indianapolis.museum", 2, false}, + {1, "indianmarket.museum", 2, false}, + {1, "intelligence.museum", 2, false}, + {1, "interactive.museum", 2, false}, + {1, "iraq.museum", 2, false}, + {1, "iron.museum", 2, false}, + {1, "isleofman.museum", 2, false}, + {1, "jamison.museum", 2, false}, + {1, "jefferson.museum", 2, false}, + {1, "jerusalem.museum", 2, false}, + {1, "jewelry.museum", 2, false}, + {1, "jewish.museum", 2, false}, + {1, "jewishart.museum", 2, false}, + {1, "jfk.museum", 2, false}, + {1, "journalism.museum", 2, false}, + {1, 
"judaica.museum", 2, false}, + {1, "judygarland.museum", 2, false}, + {1, "juedisches.museum", 2, false}, + {1, "juif.museum", 2, false}, + {1, "karate.museum", 2, false}, + {1, "karikatur.museum", 2, false}, + {1, "kids.museum", 2, false}, + {1, "koebenhavn.museum", 2, false}, + {1, "koeln.museum", 2, false}, + {1, "kunst.museum", 2, false}, + {1, "kunstsammlung.museum", 2, false}, + {1, "kunstunddesign.museum", 2, false}, + {1, "labor.museum", 2, false}, + {1, "labour.museum", 2, false}, + {1, "lajolla.museum", 2, false}, + {1, "lancashire.museum", 2, false}, + {1, "landes.museum", 2, false}, + {1, "lans.museum", 2, false}, + {1, "xn--lns-qla.museum", 2, false}, + {1, "larsson.museum", 2, false}, + {1, "lewismiller.museum", 2, false}, + {1, "lincoln.museum", 2, false}, + {1, "linz.museum", 2, false}, + {1, "living.museum", 2, false}, + {1, "livinghistory.museum", 2, false}, + {1, "localhistory.museum", 2, false}, + {1, "london.museum", 2, false}, + {1, "losangeles.museum", 2, false}, + {1, "louvre.museum", 2, false}, + {1, "loyalist.museum", 2, false}, + {1, "lucerne.museum", 2, false}, + {1, "luxembourg.museum", 2, false}, + {1, "luzern.museum", 2, false}, + {1, "mad.museum", 2, false}, + {1, "madrid.museum", 2, false}, + {1, "mallorca.museum", 2, false}, + {1, "manchester.museum", 2, false}, + {1, "mansion.museum", 2, false}, + {1, "mansions.museum", 2, false}, + {1, "manx.museum", 2, false}, + {1, "marburg.museum", 2, false}, + {1, "maritime.museum", 2, false}, + {1, "maritimo.museum", 2, false}, + {1, "maryland.museum", 2, false}, + {1, "marylhurst.museum", 2, false}, + {1, "media.museum", 2, false}, + {1, "medical.museum", 2, false}, + {1, "medizinhistorisches.museum", 2, false}, + {1, "meeres.museum", 2, false}, + {1, "memorial.museum", 2, false}, + {1, "mesaverde.museum", 2, false}, + {1, "michigan.museum", 2, false}, + {1, "midatlantic.museum", 2, false}, + {1, "military.museum", 2, false}, + {1, "mill.museum", 2, false}, + {1, "miners.museum", 2, false}, 
+ {1, "mining.museum", 2, false}, + {1, "minnesota.museum", 2, false}, + {1, "missile.museum", 2, false}, + {1, "missoula.museum", 2, false}, + {1, "modern.museum", 2, false}, + {1, "moma.museum", 2, false}, + {1, "money.museum", 2, false}, + {1, "monmouth.museum", 2, false}, + {1, "monticello.museum", 2, false}, + {1, "montreal.museum", 2, false}, + {1, "moscow.museum", 2, false}, + {1, "motorcycle.museum", 2, false}, + {1, "muenchen.museum", 2, false}, + {1, "muenster.museum", 2, false}, + {1, "mulhouse.museum", 2, false}, + {1, "muncie.museum", 2, false}, + {1, "museet.museum", 2, false}, + {1, "museumcenter.museum", 2, false}, + {1, "museumvereniging.museum", 2, false}, + {1, "music.museum", 2, false}, + {1, "national.museum", 2, false}, + {1, "nationalfirearms.museum", 2, false}, + {1, "nationalheritage.museum", 2, false}, + {1, "nativeamerican.museum", 2, false}, + {1, "naturalhistory.museum", 2, false}, + {1, "naturalhistorymuseum.museum", 2, false}, + {1, "naturalsciences.museum", 2, false}, + {1, "nature.museum", 2, false}, + {1, "naturhistorisches.museum", 2, false}, + {1, "natuurwetenschappen.museum", 2, false}, + {1, "naumburg.museum", 2, false}, + {1, "naval.museum", 2, false}, + {1, "nebraska.museum", 2, false}, + {1, "neues.museum", 2, false}, + {1, "newhampshire.museum", 2, false}, + {1, "newjersey.museum", 2, false}, + {1, "newmexico.museum", 2, false}, + {1, "newport.museum", 2, false}, + {1, "newspaper.museum", 2, false}, + {1, "newyork.museum", 2, false}, + {1, "niepce.museum", 2, false}, + {1, "norfolk.museum", 2, false}, + {1, "north.museum", 2, false}, + {1, "nrw.museum", 2, false}, + {1, "nyc.museum", 2, false}, + {1, "nyny.museum", 2, false}, + {1, "oceanographic.museum", 2, false}, + {1, "oceanographique.museum", 2, false}, + {1, "omaha.museum", 2, false}, + {1, "online.museum", 2, false}, + {1, "ontario.museum", 2, false}, + {1, "openair.museum", 2, false}, + {1, "oregon.museum", 2, false}, + {1, "oregontrail.museum", 2, false}, + {1, 
"otago.museum", 2, false}, + {1, "oxford.museum", 2, false}, + {1, "pacific.museum", 2, false}, + {1, "paderborn.museum", 2, false}, + {1, "palace.museum", 2, false}, + {1, "paleo.museum", 2, false}, + {1, "palmsprings.museum", 2, false}, + {1, "panama.museum", 2, false}, + {1, "paris.museum", 2, false}, + {1, "pasadena.museum", 2, false}, + {1, "pharmacy.museum", 2, false}, + {1, "philadelphia.museum", 2, false}, + {1, "philadelphiaarea.museum", 2, false}, + {1, "philately.museum", 2, false}, + {1, "phoenix.museum", 2, false}, + {1, "photography.museum", 2, false}, + {1, "pilots.museum", 2, false}, + {1, "pittsburgh.museum", 2, false}, + {1, "planetarium.museum", 2, false}, + {1, "plantation.museum", 2, false}, + {1, "plants.museum", 2, false}, + {1, "plaza.museum", 2, false}, + {1, "portal.museum", 2, false}, + {1, "portland.museum", 2, false}, + {1, "portlligat.museum", 2, false}, + {1, "posts-and-telecommunications.museum", 2, false}, + {1, "preservation.museum", 2, false}, + {1, "presidio.museum", 2, false}, + {1, "press.museum", 2, false}, + {1, "project.museum", 2, false}, + {1, "public.museum", 2, false}, + {1, "pubol.museum", 2, false}, + {1, "quebec.museum", 2, false}, + {1, "railroad.museum", 2, false}, + {1, "railway.museum", 2, false}, + {1, "research.museum", 2, false}, + {1, "resistance.museum", 2, false}, + {1, "riodejaneiro.museum", 2, false}, + {1, "rochester.museum", 2, false}, + {1, "rockart.museum", 2, false}, + {1, "roma.museum", 2, false}, + {1, "russia.museum", 2, false}, + {1, "saintlouis.museum", 2, false}, + {1, "salem.museum", 2, false}, + {1, "salvadordali.museum", 2, false}, + {1, "salzburg.museum", 2, false}, + {1, "sandiego.museum", 2, false}, + {1, "sanfrancisco.museum", 2, false}, + {1, "santabarbara.museum", 2, false}, + {1, "santacruz.museum", 2, false}, + {1, "santafe.museum", 2, false}, + {1, "saskatchewan.museum", 2, false}, + {1, "satx.museum", 2, false}, + {1, "savannahga.museum", 2, false}, + {1, "schlesisches.museum", 2, 
false}, + {1, "schoenbrunn.museum", 2, false}, + {1, "schokoladen.museum", 2, false}, + {1, "school.museum", 2, false}, + {1, "schweiz.museum", 2, false}, + {1, "science.museum", 2, false}, + {1, "scienceandhistory.museum", 2, false}, + {1, "scienceandindustry.museum", 2, false}, + {1, "sciencecenter.museum", 2, false}, + {1, "sciencecenters.museum", 2, false}, + {1, "science-fiction.museum", 2, false}, + {1, "sciencehistory.museum", 2, false}, + {1, "sciences.museum", 2, false}, + {1, "sciencesnaturelles.museum", 2, false}, + {1, "scotland.museum", 2, false}, + {1, "seaport.museum", 2, false}, + {1, "settlement.museum", 2, false}, + {1, "settlers.museum", 2, false}, + {1, "shell.museum", 2, false}, + {1, "sherbrooke.museum", 2, false}, + {1, "sibenik.museum", 2, false}, + {1, "silk.museum", 2, false}, + {1, "ski.museum", 2, false}, + {1, "skole.museum", 2, false}, + {1, "society.museum", 2, false}, + {1, "sologne.museum", 2, false}, + {1, "soundandvision.museum", 2, false}, + {1, "southcarolina.museum", 2, false}, + {1, "southwest.museum", 2, false}, + {1, "space.museum", 2, false}, + {1, "spy.museum", 2, false}, + {1, "square.museum", 2, false}, + {1, "stadt.museum", 2, false}, + {1, "stalbans.museum", 2, false}, + {1, "starnberg.museum", 2, false}, + {1, "state.museum", 2, false}, + {1, "stateofdelaware.museum", 2, false}, + {1, "station.museum", 2, false}, + {1, "steam.museum", 2, false}, + {1, "steiermark.museum", 2, false}, + {1, "stjohn.museum", 2, false}, + {1, "stockholm.museum", 2, false}, + {1, "stpetersburg.museum", 2, false}, + {1, "stuttgart.museum", 2, false}, + {1, "suisse.museum", 2, false}, + {1, "surgeonshall.museum", 2, false}, + {1, "surrey.museum", 2, false}, + {1, "svizzera.museum", 2, false}, + {1, "sweden.museum", 2, false}, + {1, "sydney.museum", 2, false}, + {1, "tank.museum", 2, false}, + {1, "tcm.museum", 2, false}, + {1, "technology.museum", 2, false}, + {1, "telekommunikation.museum", 2, false}, + {1, "television.museum", 2, false}, + 
{1, "texas.museum", 2, false}, + {1, "textile.museum", 2, false}, + {1, "theater.museum", 2, false}, + {1, "time.museum", 2, false}, + {1, "timekeeping.museum", 2, false}, + {1, "topology.museum", 2, false}, + {1, "torino.museum", 2, false}, + {1, "touch.museum", 2, false}, + {1, "town.museum", 2, false}, + {1, "transport.museum", 2, false}, + {1, "tree.museum", 2, false}, + {1, "trolley.museum", 2, false}, + {1, "trust.museum", 2, false}, + {1, "trustee.museum", 2, false}, + {1, "uhren.museum", 2, false}, + {1, "ulm.museum", 2, false}, + {1, "undersea.museum", 2, false}, + {1, "university.museum", 2, false}, + {1, "usa.museum", 2, false}, + {1, "usantiques.museum", 2, false}, + {1, "usarts.museum", 2, false}, + {1, "uscountryestate.museum", 2, false}, + {1, "usculture.museum", 2, false}, + {1, "usdecorativearts.museum", 2, false}, + {1, "usgarden.museum", 2, false}, + {1, "ushistory.museum", 2, false}, + {1, "ushuaia.museum", 2, false}, + {1, "uslivinghistory.museum", 2, false}, + {1, "utah.museum", 2, false}, + {1, "uvic.museum", 2, false}, + {1, "valley.museum", 2, false}, + {1, "vantaa.museum", 2, false}, + {1, "versailles.museum", 2, false}, + {1, "viking.museum", 2, false}, + {1, "village.museum", 2, false}, + {1, "virginia.museum", 2, false}, + {1, "virtual.museum", 2, false}, + {1, "virtuel.museum", 2, false}, + {1, "vlaanderen.museum", 2, false}, + {1, "volkenkunde.museum", 2, false}, + {1, "wales.museum", 2, false}, + {1, "wallonie.museum", 2, false}, + {1, "war.museum", 2, false}, + {1, "washingtondc.museum", 2, false}, + {1, "watchandclock.museum", 2, false}, + {1, "watch-and-clock.museum", 2, false}, + {1, "western.museum", 2, false}, + {1, "westfalen.museum", 2, false}, + {1, "whaling.museum", 2, false}, + {1, "wildlife.museum", 2, false}, + {1, "williamsburg.museum", 2, false}, + {1, "windmill.museum", 2, false}, + {1, "workshop.museum", 2, false}, + {1, "york.museum", 2, false}, + {1, "yorkshire.museum", 2, false}, + {1, "yosemite.museum", 2, 
false}, + {1, "youth.museum", 2, false}, + {1, "zoological.museum", 2, false}, + {1, "zoology.museum", 2, false}, + {1, "xn--9dbhblg6di.museum", 2, false}, + {1, "xn--h1aegh.museum", 2, false}, + {1, "mv", 1, false}, + {1, "aero.mv", 2, false}, + {1, "biz.mv", 2, false}, + {1, "com.mv", 2, false}, + {1, "coop.mv", 2, false}, + {1, "edu.mv", 2, false}, + {1, "gov.mv", 2, false}, + {1, "info.mv", 2, false}, + {1, "int.mv", 2, false}, + {1, "mil.mv", 2, false}, + {1, "museum.mv", 2, false}, + {1, "name.mv", 2, false}, + {1, "net.mv", 2, false}, + {1, "org.mv", 2, false}, + {1, "pro.mv", 2, false}, + {1, "mw", 1, false}, + {1, "ac.mw", 2, false}, + {1, "biz.mw", 2, false}, + {1, "co.mw", 2, false}, + {1, "com.mw", 2, false}, + {1, "coop.mw", 2, false}, + {1, "edu.mw", 2, false}, + {1, "gov.mw", 2, false}, + {1, "int.mw", 2, false}, + {1, "museum.mw", 2, false}, + {1, "net.mw", 2, false}, + {1, "org.mw", 2, false}, + {1, "mx", 1, false}, + {1, "com.mx", 2, false}, + {1, "org.mx", 2, false}, + {1, "gob.mx", 2, false}, + {1, "edu.mx", 2, false}, + {1, "net.mx", 2, false}, + {1, "my", 1, false}, + {1, "com.my", 2, false}, + {1, "net.my", 2, false}, + {1, "org.my", 2, false}, + {1, "gov.my", 2, false}, + {1, "edu.my", 2, false}, + {1, "mil.my", 2, false}, + {1, "name.my", 2, false}, + {1, "mz", 1, false}, + {1, "ac.mz", 2, false}, + {1, "adv.mz", 2, false}, + {1, "co.mz", 2, false}, + {1, "edu.mz", 2, false}, + {1, "gov.mz", 2, false}, + {1, "mil.mz", 2, false}, + {1, "net.mz", 2, false}, + {1, "org.mz", 2, false}, + {1, "na", 1, false}, + {1, "info.na", 2, false}, + {1, "pro.na", 2, false}, + {1, "name.na", 2, false}, + {1, "school.na", 2, false}, + {1, "or.na", 2, false}, + {1, "dr.na", 2, false}, + {1, "us.na", 2, false}, + {1, "mx.na", 2, false}, + {1, "ca.na", 2, false}, + {1, "in.na", 2, false}, + {1, "cc.na", 2, false}, + {1, "tv.na", 2, false}, + {1, "ws.na", 2, false}, + {1, "mobi.na", 2, false}, + {1, "co.na", 2, false}, + {1, "com.na", 2, false}, + {1, "org.na", 
2, false}, + {1, "name", 1, false}, + {1, "nc", 1, false}, + {1, "asso.nc", 2, false}, + {1, "nom.nc", 2, false}, + {1, "ne", 1, false}, + {1, "net", 1, false}, + {1, "nf", 1, false}, + {1, "com.nf", 2, false}, + {1, "net.nf", 2, false}, + {1, "per.nf", 2, false}, + {1, "rec.nf", 2, false}, + {1, "web.nf", 2, false}, + {1, "arts.nf", 2, false}, + {1, "firm.nf", 2, false}, + {1, "info.nf", 2, false}, + {1, "other.nf", 2, false}, + {1, "store.nf", 2, false}, + {1, "ng", 1, false}, + {1, "com.ng", 2, false}, + {1, "edu.ng", 2, false}, + {1, "gov.ng", 2, false}, + {1, "i.ng", 2, false}, + {1, "mil.ng", 2, false}, + {1, "mobi.ng", 2, false}, + {1, "name.ng", 2, false}, + {1, "net.ng", 2, false}, + {1, "org.ng", 2, false}, + {1, "sch.ng", 2, false}, + {1, "ni", 1, false}, + {1, "ac.ni", 2, false}, + {1, "biz.ni", 2, false}, + {1, "co.ni", 2, false}, + {1, "com.ni", 2, false}, + {1, "edu.ni", 2, false}, + {1, "gob.ni", 2, false}, + {1, "in.ni", 2, false}, + {1, "info.ni", 2, false}, + {1, "int.ni", 2, false}, + {1, "mil.ni", 2, false}, + {1, "net.ni", 2, false}, + {1, "nom.ni", 2, false}, + {1, "org.ni", 2, false}, + {1, "web.ni", 2, false}, + {1, "nl", 1, false}, + {1, "no", 1, false}, + {1, "fhs.no", 2, false}, + {1, "vgs.no", 2, false}, + {1, "fylkesbibl.no", 2, false}, + {1, "folkebibl.no", 2, false}, + {1, "museum.no", 2, false}, + {1, "idrett.no", 2, false}, + {1, "priv.no", 2, false}, + {1, "mil.no", 2, false}, + {1, "stat.no", 2, false}, + {1, "dep.no", 2, false}, + {1, "kommune.no", 2, false}, + {1, "herad.no", 2, false}, + {1, "aa.no", 2, false}, + {1, "ah.no", 2, false}, + {1, "bu.no", 2, false}, + {1, "fm.no", 2, false}, + {1, "hl.no", 2, false}, + {1, "hm.no", 2, false}, + {1, "jan-mayen.no", 2, false}, + {1, "mr.no", 2, false}, + {1, "nl.no", 2, false}, + {1, "nt.no", 2, false}, + {1, "of.no", 2, false}, + {1, "ol.no", 2, false}, + {1, "oslo.no", 2, false}, + {1, "rl.no", 2, false}, + {1, "sf.no", 2, false}, + {1, "st.no", 2, false}, + {1, "svalbard.no", 2, 
false}, + {1, "tm.no", 2, false}, + {1, "tr.no", 2, false}, + {1, "va.no", 2, false}, + {1, "vf.no", 2, false}, + {1, "gs.aa.no", 3, false}, + {1, "gs.ah.no", 3, false}, + {1, "gs.bu.no", 3, false}, + {1, "gs.fm.no", 3, false}, + {1, "gs.hl.no", 3, false}, + {1, "gs.hm.no", 3, false}, + {1, "gs.jan-mayen.no", 3, false}, + {1, "gs.mr.no", 3, false}, + {1, "gs.nl.no", 3, false}, + {1, "gs.nt.no", 3, false}, + {1, "gs.of.no", 3, false}, + {1, "gs.ol.no", 3, false}, + {1, "gs.oslo.no", 3, false}, + {1, "gs.rl.no", 3, false}, + {1, "gs.sf.no", 3, false}, + {1, "gs.st.no", 3, false}, + {1, "gs.svalbard.no", 3, false}, + {1, "gs.tm.no", 3, false}, + {1, "gs.tr.no", 3, false}, + {1, "gs.va.no", 3, false}, + {1, "gs.vf.no", 3, false}, + {1, "akrehamn.no", 2, false}, + {1, "xn--krehamn-dxa.no", 2, false}, + {1, "algard.no", 2, false}, + {1, "xn--lgrd-poac.no", 2, false}, + {1, "arna.no", 2, false}, + {1, "brumunddal.no", 2, false}, + {1, "bryne.no", 2, false}, + {1, "bronnoysund.no", 2, false}, + {1, "xn--brnnysund-m8ac.no", 2, false}, + {1, "drobak.no", 2, false}, + {1, "xn--drbak-wua.no", 2, false}, + {1, "egersund.no", 2, false}, + {1, "fetsund.no", 2, false}, + {1, "floro.no", 2, false}, + {1, "xn--flor-jra.no", 2, false}, + {1, "fredrikstad.no", 2, false}, + {1, "hokksund.no", 2, false}, + {1, "honefoss.no", 2, false}, + {1, "xn--hnefoss-q1a.no", 2, false}, + {1, "jessheim.no", 2, false}, + {1, "jorpeland.no", 2, false}, + {1, "xn--jrpeland-54a.no", 2, false}, + {1, "kirkenes.no", 2, false}, + {1, "kopervik.no", 2, false}, + {1, "krokstadelva.no", 2, false}, + {1, "langevag.no", 2, false}, + {1, "xn--langevg-jxa.no", 2, false}, + {1, "leirvik.no", 2, false}, + {1, "mjondalen.no", 2, false}, + {1, "xn--mjndalen-64a.no", 2, false}, + {1, "mo-i-rana.no", 2, false}, + {1, "mosjoen.no", 2, false}, + {1, "xn--mosjen-eya.no", 2, false}, + {1, "nesoddtangen.no", 2, false}, + {1, "orkanger.no", 2, false}, + {1, "osoyro.no", 2, false}, + {1, "xn--osyro-wua.no", 2, false}, + {1, 
"raholt.no", 2, false}, + {1, "xn--rholt-mra.no", 2, false}, + {1, "sandnessjoen.no", 2, false}, + {1, "xn--sandnessjen-ogb.no", 2, false}, + {1, "skedsmokorset.no", 2, false}, + {1, "slattum.no", 2, false}, + {1, "spjelkavik.no", 2, false}, + {1, "stathelle.no", 2, false}, + {1, "stavern.no", 2, false}, + {1, "stjordalshalsen.no", 2, false}, + {1, "xn--stjrdalshalsen-sqb.no", 2, false}, + {1, "tananger.no", 2, false}, + {1, "tranby.no", 2, false}, + {1, "vossevangen.no", 2, false}, + {1, "afjord.no", 2, false}, + {1, "xn--fjord-lra.no", 2, false}, + {1, "agdenes.no", 2, false}, + {1, "al.no", 2, false}, + {1, "xn--l-1fa.no", 2, false}, + {1, "alesund.no", 2, false}, + {1, "xn--lesund-hua.no", 2, false}, + {1, "alstahaug.no", 2, false}, + {1, "alta.no", 2, false}, + {1, "xn--lt-liac.no", 2, false}, + {1, "alaheadju.no", 2, false}, + {1, "xn--laheadju-7ya.no", 2, false}, + {1, "alvdal.no", 2, false}, + {1, "amli.no", 2, false}, + {1, "xn--mli-tla.no", 2, false}, + {1, "amot.no", 2, false}, + {1, "xn--mot-tla.no", 2, false}, + {1, "andebu.no", 2, false}, + {1, "andoy.no", 2, false}, + {1, "xn--andy-ira.no", 2, false}, + {1, "andasuolo.no", 2, false}, + {1, "ardal.no", 2, false}, + {1, "xn--rdal-poa.no", 2, false}, + {1, "aremark.no", 2, false}, + {1, "arendal.no", 2, false}, + {1, "xn--s-1fa.no", 2, false}, + {1, "aseral.no", 2, false}, + {1, "xn--seral-lra.no", 2, false}, + {1, "asker.no", 2, false}, + {1, "askim.no", 2, false}, + {1, "askvoll.no", 2, false}, + {1, "askoy.no", 2, false}, + {1, "xn--asky-ira.no", 2, false}, + {1, "asnes.no", 2, false}, + {1, "xn--snes-poa.no", 2, false}, + {1, "audnedaln.no", 2, false}, + {1, "aukra.no", 2, false}, + {1, "aure.no", 2, false}, + {1, "aurland.no", 2, false}, + {1, "aurskog-holand.no", 2, false}, + {1, "xn--aurskog-hland-jnb.no", 2, false}, + {1, "austevoll.no", 2, false}, + {1, "austrheim.no", 2, false}, + {1, "averoy.no", 2, false}, + {1, "xn--avery-yua.no", 2, false}, + {1, "balestrand.no", 2, false}, + {1, 
"ballangen.no", 2, false}, + {1, "balat.no", 2, false}, + {1, "xn--blt-elab.no", 2, false}, + {1, "balsfjord.no", 2, false}, + {1, "bahccavuotna.no", 2, false}, + {1, "xn--bhccavuotna-k7a.no", 2, false}, + {1, "bamble.no", 2, false}, + {1, "bardu.no", 2, false}, + {1, "beardu.no", 2, false}, + {1, "beiarn.no", 2, false}, + {1, "bajddar.no", 2, false}, + {1, "xn--bjddar-pta.no", 2, false}, + {1, "baidar.no", 2, false}, + {1, "xn--bidr-5nac.no", 2, false}, + {1, "berg.no", 2, false}, + {1, "bergen.no", 2, false}, + {1, "berlevag.no", 2, false}, + {1, "xn--berlevg-jxa.no", 2, false}, + {1, "bearalvahki.no", 2, false}, + {1, "xn--bearalvhki-y4a.no", 2, false}, + {1, "bindal.no", 2, false}, + {1, "birkenes.no", 2, false}, + {1, "bjarkoy.no", 2, false}, + {1, "xn--bjarky-fya.no", 2, false}, + {1, "bjerkreim.no", 2, false}, + {1, "bjugn.no", 2, false}, + {1, "bodo.no", 2, false}, + {1, "xn--bod-2na.no", 2, false}, + {1, "badaddja.no", 2, false}, + {1, "xn--bdddj-mrabd.no", 2, false}, + {1, "budejju.no", 2, false}, + {1, "bokn.no", 2, false}, + {1, "bremanger.no", 2, false}, + {1, "bronnoy.no", 2, false}, + {1, "xn--brnny-wuac.no", 2, false}, + {1, "bygland.no", 2, false}, + {1, "bykle.no", 2, false}, + {1, "barum.no", 2, false}, + {1, "xn--brum-voa.no", 2, false}, + {1, "bo.telemark.no", 3, false}, + {1, "xn--b-5ga.telemark.no", 3, false}, + {1, "bo.nordland.no", 3, false}, + {1, "xn--b-5ga.nordland.no", 3, false}, + {1, "bievat.no", 2, false}, + {1, "xn--bievt-0qa.no", 2, false}, + {1, "bomlo.no", 2, false}, + {1, "xn--bmlo-gra.no", 2, false}, + {1, "batsfjord.no", 2, false}, + {1, "xn--btsfjord-9za.no", 2, false}, + {1, "bahcavuotna.no", 2, false}, + {1, "xn--bhcavuotna-s4a.no", 2, false}, + {1, "dovre.no", 2, false}, + {1, "drammen.no", 2, false}, + {1, "drangedal.no", 2, false}, + {1, "dyroy.no", 2, false}, + {1, "xn--dyry-ira.no", 2, false}, + {1, "donna.no", 2, false}, + {1, "xn--dnna-gra.no", 2, false}, + {1, "eid.no", 2, false}, + {1, "eidfjord.no", 2, false}, + 
{1, "eidsberg.no", 2, false}, + {1, "eidskog.no", 2, false}, + {1, "eidsvoll.no", 2, false}, + {1, "eigersund.no", 2, false}, + {1, "elverum.no", 2, false}, + {1, "enebakk.no", 2, false}, + {1, "engerdal.no", 2, false}, + {1, "etne.no", 2, false}, + {1, "etnedal.no", 2, false}, + {1, "evenes.no", 2, false}, + {1, "evenassi.no", 2, false}, + {1, "xn--eveni-0qa01ga.no", 2, false}, + {1, "evje-og-hornnes.no", 2, false}, + {1, "farsund.no", 2, false}, + {1, "fauske.no", 2, false}, + {1, "fuossko.no", 2, false}, + {1, "fuoisku.no", 2, false}, + {1, "fedje.no", 2, false}, + {1, "fet.no", 2, false}, + {1, "finnoy.no", 2, false}, + {1, "xn--finny-yua.no", 2, false}, + {1, "fitjar.no", 2, false}, + {1, "fjaler.no", 2, false}, + {1, "fjell.no", 2, false}, + {1, "flakstad.no", 2, false}, + {1, "flatanger.no", 2, false}, + {1, "flekkefjord.no", 2, false}, + {1, "flesberg.no", 2, false}, + {1, "flora.no", 2, false}, + {1, "fla.no", 2, false}, + {1, "xn--fl-zia.no", 2, false}, + {1, "folldal.no", 2, false}, + {1, "forsand.no", 2, false}, + {1, "fosnes.no", 2, false}, + {1, "frei.no", 2, false}, + {1, "frogn.no", 2, false}, + {1, "froland.no", 2, false}, + {1, "frosta.no", 2, false}, + {1, "frana.no", 2, false}, + {1, "xn--frna-woa.no", 2, false}, + {1, "froya.no", 2, false}, + {1, "xn--frya-hra.no", 2, false}, + {1, "fusa.no", 2, false}, + {1, "fyresdal.no", 2, false}, + {1, "forde.no", 2, false}, + {1, "xn--frde-gra.no", 2, false}, + {1, "gamvik.no", 2, false}, + {1, "gangaviika.no", 2, false}, + {1, "xn--ggaviika-8ya47h.no", 2, false}, + {1, "gaular.no", 2, false}, + {1, "gausdal.no", 2, false}, + {1, "gildeskal.no", 2, false}, + {1, "xn--gildeskl-g0a.no", 2, false}, + {1, "giske.no", 2, false}, + {1, "gjemnes.no", 2, false}, + {1, "gjerdrum.no", 2, false}, + {1, "gjerstad.no", 2, false}, + {1, "gjesdal.no", 2, false}, + {1, "gjovik.no", 2, false}, + {1, "xn--gjvik-wua.no", 2, false}, + {1, "gloppen.no", 2, false}, + {1, "gol.no", 2, false}, + {1, "gran.no", 2, false}, + {1, 
"grane.no", 2, false}, + {1, "granvin.no", 2, false}, + {1, "gratangen.no", 2, false}, + {1, "grimstad.no", 2, false}, + {1, "grong.no", 2, false}, + {1, "kraanghke.no", 2, false}, + {1, "xn--kranghke-b0a.no", 2, false}, + {1, "grue.no", 2, false}, + {1, "gulen.no", 2, false}, + {1, "hadsel.no", 2, false}, + {1, "halden.no", 2, false}, + {1, "halsa.no", 2, false}, + {1, "hamar.no", 2, false}, + {1, "hamaroy.no", 2, false}, + {1, "habmer.no", 2, false}, + {1, "xn--hbmer-xqa.no", 2, false}, + {1, "hapmir.no", 2, false}, + {1, "xn--hpmir-xqa.no", 2, false}, + {1, "hammerfest.no", 2, false}, + {1, "hammarfeasta.no", 2, false}, + {1, "xn--hmmrfeasta-s4ac.no", 2, false}, + {1, "haram.no", 2, false}, + {1, "hareid.no", 2, false}, + {1, "harstad.no", 2, false}, + {1, "hasvik.no", 2, false}, + {1, "aknoluokta.no", 2, false}, + {1, "xn--koluokta-7ya57h.no", 2, false}, + {1, "hattfjelldal.no", 2, false}, + {1, "aarborte.no", 2, false}, + {1, "haugesund.no", 2, false}, + {1, "hemne.no", 2, false}, + {1, "hemnes.no", 2, false}, + {1, "hemsedal.no", 2, false}, + {1, "heroy.more-og-romsdal.no", 3, false}, + {1, "xn--hery-ira.xn--mre-og-romsdal-qqb.no", 3, false}, + {1, "heroy.nordland.no", 3, false}, + {1, "xn--hery-ira.nordland.no", 3, false}, + {1, "hitra.no", 2, false}, + {1, "hjartdal.no", 2, false}, + {1, "hjelmeland.no", 2, false}, + {1, "hobol.no", 2, false}, + {1, "xn--hobl-ira.no", 2, false}, + {1, "hof.no", 2, false}, + {1, "hol.no", 2, false}, + {1, "hole.no", 2, false}, + {1, "holmestrand.no", 2, false}, + {1, "holtalen.no", 2, false}, + {1, "xn--holtlen-hxa.no", 2, false}, + {1, "hornindal.no", 2, false}, + {1, "horten.no", 2, false}, + {1, "hurdal.no", 2, false}, + {1, "hurum.no", 2, false}, + {1, "hvaler.no", 2, false}, + {1, "hyllestad.no", 2, false}, + {1, "hagebostad.no", 2, false}, + {1, "xn--hgebostad-g3a.no", 2, false}, + {1, "hoyanger.no", 2, false}, + {1, "xn--hyanger-q1a.no", 2, false}, + {1, "hoylandet.no", 2, false}, + {1, "xn--hylandet-54a.no", 2, 
false}, + {1, "ha.no", 2, false}, + {1, "xn--h-2fa.no", 2, false}, + {1, "ibestad.no", 2, false}, + {1, "inderoy.no", 2, false}, + {1, "xn--indery-fya.no", 2, false}, + {1, "iveland.no", 2, false}, + {1, "jevnaker.no", 2, false}, + {1, "jondal.no", 2, false}, + {1, "jolster.no", 2, false}, + {1, "xn--jlster-bya.no", 2, false}, + {1, "karasjok.no", 2, false}, + {1, "karasjohka.no", 2, false}, + {1, "xn--krjohka-hwab49j.no", 2, false}, + {1, "karlsoy.no", 2, false}, + {1, "galsa.no", 2, false}, + {1, "xn--gls-elac.no", 2, false}, + {1, "karmoy.no", 2, false}, + {1, "xn--karmy-yua.no", 2, false}, + {1, "kautokeino.no", 2, false}, + {1, "guovdageaidnu.no", 2, false}, + {1, "klepp.no", 2, false}, + {1, "klabu.no", 2, false}, + {1, "xn--klbu-woa.no", 2, false}, + {1, "kongsberg.no", 2, false}, + {1, "kongsvinger.no", 2, false}, + {1, "kragero.no", 2, false}, + {1, "xn--krager-gya.no", 2, false}, + {1, "kristiansand.no", 2, false}, + {1, "kristiansund.no", 2, false}, + {1, "krodsherad.no", 2, false}, + {1, "xn--krdsherad-m8a.no", 2, false}, + {1, "kvalsund.no", 2, false}, + {1, "rahkkeravju.no", 2, false}, + {1, "xn--rhkkervju-01af.no", 2, false}, + {1, "kvam.no", 2, false}, + {1, "kvinesdal.no", 2, false}, + {1, "kvinnherad.no", 2, false}, + {1, "kviteseid.no", 2, false}, + {1, "kvitsoy.no", 2, false}, + {1, "xn--kvitsy-fya.no", 2, false}, + {1, "kvafjord.no", 2, false}, + {1, "xn--kvfjord-nxa.no", 2, false}, + {1, "giehtavuoatna.no", 2, false}, + {1, "kvanangen.no", 2, false}, + {1, "xn--kvnangen-k0a.no", 2, false}, + {1, "navuotna.no", 2, false}, + {1, "xn--nvuotna-hwa.no", 2, false}, + {1, "kafjord.no", 2, false}, + {1, "xn--kfjord-iua.no", 2, false}, + {1, "gaivuotna.no", 2, false}, + {1, "xn--givuotna-8ya.no", 2, false}, + {1, "larvik.no", 2, false}, + {1, "lavangen.no", 2, false}, + {1, "lavagis.no", 2, false}, + {1, "loabat.no", 2, false}, + {1, "xn--loabt-0qa.no", 2, false}, + {1, "lebesby.no", 2, false}, + {1, "davvesiida.no", 2, false}, + {1, "leikanger.no", 2, 
false}, + {1, "leirfjord.no", 2, false}, + {1, "leka.no", 2, false}, + {1, "leksvik.no", 2, false}, + {1, "lenvik.no", 2, false}, + {1, "leangaviika.no", 2, false}, + {1, "xn--leagaviika-52b.no", 2, false}, + {1, "lesja.no", 2, false}, + {1, "levanger.no", 2, false}, + {1, "lier.no", 2, false}, + {1, "lierne.no", 2, false}, + {1, "lillehammer.no", 2, false}, + {1, "lillesand.no", 2, false}, + {1, "lindesnes.no", 2, false}, + {1, "lindas.no", 2, false}, + {1, "xn--linds-pra.no", 2, false}, + {1, "lom.no", 2, false}, + {1, "loppa.no", 2, false}, + {1, "lahppi.no", 2, false}, + {1, "xn--lhppi-xqa.no", 2, false}, + {1, "lund.no", 2, false}, + {1, "lunner.no", 2, false}, + {1, "luroy.no", 2, false}, + {1, "xn--lury-ira.no", 2, false}, + {1, "luster.no", 2, false}, + {1, "lyngdal.no", 2, false}, + {1, "lyngen.no", 2, false}, + {1, "ivgu.no", 2, false}, + {1, "lardal.no", 2, false}, + {1, "lerdal.no", 2, false}, + {1, "xn--lrdal-sra.no", 2, false}, + {1, "lodingen.no", 2, false}, + {1, "xn--ldingen-q1a.no", 2, false}, + {1, "lorenskog.no", 2, false}, + {1, "xn--lrenskog-54a.no", 2, false}, + {1, "loten.no", 2, false}, + {1, "xn--lten-gra.no", 2, false}, + {1, "malvik.no", 2, false}, + {1, "masoy.no", 2, false}, + {1, "xn--msy-ula0h.no", 2, false}, + {1, "muosat.no", 2, false}, + {1, "xn--muost-0qa.no", 2, false}, + {1, "mandal.no", 2, false}, + {1, "marker.no", 2, false}, + {1, "marnardal.no", 2, false}, + {1, "masfjorden.no", 2, false}, + {1, "meland.no", 2, false}, + {1, "meldal.no", 2, false}, + {1, "melhus.no", 2, false}, + {1, "meloy.no", 2, false}, + {1, "xn--mely-ira.no", 2, false}, + {1, "meraker.no", 2, false}, + {1, "xn--merker-kua.no", 2, false}, + {1, "moareke.no", 2, false}, + {1, "xn--moreke-jua.no", 2, false}, + {1, "midsund.no", 2, false}, + {1, "midtre-gauldal.no", 2, false}, + {1, "modalen.no", 2, false}, + {1, "modum.no", 2, false}, + {1, "molde.no", 2, false}, + {1, "moskenes.no", 2, false}, + {1, "moss.no", 2, false}, + {1, "mosvik.no", 2, false}, + 
{1, "malselv.no", 2, false}, + {1, "xn--mlselv-iua.no", 2, false}, + {1, "malatvuopmi.no", 2, false}, + {1, "xn--mlatvuopmi-s4a.no", 2, false}, + {1, "namdalseid.no", 2, false}, + {1, "aejrie.no", 2, false}, + {1, "namsos.no", 2, false}, + {1, "namsskogan.no", 2, false}, + {1, "naamesjevuemie.no", 2, false}, + {1, "xn--nmesjevuemie-tcba.no", 2, false}, + {1, "laakesvuemie.no", 2, false}, + {1, "nannestad.no", 2, false}, + {1, "narvik.no", 2, false}, + {1, "narviika.no", 2, false}, + {1, "naustdal.no", 2, false}, + {1, "nedre-eiker.no", 2, false}, + {1, "nes.akershus.no", 3, false}, + {1, "nes.buskerud.no", 3, false}, + {1, "nesna.no", 2, false}, + {1, "nesodden.no", 2, false}, + {1, "nesseby.no", 2, false}, + {1, "unjarga.no", 2, false}, + {1, "xn--unjrga-rta.no", 2, false}, + {1, "nesset.no", 2, false}, + {1, "nissedal.no", 2, false}, + {1, "nittedal.no", 2, false}, + {1, "nord-aurdal.no", 2, false}, + {1, "nord-fron.no", 2, false}, + {1, "nord-odal.no", 2, false}, + {1, "norddal.no", 2, false}, + {1, "nordkapp.no", 2, false}, + {1, "davvenjarga.no", 2, false}, + {1, "xn--davvenjrga-y4a.no", 2, false}, + {1, "nordre-land.no", 2, false}, + {1, "nordreisa.no", 2, false}, + {1, "raisa.no", 2, false}, + {1, "xn--risa-5na.no", 2, false}, + {1, "nore-og-uvdal.no", 2, false}, + {1, "notodden.no", 2, false}, + {1, "naroy.no", 2, false}, + {1, "xn--nry-yla5g.no", 2, false}, + {1, "notteroy.no", 2, false}, + {1, "xn--nttery-byae.no", 2, false}, + {1, "odda.no", 2, false}, + {1, "oksnes.no", 2, false}, + {1, "xn--ksnes-uua.no", 2, false}, + {1, "oppdal.no", 2, false}, + {1, "oppegard.no", 2, false}, + {1, "xn--oppegrd-ixa.no", 2, false}, + {1, "orkdal.no", 2, false}, + {1, "orland.no", 2, false}, + {1, "xn--rland-uua.no", 2, false}, + {1, "orskog.no", 2, false}, + {1, "xn--rskog-uua.no", 2, false}, + {1, "orsta.no", 2, false}, + {1, "xn--rsta-fra.no", 2, false}, + {1, "os.hedmark.no", 3, false}, + {1, "os.hordaland.no", 3, false}, + {1, "osen.no", 2, false}, + {1, 
"osteroy.no", 2, false}, + {1, "xn--ostery-fya.no", 2, false}, + {1, "ostre-toten.no", 2, false}, + {1, "xn--stre-toten-zcb.no", 2, false}, + {1, "overhalla.no", 2, false}, + {1, "ovre-eiker.no", 2, false}, + {1, "xn--vre-eiker-k8a.no", 2, false}, + {1, "oyer.no", 2, false}, + {1, "xn--yer-zna.no", 2, false}, + {1, "oygarden.no", 2, false}, + {1, "xn--ygarden-p1a.no", 2, false}, + {1, "oystre-slidre.no", 2, false}, + {1, "xn--ystre-slidre-ujb.no", 2, false}, + {1, "porsanger.no", 2, false}, + {1, "porsangu.no", 2, false}, + {1, "xn--porsgu-sta26f.no", 2, false}, + {1, "porsgrunn.no", 2, false}, + {1, "radoy.no", 2, false}, + {1, "xn--rady-ira.no", 2, false}, + {1, "rakkestad.no", 2, false}, + {1, "rana.no", 2, false}, + {1, "ruovat.no", 2, false}, + {1, "randaberg.no", 2, false}, + {1, "rauma.no", 2, false}, + {1, "rendalen.no", 2, false}, + {1, "rennebu.no", 2, false}, + {1, "rennesoy.no", 2, false}, + {1, "xn--rennesy-v1a.no", 2, false}, + {1, "rindal.no", 2, false}, + {1, "ringebu.no", 2, false}, + {1, "ringerike.no", 2, false}, + {1, "ringsaker.no", 2, false}, + {1, "rissa.no", 2, false}, + {1, "risor.no", 2, false}, + {1, "xn--risr-ira.no", 2, false}, + {1, "roan.no", 2, false}, + {1, "rollag.no", 2, false}, + {1, "rygge.no", 2, false}, + {1, "ralingen.no", 2, false}, + {1, "xn--rlingen-mxa.no", 2, false}, + {1, "rodoy.no", 2, false}, + {1, "xn--rdy-0nab.no", 2, false}, + {1, "romskog.no", 2, false}, + {1, "xn--rmskog-bya.no", 2, false}, + {1, "roros.no", 2, false}, + {1, "xn--rros-gra.no", 2, false}, + {1, "rost.no", 2, false}, + {1, "xn--rst-0na.no", 2, false}, + {1, "royken.no", 2, false}, + {1, "xn--ryken-vua.no", 2, false}, + {1, "royrvik.no", 2, false}, + {1, "xn--ryrvik-bya.no", 2, false}, + {1, "rade.no", 2, false}, + {1, "xn--rde-ula.no", 2, false}, + {1, "salangen.no", 2, false}, + {1, "siellak.no", 2, false}, + {1, "saltdal.no", 2, false}, + {1, "salat.no", 2, false}, + {1, "xn--slt-elab.no", 2, false}, + {1, "xn--slat-5na.no", 2, false}, + {1, 
"samnanger.no", 2, false}, + {1, "sande.more-og-romsdal.no", 3, false}, + {1, "sande.xn--mre-og-romsdal-qqb.no", 3, false}, + {1, "sande.vestfold.no", 3, false}, + {1, "sandefjord.no", 2, false}, + {1, "sandnes.no", 2, false}, + {1, "sandoy.no", 2, false}, + {1, "xn--sandy-yua.no", 2, false}, + {1, "sarpsborg.no", 2, false}, + {1, "sauda.no", 2, false}, + {1, "sauherad.no", 2, false}, + {1, "sel.no", 2, false}, + {1, "selbu.no", 2, false}, + {1, "selje.no", 2, false}, + {1, "seljord.no", 2, false}, + {1, "sigdal.no", 2, false}, + {1, "siljan.no", 2, false}, + {1, "sirdal.no", 2, false}, + {1, "skaun.no", 2, false}, + {1, "skedsmo.no", 2, false}, + {1, "ski.no", 2, false}, + {1, "skien.no", 2, false}, + {1, "skiptvet.no", 2, false}, + {1, "skjervoy.no", 2, false}, + {1, "xn--skjervy-v1a.no", 2, false}, + {1, "skierva.no", 2, false}, + {1, "xn--skierv-uta.no", 2, false}, + {1, "skjak.no", 2, false}, + {1, "xn--skjk-soa.no", 2, false}, + {1, "skodje.no", 2, false}, + {1, "skanland.no", 2, false}, + {1, "xn--sknland-fxa.no", 2, false}, + {1, "skanit.no", 2, false}, + {1, "xn--sknit-yqa.no", 2, false}, + {1, "smola.no", 2, false}, + {1, "xn--smla-hra.no", 2, false}, + {1, "snillfjord.no", 2, false}, + {1, "snasa.no", 2, false}, + {1, "xn--snsa-roa.no", 2, false}, + {1, "snoasa.no", 2, false}, + {1, "snaase.no", 2, false}, + {1, "xn--snase-nra.no", 2, false}, + {1, "sogndal.no", 2, false}, + {1, "sokndal.no", 2, false}, + {1, "sola.no", 2, false}, + {1, "solund.no", 2, false}, + {1, "songdalen.no", 2, false}, + {1, "sortland.no", 2, false}, + {1, "spydeberg.no", 2, false}, + {1, "stange.no", 2, false}, + {1, "stavanger.no", 2, false}, + {1, "steigen.no", 2, false}, + {1, "steinkjer.no", 2, false}, + {1, "stjordal.no", 2, false}, + {1, "xn--stjrdal-s1a.no", 2, false}, + {1, "stokke.no", 2, false}, + {1, "stor-elvdal.no", 2, false}, + {1, "stord.no", 2, false}, + {1, "stordal.no", 2, false}, + {1, "storfjord.no", 2, false}, + {1, "omasvuotna.no", 2, false}, + {1, 
"strand.no", 2, false}, + {1, "stranda.no", 2, false}, + {1, "stryn.no", 2, false}, + {1, "sula.no", 2, false}, + {1, "suldal.no", 2, false}, + {1, "sund.no", 2, false}, + {1, "sunndal.no", 2, false}, + {1, "surnadal.no", 2, false}, + {1, "sveio.no", 2, false}, + {1, "svelvik.no", 2, false}, + {1, "sykkylven.no", 2, false}, + {1, "sogne.no", 2, false}, + {1, "xn--sgne-gra.no", 2, false}, + {1, "somna.no", 2, false}, + {1, "xn--smna-gra.no", 2, false}, + {1, "sondre-land.no", 2, false}, + {1, "xn--sndre-land-0cb.no", 2, false}, + {1, "sor-aurdal.no", 2, false}, + {1, "xn--sr-aurdal-l8a.no", 2, false}, + {1, "sor-fron.no", 2, false}, + {1, "xn--sr-fron-q1a.no", 2, false}, + {1, "sor-odal.no", 2, false}, + {1, "xn--sr-odal-q1a.no", 2, false}, + {1, "sor-varanger.no", 2, false}, + {1, "xn--sr-varanger-ggb.no", 2, false}, + {1, "matta-varjjat.no", 2, false}, + {1, "xn--mtta-vrjjat-k7af.no", 2, false}, + {1, "sorfold.no", 2, false}, + {1, "xn--srfold-bya.no", 2, false}, + {1, "sorreisa.no", 2, false}, + {1, "xn--srreisa-q1a.no", 2, false}, + {1, "sorum.no", 2, false}, + {1, "xn--srum-gra.no", 2, false}, + {1, "tana.no", 2, false}, + {1, "deatnu.no", 2, false}, + {1, "time.no", 2, false}, + {1, "tingvoll.no", 2, false}, + {1, "tinn.no", 2, false}, + {1, "tjeldsund.no", 2, false}, + {1, "dielddanuorri.no", 2, false}, + {1, "tjome.no", 2, false}, + {1, "xn--tjme-hra.no", 2, false}, + {1, "tokke.no", 2, false}, + {1, "tolga.no", 2, false}, + {1, "torsken.no", 2, false}, + {1, "tranoy.no", 2, false}, + {1, "xn--trany-yua.no", 2, false}, + {1, "tromso.no", 2, false}, + {1, "xn--troms-zua.no", 2, false}, + {1, "tromsa.no", 2, false}, + {1, "romsa.no", 2, false}, + {1, "trondheim.no", 2, false}, + {1, "troandin.no", 2, false}, + {1, "trysil.no", 2, false}, + {1, "trana.no", 2, false}, + {1, "xn--trna-woa.no", 2, false}, + {1, "trogstad.no", 2, false}, + {1, "xn--trgstad-r1a.no", 2, false}, + {1, "tvedestrand.no", 2, false}, + {1, "tydal.no", 2, false}, + {1, "tynset.no", 2, 
false}, + {1, "tysfjord.no", 2, false}, + {1, "divtasvuodna.no", 2, false}, + {1, "divttasvuotna.no", 2, false}, + {1, "tysnes.no", 2, false}, + {1, "tysvar.no", 2, false}, + {1, "xn--tysvr-vra.no", 2, false}, + {1, "tonsberg.no", 2, false}, + {1, "xn--tnsberg-q1a.no", 2, false}, + {1, "ullensaker.no", 2, false}, + {1, "ullensvang.no", 2, false}, + {1, "ulvik.no", 2, false}, + {1, "utsira.no", 2, false}, + {1, "vadso.no", 2, false}, + {1, "xn--vads-jra.no", 2, false}, + {1, "cahcesuolo.no", 2, false}, + {1, "xn--hcesuolo-7ya35b.no", 2, false}, + {1, "vaksdal.no", 2, false}, + {1, "valle.no", 2, false}, + {1, "vang.no", 2, false}, + {1, "vanylven.no", 2, false}, + {1, "vardo.no", 2, false}, + {1, "xn--vard-jra.no", 2, false}, + {1, "varggat.no", 2, false}, + {1, "xn--vrggt-xqad.no", 2, false}, + {1, "vefsn.no", 2, false}, + {1, "vaapste.no", 2, false}, + {1, "vega.no", 2, false}, + {1, "vegarshei.no", 2, false}, + {1, "xn--vegrshei-c0a.no", 2, false}, + {1, "vennesla.no", 2, false}, + {1, "verdal.no", 2, false}, + {1, "verran.no", 2, false}, + {1, "vestby.no", 2, false}, + {1, "vestnes.no", 2, false}, + {1, "vestre-slidre.no", 2, false}, + {1, "vestre-toten.no", 2, false}, + {1, "vestvagoy.no", 2, false}, + {1, "xn--vestvgy-ixa6o.no", 2, false}, + {1, "vevelstad.no", 2, false}, + {1, "vik.no", 2, false}, + {1, "vikna.no", 2, false}, + {1, "vindafjord.no", 2, false}, + {1, "volda.no", 2, false}, + {1, "voss.no", 2, false}, + {1, "varoy.no", 2, false}, + {1, "xn--vry-yla5g.no", 2, false}, + {1, "vagan.no", 2, false}, + {1, "xn--vgan-qoa.no", 2, false}, + {1, "voagat.no", 2, false}, + {1, "vagsoy.no", 2, false}, + {1, "xn--vgsy-qoa0j.no", 2, false}, + {1, "vaga.no", 2, false}, + {1, "xn--vg-yiab.no", 2, false}, + {1, "valer.ostfold.no", 3, false}, + {1, "xn--vler-qoa.xn--stfold-9xa.no", 3, false}, + {1, "valer.hedmark.no", 3, false}, + {1, "xn--vler-qoa.hedmark.no", 3, false}, + {2, "np", 2, false}, + {1, "nr", 1, false}, + {1, "biz.nr", 2, false}, + {1, "info.nr", 2, 
false}, + {1, "gov.nr", 2, false}, + {1, "edu.nr", 2, false}, + {1, "org.nr", 2, false}, + {1, "net.nr", 2, false}, + {1, "com.nr", 2, false}, + {1, "nu", 1, false}, + {1, "nz", 1, false}, + {1, "ac.nz", 2, false}, + {1, "co.nz", 2, false}, + {1, "cri.nz", 2, false}, + {1, "geek.nz", 2, false}, + {1, "gen.nz", 2, false}, + {1, "govt.nz", 2, false}, + {1, "health.nz", 2, false}, + {1, "iwi.nz", 2, false}, + {1, "kiwi.nz", 2, false}, + {1, "maori.nz", 2, false}, + {1, "mil.nz", 2, false}, + {1, "xn--mori-qsa.nz", 2, false}, + {1, "net.nz", 2, false}, + {1, "org.nz", 2, false}, + {1, "parliament.nz", 2, false}, + {1, "school.nz", 2, false}, + {1, "om", 1, false}, + {1, "co.om", 2, false}, + {1, "com.om", 2, false}, + {1, "edu.om", 2, false}, + {1, "gov.om", 2, false}, + {1, "med.om", 2, false}, + {1, "museum.om", 2, false}, + {1, "net.om", 2, false}, + {1, "org.om", 2, false}, + {1, "pro.om", 2, false}, + {1, "onion", 1, false}, + {1, "org", 1, false}, + {1, "pa", 1, false}, + {1, "ac.pa", 2, false}, + {1, "gob.pa", 2, false}, + {1, "com.pa", 2, false}, + {1, "org.pa", 2, false}, + {1, "sld.pa", 2, false}, + {1, "edu.pa", 2, false}, + {1, "net.pa", 2, false}, + {1, "ing.pa", 2, false}, + {1, "abo.pa", 2, false}, + {1, "med.pa", 2, false}, + {1, "nom.pa", 2, false}, + {1, "pe", 1, false}, + {1, "edu.pe", 2, false}, + {1, "gob.pe", 2, false}, + {1, "nom.pe", 2, false}, + {1, "mil.pe", 2, false}, + {1, "org.pe", 2, false}, + {1, "com.pe", 2, false}, + {1, "net.pe", 2, false}, + {1, "pf", 1, false}, + {1, "com.pf", 2, false}, + {1, "org.pf", 2, false}, + {1, "edu.pf", 2, false}, + {2, "pg", 2, false}, + {1, "ph", 1, false}, + {1, "com.ph", 2, false}, + {1, "net.ph", 2, false}, + {1, "org.ph", 2, false}, + {1, "gov.ph", 2, false}, + {1, "edu.ph", 2, false}, + {1, "ngo.ph", 2, false}, + {1, "mil.ph", 2, false}, + {1, "i.ph", 2, false}, + {1, "pk", 1, false}, + {1, "com.pk", 2, false}, + {1, "net.pk", 2, false}, + {1, "edu.pk", 2, false}, + {1, "org.pk", 2, false}, + {1, 
"fam.pk", 2, false}, + {1, "biz.pk", 2, false}, + {1, "web.pk", 2, false}, + {1, "gov.pk", 2, false}, + {1, "gob.pk", 2, false}, + {1, "gok.pk", 2, false}, + {1, "gon.pk", 2, false}, + {1, "gop.pk", 2, false}, + {1, "gos.pk", 2, false}, + {1, "info.pk", 2, false}, + {1, "pl", 1, false}, + {1, "com.pl", 2, false}, + {1, "net.pl", 2, false}, + {1, "org.pl", 2, false}, + {1, "aid.pl", 2, false}, + {1, "agro.pl", 2, false}, + {1, "atm.pl", 2, false}, + {1, "auto.pl", 2, false}, + {1, "biz.pl", 2, false}, + {1, "edu.pl", 2, false}, + {1, "gmina.pl", 2, false}, + {1, "gsm.pl", 2, false}, + {1, "info.pl", 2, false}, + {1, "mail.pl", 2, false}, + {1, "miasta.pl", 2, false}, + {1, "media.pl", 2, false}, + {1, "mil.pl", 2, false}, + {1, "nieruchomosci.pl", 2, false}, + {1, "nom.pl", 2, false}, + {1, "pc.pl", 2, false}, + {1, "powiat.pl", 2, false}, + {1, "priv.pl", 2, false}, + {1, "realestate.pl", 2, false}, + {1, "rel.pl", 2, false}, + {1, "sex.pl", 2, false}, + {1, "shop.pl", 2, false}, + {1, "sklep.pl", 2, false}, + {1, "sos.pl", 2, false}, + {1, "szkola.pl", 2, false}, + {1, "targi.pl", 2, false}, + {1, "tm.pl", 2, false}, + {1, "tourism.pl", 2, false}, + {1, "travel.pl", 2, false}, + {1, "turystyka.pl", 2, false}, + {1, "gov.pl", 2, false}, + {1, "ap.gov.pl", 3, false}, + {1, "ic.gov.pl", 3, false}, + {1, "is.gov.pl", 3, false}, + {1, "us.gov.pl", 3, false}, + {1, "kmpsp.gov.pl", 3, false}, + {1, "kppsp.gov.pl", 3, false}, + {1, "kwpsp.gov.pl", 3, false}, + {1, "psp.gov.pl", 3, false}, + {1, "wskr.gov.pl", 3, false}, + {1, "kwp.gov.pl", 3, false}, + {1, "mw.gov.pl", 3, false}, + {1, "ug.gov.pl", 3, false}, + {1, "um.gov.pl", 3, false}, + {1, "umig.gov.pl", 3, false}, + {1, "ugim.gov.pl", 3, false}, + {1, "upow.gov.pl", 3, false}, + {1, "uw.gov.pl", 3, false}, + {1, "starostwo.gov.pl", 3, false}, + {1, "pa.gov.pl", 3, false}, + {1, "po.gov.pl", 3, false}, + {1, "psse.gov.pl", 3, false}, + {1, "pup.gov.pl", 3, false}, + {1, "rzgw.gov.pl", 3, false}, + {1, "sa.gov.pl", 3, 
false}, + {1, "so.gov.pl", 3, false}, + {1, "sr.gov.pl", 3, false}, + {1, "wsa.gov.pl", 3, false}, + {1, "sko.gov.pl", 3, false}, + {1, "uzs.gov.pl", 3, false}, + {1, "wiih.gov.pl", 3, false}, + {1, "winb.gov.pl", 3, false}, + {1, "pinb.gov.pl", 3, false}, + {1, "wios.gov.pl", 3, false}, + {1, "witd.gov.pl", 3, false}, + {1, "wzmiuw.gov.pl", 3, false}, + {1, "piw.gov.pl", 3, false}, + {1, "wiw.gov.pl", 3, false}, + {1, "griw.gov.pl", 3, false}, + {1, "wif.gov.pl", 3, false}, + {1, "oum.gov.pl", 3, false}, + {1, "sdn.gov.pl", 3, false}, + {1, "zp.gov.pl", 3, false}, + {1, "uppo.gov.pl", 3, false}, + {1, "mup.gov.pl", 3, false}, + {1, "wuoz.gov.pl", 3, false}, + {1, "konsulat.gov.pl", 3, false}, + {1, "oirm.gov.pl", 3, false}, + {1, "augustow.pl", 2, false}, + {1, "babia-gora.pl", 2, false}, + {1, "bedzin.pl", 2, false}, + {1, "beskidy.pl", 2, false}, + {1, "bialowieza.pl", 2, false}, + {1, "bialystok.pl", 2, false}, + {1, "bielawa.pl", 2, false}, + {1, "bieszczady.pl", 2, false}, + {1, "boleslawiec.pl", 2, false}, + {1, "bydgoszcz.pl", 2, false}, + {1, "bytom.pl", 2, false}, + {1, "cieszyn.pl", 2, false}, + {1, "czeladz.pl", 2, false}, + {1, "czest.pl", 2, false}, + {1, "dlugoleka.pl", 2, false}, + {1, "elblag.pl", 2, false}, + {1, "elk.pl", 2, false}, + {1, "glogow.pl", 2, false}, + {1, "gniezno.pl", 2, false}, + {1, "gorlice.pl", 2, false}, + {1, "grajewo.pl", 2, false}, + {1, "ilawa.pl", 2, false}, + {1, "jaworzno.pl", 2, false}, + {1, "jelenia-gora.pl", 2, false}, + {1, "jgora.pl", 2, false}, + {1, "kalisz.pl", 2, false}, + {1, "kazimierz-dolny.pl", 2, false}, + {1, "karpacz.pl", 2, false}, + {1, "kartuzy.pl", 2, false}, + {1, "kaszuby.pl", 2, false}, + {1, "katowice.pl", 2, false}, + {1, "kepno.pl", 2, false}, + {1, "ketrzyn.pl", 2, false}, + {1, "klodzko.pl", 2, false}, + {1, "kobierzyce.pl", 2, false}, + {1, "kolobrzeg.pl", 2, false}, + {1, "konin.pl", 2, false}, + {1, "konskowola.pl", 2, false}, + {1, "kutno.pl", 2, false}, + {1, "lapy.pl", 2, false}, + {1, 
"lebork.pl", 2, false}, + {1, "legnica.pl", 2, false}, + {1, "lezajsk.pl", 2, false}, + {1, "limanowa.pl", 2, false}, + {1, "lomza.pl", 2, false}, + {1, "lowicz.pl", 2, false}, + {1, "lubin.pl", 2, false}, + {1, "lukow.pl", 2, false}, + {1, "malbork.pl", 2, false}, + {1, "malopolska.pl", 2, false}, + {1, "mazowsze.pl", 2, false}, + {1, "mazury.pl", 2, false}, + {1, "mielec.pl", 2, false}, + {1, "mielno.pl", 2, false}, + {1, "mragowo.pl", 2, false}, + {1, "naklo.pl", 2, false}, + {1, "nowaruda.pl", 2, false}, + {1, "nysa.pl", 2, false}, + {1, "olawa.pl", 2, false}, + {1, "olecko.pl", 2, false}, + {1, "olkusz.pl", 2, false}, + {1, "olsztyn.pl", 2, false}, + {1, "opoczno.pl", 2, false}, + {1, "opole.pl", 2, false}, + {1, "ostroda.pl", 2, false}, + {1, "ostroleka.pl", 2, false}, + {1, "ostrowiec.pl", 2, false}, + {1, "ostrowwlkp.pl", 2, false}, + {1, "pila.pl", 2, false}, + {1, "pisz.pl", 2, false}, + {1, "podhale.pl", 2, false}, + {1, "podlasie.pl", 2, false}, + {1, "polkowice.pl", 2, false}, + {1, "pomorze.pl", 2, false}, + {1, "pomorskie.pl", 2, false}, + {1, "prochowice.pl", 2, false}, + {1, "pruszkow.pl", 2, false}, + {1, "przeworsk.pl", 2, false}, + {1, "pulawy.pl", 2, false}, + {1, "radom.pl", 2, false}, + {1, "rawa-maz.pl", 2, false}, + {1, "rybnik.pl", 2, false}, + {1, "rzeszow.pl", 2, false}, + {1, "sanok.pl", 2, false}, + {1, "sejny.pl", 2, false}, + {1, "slask.pl", 2, false}, + {1, "slupsk.pl", 2, false}, + {1, "sosnowiec.pl", 2, false}, + {1, "stalowa-wola.pl", 2, false}, + {1, "skoczow.pl", 2, false}, + {1, "starachowice.pl", 2, false}, + {1, "stargard.pl", 2, false}, + {1, "suwalki.pl", 2, false}, + {1, "swidnica.pl", 2, false}, + {1, "swiebodzin.pl", 2, false}, + {1, "swinoujscie.pl", 2, false}, + {1, "szczecin.pl", 2, false}, + {1, "szczytno.pl", 2, false}, + {1, "tarnobrzeg.pl", 2, false}, + {1, "tgory.pl", 2, false}, + {1, "turek.pl", 2, false}, + {1, "tychy.pl", 2, false}, + {1, "ustka.pl", 2, false}, + {1, "walbrzych.pl", 2, false}, + {1, 
"warmia.pl", 2, false}, + {1, "warszawa.pl", 2, false}, + {1, "waw.pl", 2, false}, + {1, "wegrow.pl", 2, false}, + {1, "wielun.pl", 2, false}, + {1, "wlocl.pl", 2, false}, + {1, "wloclawek.pl", 2, false}, + {1, "wodzislaw.pl", 2, false}, + {1, "wolomin.pl", 2, false}, + {1, "wroclaw.pl", 2, false}, + {1, "zachpomor.pl", 2, false}, + {1, "zagan.pl", 2, false}, + {1, "zarow.pl", 2, false}, + {1, "zgora.pl", 2, false}, + {1, "zgorzelec.pl", 2, false}, + {1, "pm", 1, false}, + {1, "pn", 1, false}, + {1, "gov.pn", 2, false}, + {1, "co.pn", 2, false}, + {1, "org.pn", 2, false}, + {1, "edu.pn", 2, false}, + {1, "net.pn", 2, false}, + {1, "post", 1, false}, + {1, "pr", 1, false}, + {1, "com.pr", 2, false}, + {1, "net.pr", 2, false}, + {1, "org.pr", 2, false}, + {1, "gov.pr", 2, false}, + {1, "edu.pr", 2, false}, + {1, "isla.pr", 2, false}, + {1, "pro.pr", 2, false}, + {1, "biz.pr", 2, false}, + {1, "info.pr", 2, false}, + {1, "name.pr", 2, false}, + {1, "est.pr", 2, false}, + {1, "prof.pr", 2, false}, + {1, "ac.pr", 2, false}, + {1, "pro", 1, false}, + {1, "aaa.pro", 2, false}, + {1, "aca.pro", 2, false}, + {1, "acct.pro", 2, false}, + {1, "avocat.pro", 2, false}, + {1, "bar.pro", 2, false}, + {1, "cpa.pro", 2, false}, + {1, "eng.pro", 2, false}, + {1, "jur.pro", 2, false}, + {1, "law.pro", 2, false}, + {1, "med.pro", 2, false}, + {1, "recht.pro", 2, false}, + {1, "ps", 1, false}, + {1, "edu.ps", 2, false}, + {1, "gov.ps", 2, false}, + {1, "sec.ps", 2, false}, + {1, "plo.ps", 2, false}, + {1, "com.ps", 2, false}, + {1, "org.ps", 2, false}, + {1, "net.ps", 2, false}, + {1, "pt", 1, false}, + {1, "net.pt", 2, false}, + {1, "gov.pt", 2, false}, + {1, "org.pt", 2, false}, + {1, "edu.pt", 2, false}, + {1, "int.pt", 2, false}, + {1, "publ.pt", 2, false}, + {1, "com.pt", 2, false}, + {1, "nome.pt", 2, false}, + {1, "pw", 1, false}, + {1, "co.pw", 2, false}, + {1, "ne.pw", 2, false}, + {1, "or.pw", 2, false}, + {1, "ed.pw", 2, false}, + {1, "go.pw", 2, false}, + {1, "belau.pw", 2, 
false}, + {1, "py", 1, false}, + {1, "com.py", 2, false}, + {1, "coop.py", 2, false}, + {1, "edu.py", 2, false}, + {1, "gov.py", 2, false}, + {1, "mil.py", 2, false}, + {1, "net.py", 2, false}, + {1, "org.py", 2, false}, + {1, "qa", 1, false}, + {1, "com.qa", 2, false}, + {1, "edu.qa", 2, false}, + {1, "gov.qa", 2, false}, + {1, "mil.qa", 2, false}, + {1, "name.qa", 2, false}, + {1, "net.qa", 2, false}, + {1, "org.qa", 2, false}, + {1, "sch.qa", 2, false}, + {1, "re", 1, false}, + {1, "asso.re", 2, false}, + {1, "com.re", 2, false}, + {1, "nom.re", 2, false}, + {1, "ro", 1, false}, + {1, "arts.ro", 2, false}, + {1, "com.ro", 2, false}, + {1, "firm.ro", 2, false}, + {1, "info.ro", 2, false}, + {1, "nom.ro", 2, false}, + {1, "nt.ro", 2, false}, + {1, "org.ro", 2, false}, + {1, "rec.ro", 2, false}, + {1, "store.ro", 2, false}, + {1, "tm.ro", 2, false}, + {1, "www.ro", 2, false}, + {1, "rs", 1, false}, + {1, "ac.rs", 2, false}, + {1, "co.rs", 2, false}, + {1, "edu.rs", 2, false}, + {1, "gov.rs", 2, false}, + {1, "in.rs", 2, false}, + {1, "org.rs", 2, false}, + {1, "ru", 1, false}, + {1, "rw", 1, false}, + {1, "ac.rw", 2, false}, + {1, "co.rw", 2, false}, + {1, "coop.rw", 2, false}, + {1, "gov.rw", 2, false}, + {1, "mil.rw", 2, false}, + {1, "net.rw", 2, false}, + {1, "org.rw", 2, false}, + {1, "sa", 1, false}, + {1, "com.sa", 2, false}, + {1, "net.sa", 2, false}, + {1, "org.sa", 2, false}, + {1, "gov.sa", 2, false}, + {1, "med.sa", 2, false}, + {1, "pub.sa", 2, false}, + {1, "edu.sa", 2, false}, + {1, "sch.sa", 2, false}, + {1, "sb", 1, false}, + {1, "com.sb", 2, false}, + {1, "edu.sb", 2, false}, + {1, "gov.sb", 2, false}, + {1, "net.sb", 2, false}, + {1, "org.sb", 2, false}, + {1, "sc", 1, false}, + {1, "com.sc", 2, false}, + {1, "gov.sc", 2, false}, + {1, "net.sc", 2, false}, + {1, "org.sc", 2, false}, + {1, "edu.sc", 2, false}, + {1, "sd", 1, false}, + {1, "com.sd", 2, false}, + {1, "net.sd", 2, false}, + {1, "org.sd", 2, false}, + {1, "edu.sd", 2, false}, + {1, 
"med.sd", 2, false}, + {1, "tv.sd", 2, false}, + {1, "gov.sd", 2, false}, + {1, "info.sd", 2, false}, + {1, "se", 1, false}, + {1, "a.se", 2, false}, + {1, "ac.se", 2, false}, + {1, "b.se", 2, false}, + {1, "bd.se", 2, false}, + {1, "brand.se", 2, false}, + {1, "c.se", 2, false}, + {1, "d.se", 2, false}, + {1, "e.se", 2, false}, + {1, "f.se", 2, false}, + {1, "fh.se", 2, false}, + {1, "fhsk.se", 2, false}, + {1, "fhv.se", 2, false}, + {1, "g.se", 2, false}, + {1, "h.se", 2, false}, + {1, "i.se", 2, false}, + {1, "k.se", 2, false}, + {1, "komforb.se", 2, false}, + {1, "kommunalforbund.se", 2, false}, + {1, "komvux.se", 2, false}, + {1, "l.se", 2, false}, + {1, "lanbib.se", 2, false}, + {1, "m.se", 2, false}, + {1, "n.se", 2, false}, + {1, "naturbruksgymn.se", 2, false}, + {1, "o.se", 2, false}, + {1, "org.se", 2, false}, + {1, "p.se", 2, false}, + {1, "parti.se", 2, false}, + {1, "pp.se", 2, false}, + {1, "press.se", 2, false}, + {1, "r.se", 2, false}, + {1, "s.se", 2, false}, + {1, "t.se", 2, false}, + {1, "tm.se", 2, false}, + {1, "u.se", 2, false}, + {1, "w.se", 2, false}, + {1, "x.se", 2, false}, + {1, "y.se", 2, false}, + {1, "z.se", 2, false}, + {1, "sg", 1, false}, + {1, "com.sg", 2, false}, + {1, "net.sg", 2, false}, + {1, "org.sg", 2, false}, + {1, "gov.sg", 2, false}, + {1, "edu.sg", 2, false}, + {1, "per.sg", 2, false}, + {1, "sh", 1, false}, + {1, "com.sh", 2, false}, + {1, "net.sh", 2, false}, + {1, "gov.sh", 2, false}, + {1, "org.sh", 2, false}, + {1, "mil.sh", 2, false}, + {1, "si", 1, false}, + {1, "sj", 1, false}, + {1, "sk", 1, false}, + {1, "sl", 1, false}, + {1, "com.sl", 2, false}, + {1, "net.sl", 2, false}, + {1, "edu.sl", 2, false}, + {1, "gov.sl", 2, false}, + {1, "org.sl", 2, false}, + {1, "sm", 1, false}, + {1, "sn", 1, false}, + {1, "art.sn", 2, false}, + {1, "com.sn", 2, false}, + {1, "edu.sn", 2, false}, + {1, "gouv.sn", 2, false}, + {1, "org.sn", 2, false}, + {1, "perso.sn", 2, false}, + {1, "univ.sn", 2, false}, + {1, "so", 1, false}, 
+ {1, "com.so", 2, false}, + {1, "edu.so", 2, false}, + {1, "gov.so", 2, false}, + {1, "me.so", 2, false}, + {1, "net.so", 2, false}, + {1, "org.so", 2, false}, + {1, "sr", 1, false}, + {1, "ss", 1, false}, + {1, "biz.ss", 2, false}, + {1, "com.ss", 2, false}, + {1, "edu.ss", 2, false}, + {1, "gov.ss", 2, false}, + {1, "net.ss", 2, false}, + {1, "org.ss", 2, false}, + {1, "st", 1, false}, + {1, "co.st", 2, false}, + {1, "com.st", 2, false}, + {1, "consulado.st", 2, false}, + {1, "edu.st", 2, false}, + {1, "embaixada.st", 2, false}, + {1, "gov.st", 2, false}, + {1, "mil.st", 2, false}, + {1, "net.st", 2, false}, + {1, "org.st", 2, false}, + {1, "principe.st", 2, false}, + {1, "saotome.st", 2, false}, + {1, "store.st", 2, false}, + {1, "su", 1, false}, + {1, "sv", 1, false}, + {1, "com.sv", 2, false}, + {1, "edu.sv", 2, false}, + {1, "gob.sv", 2, false}, + {1, "org.sv", 2, false}, + {1, "red.sv", 2, false}, + {1, "sx", 1, false}, + {1, "gov.sx", 2, false}, + {1, "sy", 1, false}, + {1, "edu.sy", 2, false}, + {1, "gov.sy", 2, false}, + {1, "net.sy", 2, false}, + {1, "mil.sy", 2, false}, + {1, "com.sy", 2, false}, + {1, "org.sy", 2, false}, + {1, "sz", 1, false}, + {1, "co.sz", 2, false}, + {1, "ac.sz", 2, false}, + {1, "org.sz", 2, false}, + {1, "tc", 1, false}, + {1, "td", 1, false}, + {1, "tel", 1, false}, + {1, "tf", 1, false}, + {1, "tg", 1, false}, + {1, "th", 1, false}, + {1, "ac.th", 2, false}, + {1, "co.th", 2, false}, + {1, "go.th", 2, false}, + {1, "in.th", 2, false}, + {1, "mi.th", 2, false}, + {1, "net.th", 2, false}, + {1, "or.th", 2, false}, + {1, "tj", 1, false}, + {1, "ac.tj", 2, false}, + {1, "biz.tj", 2, false}, + {1, "co.tj", 2, false}, + {1, "com.tj", 2, false}, + {1, "edu.tj", 2, false}, + {1, "go.tj", 2, false}, + {1, "gov.tj", 2, false}, + {1, "int.tj", 2, false}, + {1, "mil.tj", 2, false}, + {1, "name.tj", 2, false}, + {1, "net.tj", 2, false}, + {1, "nic.tj", 2, false}, + {1, "org.tj", 2, false}, + {1, "test.tj", 2, false}, + {1, "web.tj", 2, 
false}, + {1, "tk", 1, false}, + {1, "tl", 1, false}, + {1, "gov.tl", 2, false}, + {1, "tm", 1, false}, + {1, "com.tm", 2, false}, + {1, "co.tm", 2, false}, + {1, "org.tm", 2, false}, + {1, "net.tm", 2, false}, + {1, "nom.tm", 2, false}, + {1, "gov.tm", 2, false}, + {1, "mil.tm", 2, false}, + {1, "edu.tm", 2, false}, + {1, "tn", 1, false}, + {1, "com.tn", 2, false}, + {1, "ens.tn", 2, false}, + {1, "fin.tn", 2, false}, + {1, "gov.tn", 2, false}, + {1, "ind.tn", 2, false}, + {1, "intl.tn", 2, false}, + {1, "nat.tn", 2, false}, + {1, "net.tn", 2, false}, + {1, "org.tn", 2, false}, + {1, "info.tn", 2, false}, + {1, "perso.tn", 2, false}, + {1, "tourism.tn", 2, false}, + {1, "edunet.tn", 2, false}, + {1, "rnrt.tn", 2, false}, + {1, "rns.tn", 2, false}, + {1, "rnu.tn", 2, false}, + {1, "mincom.tn", 2, false}, + {1, "agrinet.tn", 2, false}, + {1, "defense.tn", 2, false}, + {1, "turen.tn", 2, false}, + {1, "to", 1, false}, + {1, "com.to", 2, false}, + {1, "gov.to", 2, false}, + {1, "net.to", 2, false}, + {1, "org.to", 2, false}, + {1, "edu.to", 2, false}, + {1, "mil.to", 2, false}, + {1, "tr", 1, false}, + {1, "av.tr", 2, false}, + {1, "bbs.tr", 2, false}, + {1, "bel.tr", 2, false}, + {1, "biz.tr", 2, false}, + {1, "com.tr", 2, false}, + {1, "dr.tr", 2, false}, + {1, "edu.tr", 2, false}, + {1, "gen.tr", 2, false}, + {1, "gov.tr", 2, false}, + {1, "info.tr", 2, false}, + {1, "mil.tr", 2, false}, + {1, "k12.tr", 2, false}, + {1, "kep.tr", 2, false}, + {1, "name.tr", 2, false}, + {1, "net.tr", 2, false}, + {1, "org.tr", 2, false}, + {1, "pol.tr", 2, false}, + {1, "tel.tr", 2, false}, + {1, "tsk.tr", 2, false}, + {1, "tv.tr", 2, false}, + {1, "web.tr", 2, false}, + {1, "nc.tr", 2, false}, + {1, "gov.nc.tr", 3, false}, + {1, "tt", 1, false}, + {1, "co.tt", 2, false}, + {1, "com.tt", 2, false}, + {1, "org.tt", 2, false}, + {1, "net.tt", 2, false}, + {1, "biz.tt", 2, false}, + {1, "info.tt", 2, false}, + {1, "pro.tt", 2, false}, + {1, "int.tt", 2, false}, + {1, "coop.tt", 2, 
false}, + {1, "jobs.tt", 2, false}, + {1, "mobi.tt", 2, false}, + {1, "travel.tt", 2, false}, + {1, "museum.tt", 2, false}, + {1, "aero.tt", 2, false}, + {1, "name.tt", 2, false}, + {1, "gov.tt", 2, false}, + {1, "edu.tt", 2, false}, + {1, "tv", 1, false}, + {1, "tw", 1, false}, + {1, "edu.tw", 2, false}, + {1, "gov.tw", 2, false}, + {1, "mil.tw", 2, false}, + {1, "com.tw", 2, false}, + {1, "net.tw", 2, false}, + {1, "org.tw", 2, false}, + {1, "idv.tw", 2, false}, + {1, "game.tw", 2, false}, + {1, "ebiz.tw", 2, false}, + {1, "club.tw", 2, false}, + {1, "xn--zf0ao64a.tw", 2, false}, + {1, "xn--uc0atv.tw", 2, false}, + {1, "xn--czrw28b.tw", 2, false}, + {1, "tz", 1, false}, + {1, "ac.tz", 2, false}, + {1, "co.tz", 2, false}, + {1, "go.tz", 2, false}, + {1, "hotel.tz", 2, false}, + {1, "info.tz", 2, false}, + {1, "me.tz", 2, false}, + {1, "mil.tz", 2, false}, + {1, "mobi.tz", 2, false}, + {1, "ne.tz", 2, false}, + {1, "or.tz", 2, false}, + {1, "sc.tz", 2, false}, + {1, "tv.tz", 2, false}, + {1, "ua", 1, false}, + {1, "com.ua", 2, false}, + {1, "edu.ua", 2, false}, + {1, "gov.ua", 2, false}, + {1, "in.ua", 2, false}, + {1, "net.ua", 2, false}, + {1, "org.ua", 2, false}, + {1, "cherkassy.ua", 2, false}, + {1, "cherkasy.ua", 2, false}, + {1, "chernigov.ua", 2, false}, + {1, "chernihiv.ua", 2, false}, + {1, "chernivtsi.ua", 2, false}, + {1, "chernovtsy.ua", 2, false}, + {1, "ck.ua", 2, false}, + {1, "cn.ua", 2, false}, + {1, "cr.ua", 2, false}, + {1, "crimea.ua", 2, false}, + {1, "cv.ua", 2, false}, + {1, "dn.ua", 2, false}, + {1, "dnepropetrovsk.ua", 2, false}, + {1, "dnipropetrovsk.ua", 2, false}, + {1, "dominic.ua", 2, false}, + {1, "donetsk.ua", 2, false}, + {1, "dp.ua", 2, false}, + {1, "if.ua", 2, false}, + {1, "ivano-frankivsk.ua", 2, false}, + {1, "kh.ua", 2, false}, + {1, "kharkiv.ua", 2, false}, + {1, "kharkov.ua", 2, false}, + {1, "kherson.ua", 2, false}, + {1, "khmelnitskiy.ua", 2, false}, + {1, "khmelnytskyi.ua", 2, false}, + {1, "kiev.ua", 2, false}, + {1, 
"kirovograd.ua", 2, false}, + {1, "km.ua", 2, false}, + {1, "kr.ua", 2, false}, + {1, "krym.ua", 2, false}, + {1, "ks.ua", 2, false}, + {1, "kv.ua", 2, false}, + {1, "kyiv.ua", 2, false}, + {1, "lg.ua", 2, false}, + {1, "lt.ua", 2, false}, + {1, "lugansk.ua", 2, false}, + {1, "lutsk.ua", 2, false}, + {1, "lv.ua", 2, false}, + {1, "lviv.ua", 2, false}, + {1, "mk.ua", 2, false}, + {1, "mykolaiv.ua", 2, false}, + {1, "nikolaev.ua", 2, false}, + {1, "od.ua", 2, false}, + {1, "odesa.ua", 2, false}, + {1, "odessa.ua", 2, false}, + {1, "pl.ua", 2, false}, + {1, "poltava.ua", 2, false}, + {1, "rivne.ua", 2, false}, + {1, "rovno.ua", 2, false}, + {1, "rv.ua", 2, false}, + {1, "sb.ua", 2, false}, + {1, "sebastopol.ua", 2, false}, + {1, "sevastopol.ua", 2, false}, + {1, "sm.ua", 2, false}, + {1, "sumy.ua", 2, false}, + {1, "te.ua", 2, false}, + {1, "ternopil.ua", 2, false}, + {1, "uz.ua", 2, false}, + {1, "uzhgorod.ua", 2, false}, + {1, "vinnica.ua", 2, false}, + {1, "vinnytsia.ua", 2, false}, + {1, "vn.ua", 2, false}, + {1, "volyn.ua", 2, false}, + {1, "yalta.ua", 2, false}, + {1, "zaporizhzhe.ua", 2, false}, + {1, "zaporizhzhia.ua", 2, false}, + {1, "zhitomir.ua", 2, false}, + {1, "zhytomyr.ua", 2, false}, + {1, "zp.ua", 2, false}, + {1, "zt.ua", 2, false}, + {1, "ug", 1, false}, + {1, "co.ug", 2, false}, + {1, "or.ug", 2, false}, + {1, "ac.ug", 2, false}, + {1, "sc.ug", 2, false}, + {1, "go.ug", 2, false}, + {1, "ne.ug", 2, false}, + {1, "com.ug", 2, false}, + {1, "org.ug", 2, false}, + {1, "uk", 1, false}, + {1, "ac.uk", 2, false}, + {1, "co.uk", 2, false}, + {1, "gov.uk", 2, false}, + {1, "ltd.uk", 2, false}, + {1, "me.uk", 2, false}, + {1, "net.uk", 2, false}, + {1, "nhs.uk", 2, false}, + {1, "org.uk", 2, false}, + {1, "plc.uk", 2, false}, + {1, "police.uk", 2, false}, + {2, "sch.uk", 3, false}, + {1, "us", 1, false}, + {1, "dni.us", 2, false}, + {1, "fed.us", 2, false}, + {1, "isa.us", 2, false}, + {1, "kids.us", 2, false}, + {1, "nsn.us", 2, false}, + {1, "ak.us", 2, 
false}, + {1, "al.us", 2, false}, + {1, "ar.us", 2, false}, + {1, "as.us", 2, false}, + {1, "az.us", 2, false}, + {1, "ca.us", 2, false}, + {1, "co.us", 2, false}, + {1, "ct.us", 2, false}, + {1, "dc.us", 2, false}, + {1, "de.us", 2, false}, + {1, "fl.us", 2, false}, + {1, "ga.us", 2, false}, + {1, "gu.us", 2, false}, + {1, "hi.us", 2, false}, + {1, "ia.us", 2, false}, + {1, "id.us", 2, false}, + {1, "il.us", 2, false}, + {1, "in.us", 2, false}, + {1, "ks.us", 2, false}, + {1, "ky.us", 2, false}, + {1, "la.us", 2, false}, + {1, "ma.us", 2, false}, + {1, "md.us", 2, false}, + {1, "me.us", 2, false}, + {1, "mi.us", 2, false}, + {1, "mn.us", 2, false}, + {1, "mo.us", 2, false}, + {1, "ms.us", 2, false}, + {1, "mt.us", 2, false}, + {1, "nc.us", 2, false}, + {1, "nd.us", 2, false}, + {1, "ne.us", 2, false}, + {1, "nh.us", 2, false}, + {1, "nj.us", 2, false}, + {1, "nm.us", 2, false}, + {1, "nv.us", 2, false}, + {1, "ny.us", 2, false}, + {1, "oh.us", 2, false}, + {1, "ok.us", 2, false}, + {1, "or.us", 2, false}, + {1, "pa.us", 2, false}, + {1, "pr.us", 2, false}, + {1, "ri.us", 2, false}, + {1, "sc.us", 2, false}, + {1, "sd.us", 2, false}, + {1, "tn.us", 2, false}, + {1, "tx.us", 2, false}, + {1, "ut.us", 2, false}, + {1, "vi.us", 2, false}, + {1, "vt.us", 2, false}, + {1, "va.us", 2, false}, + {1, "wa.us", 2, false}, + {1, "wi.us", 2, false}, + {1, "wv.us", 2, false}, + {1, "wy.us", 2, false}, + {1, "k12.ak.us", 3, false}, + {1, "k12.al.us", 3, false}, + {1, "k12.ar.us", 3, false}, + {1, "k12.as.us", 3, false}, + {1, "k12.az.us", 3, false}, + {1, "k12.ca.us", 3, false}, + {1, "k12.co.us", 3, false}, + {1, "k12.ct.us", 3, false}, + {1, "k12.dc.us", 3, false}, + {1, "k12.de.us", 3, false}, + {1, "k12.fl.us", 3, false}, + {1, "k12.ga.us", 3, false}, + {1, "k12.gu.us", 3, false}, + {1, "k12.ia.us", 3, false}, + {1, "k12.id.us", 3, false}, + {1, "k12.il.us", 3, false}, + {1, "k12.in.us", 3, false}, + {1, "k12.ks.us", 3, false}, + {1, "k12.ky.us", 3, false}, + {1, 
"k12.la.us", 3, false}, + {1, "k12.ma.us", 3, false}, + {1, "k12.md.us", 3, false}, + {1, "k12.me.us", 3, false}, + {1, "k12.mi.us", 3, false}, + {1, "k12.mn.us", 3, false}, + {1, "k12.mo.us", 3, false}, + {1, "k12.ms.us", 3, false}, + {1, "k12.mt.us", 3, false}, + {1, "k12.nc.us", 3, false}, + {1, "k12.ne.us", 3, false}, + {1, "k12.nh.us", 3, false}, + {1, "k12.nj.us", 3, false}, + {1, "k12.nm.us", 3, false}, + {1, "k12.nv.us", 3, false}, + {1, "k12.ny.us", 3, false}, + {1, "k12.oh.us", 3, false}, + {1, "k12.ok.us", 3, false}, + {1, "k12.or.us", 3, false}, + {1, "k12.pa.us", 3, false}, + {1, "k12.pr.us", 3, false}, + {1, "k12.ri.us", 3, false}, + {1, "k12.sc.us", 3, false}, + {1, "k12.tn.us", 3, false}, + {1, "k12.tx.us", 3, false}, + {1, "k12.ut.us", 3, false}, + {1, "k12.vi.us", 3, false}, + {1, "k12.vt.us", 3, false}, + {1, "k12.va.us", 3, false}, + {1, "k12.wa.us", 3, false}, + {1, "k12.wi.us", 3, false}, + {1, "k12.wy.us", 3, false}, + {1, "cc.ak.us", 3, false}, + {1, "cc.al.us", 3, false}, + {1, "cc.ar.us", 3, false}, + {1, "cc.as.us", 3, false}, + {1, "cc.az.us", 3, false}, + {1, "cc.ca.us", 3, false}, + {1, "cc.co.us", 3, false}, + {1, "cc.ct.us", 3, false}, + {1, "cc.dc.us", 3, false}, + {1, "cc.de.us", 3, false}, + {1, "cc.fl.us", 3, false}, + {1, "cc.ga.us", 3, false}, + {1, "cc.gu.us", 3, false}, + {1, "cc.hi.us", 3, false}, + {1, "cc.ia.us", 3, false}, + {1, "cc.id.us", 3, false}, + {1, "cc.il.us", 3, false}, + {1, "cc.in.us", 3, false}, + {1, "cc.ks.us", 3, false}, + {1, "cc.ky.us", 3, false}, + {1, "cc.la.us", 3, false}, + {1, "cc.ma.us", 3, false}, + {1, "cc.md.us", 3, false}, + {1, "cc.me.us", 3, false}, + {1, "cc.mi.us", 3, false}, + {1, "cc.mn.us", 3, false}, + {1, "cc.mo.us", 3, false}, + {1, "cc.ms.us", 3, false}, + {1, "cc.mt.us", 3, false}, + {1, "cc.nc.us", 3, false}, + {1, "cc.nd.us", 3, false}, + {1, "cc.ne.us", 3, false}, + {1, "cc.nh.us", 3, false}, + {1, "cc.nj.us", 3, false}, + {1, "cc.nm.us", 3, false}, + {1, "cc.nv.us", 3, false}, + 
{1, "cc.ny.us", 3, false}, + {1, "cc.oh.us", 3, false}, + {1, "cc.ok.us", 3, false}, + {1, "cc.or.us", 3, false}, + {1, "cc.pa.us", 3, false}, + {1, "cc.pr.us", 3, false}, + {1, "cc.ri.us", 3, false}, + {1, "cc.sc.us", 3, false}, + {1, "cc.sd.us", 3, false}, + {1, "cc.tn.us", 3, false}, + {1, "cc.tx.us", 3, false}, + {1, "cc.ut.us", 3, false}, + {1, "cc.vi.us", 3, false}, + {1, "cc.vt.us", 3, false}, + {1, "cc.va.us", 3, false}, + {1, "cc.wa.us", 3, false}, + {1, "cc.wi.us", 3, false}, + {1, "cc.wv.us", 3, false}, + {1, "cc.wy.us", 3, false}, + {1, "lib.ak.us", 3, false}, + {1, "lib.al.us", 3, false}, + {1, "lib.ar.us", 3, false}, + {1, "lib.as.us", 3, false}, + {1, "lib.az.us", 3, false}, + {1, "lib.ca.us", 3, false}, + {1, "lib.co.us", 3, false}, + {1, "lib.ct.us", 3, false}, + {1, "lib.dc.us", 3, false}, + {1, "lib.fl.us", 3, false}, + {1, "lib.ga.us", 3, false}, + {1, "lib.gu.us", 3, false}, + {1, "lib.hi.us", 3, false}, + {1, "lib.ia.us", 3, false}, + {1, "lib.id.us", 3, false}, + {1, "lib.il.us", 3, false}, + {1, "lib.in.us", 3, false}, + {1, "lib.ks.us", 3, false}, + {1, "lib.ky.us", 3, false}, + {1, "lib.la.us", 3, false}, + {1, "lib.ma.us", 3, false}, + {1, "lib.md.us", 3, false}, + {1, "lib.me.us", 3, false}, + {1, "lib.mi.us", 3, false}, + {1, "lib.mn.us", 3, false}, + {1, "lib.mo.us", 3, false}, + {1, "lib.ms.us", 3, false}, + {1, "lib.mt.us", 3, false}, + {1, "lib.nc.us", 3, false}, + {1, "lib.nd.us", 3, false}, + {1, "lib.ne.us", 3, false}, + {1, "lib.nh.us", 3, false}, + {1, "lib.nj.us", 3, false}, + {1, "lib.nm.us", 3, false}, + {1, "lib.nv.us", 3, false}, + {1, "lib.ny.us", 3, false}, + {1, "lib.oh.us", 3, false}, + {1, "lib.ok.us", 3, false}, + {1, "lib.or.us", 3, false}, + {1, "lib.pa.us", 3, false}, + {1, "lib.pr.us", 3, false}, + {1, "lib.ri.us", 3, false}, + {1, "lib.sc.us", 3, false}, + {1, "lib.sd.us", 3, false}, + {1, "lib.tn.us", 3, false}, + {1, "lib.tx.us", 3, false}, + {1, "lib.ut.us", 3, false}, + {1, "lib.vi.us", 3, false}, + {1, 
"lib.vt.us", 3, false}, + {1, "lib.va.us", 3, false}, + {1, "lib.wa.us", 3, false}, + {1, "lib.wi.us", 3, false}, + {1, "lib.wy.us", 3, false}, + {1, "pvt.k12.ma.us", 4, false}, + {1, "chtr.k12.ma.us", 4, false}, + {1, "paroch.k12.ma.us", 4, false}, + {1, "ann-arbor.mi.us", 3, false}, + {1, "cog.mi.us", 3, false}, + {1, "dst.mi.us", 3, false}, + {1, "eaton.mi.us", 3, false}, + {1, "gen.mi.us", 3, false}, + {1, "mus.mi.us", 3, false}, + {1, "tec.mi.us", 3, false}, + {1, "washtenaw.mi.us", 3, false}, + {1, "uy", 1, false}, + {1, "com.uy", 2, false}, + {1, "edu.uy", 2, false}, + {1, "gub.uy", 2, false}, + {1, "mil.uy", 2, false}, + {1, "net.uy", 2, false}, + {1, "org.uy", 2, false}, + {1, "uz", 1, false}, + {1, "co.uz", 2, false}, + {1, "com.uz", 2, false}, + {1, "net.uz", 2, false}, + {1, "org.uz", 2, false}, + {1, "va", 1, false}, + {1, "vc", 1, false}, + {1, "com.vc", 2, false}, + {1, "net.vc", 2, false}, + {1, "org.vc", 2, false}, + {1, "gov.vc", 2, false}, + {1, "mil.vc", 2, false}, + {1, "edu.vc", 2, false}, + {1, "ve", 1, false}, + {1, "arts.ve", 2, false}, + {1, "co.ve", 2, false}, + {1, "com.ve", 2, false}, + {1, "e12.ve", 2, false}, + {1, "edu.ve", 2, false}, + {1, "firm.ve", 2, false}, + {1, "gob.ve", 2, false}, + {1, "gov.ve", 2, false}, + {1, "info.ve", 2, false}, + {1, "int.ve", 2, false}, + {1, "mil.ve", 2, false}, + {1, "net.ve", 2, false}, + {1, "org.ve", 2, false}, + {1, "rec.ve", 2, false}, + {1, "store.ve", 2, false}, + {1, "tec.ve", 2, false}, + {1, "web.ve", 2, false}, + {1, "vg", 1, false}, + {1, "vi", 1, false}, + {1, "co.vi", 2, false}, + {1, "com.vi", 2, false}, + {1, "k12.vi", 2, false}, + {1, "net.vi", 2, false}, + {1, "org.vi", 2, false}, + {1, "vn", 1, false}, + {1, "com.vn", 2, false}, + {1, "net.vn", 2, false}, + {1, "org.vn", 2, false}, + {1, "edu.vn", 2, false}, + {1, "gov.vn", 2, false}, + {1, "int.vn", 2, false}, + {1, "ac.vn", 2, false}, + {1, "biz.vn", 2, false}, + {1, "info.vn", 2, false}, + {1, "name.vn", 2, false}, + {1, 
"pro.vn", 2, false}, + {1, "health.vn", 2, false}, + {1, "vu", 1, false}, + {1, "com.vu", 2, false}, + {1, "edu.vu", 2, false}, + {1, "net.vu", 2, false}, + {1, "org.vu", 2, false}, + {1, "wf", 1, false}, + {1, "ws", 1, false}, + {1, "com.ws", 2, false}, + {1, "net.ws", 2, false}, + {1, "org.ws", 2, false}, + {1, "gov.ws", 2, false}, + {1, "edu.ws", 2, false}, + {1, "yt", 1, false}, + {1, "xn--mgbaam7a8h", 1, false}, + {1, "xn--y9a3aq", 1, false}, + {1, "xn--54b7fta0cc", 1, false}, + {1, "xn--90ae", 1, false}, + {1, "xn--90ais", 1, false}, + {1, "xn--fiqs8s", 1, false}, + {1, "xn--fiqz9s", 1, false}, + {1, "xn--lgbbat1ad8j", 1, false}, + {1, "xn--wgbh1c", 1, false}, + {1, "xn--e1a4c", 1, false}, + {1, "xn--mgbah1a3hjkrd", 1, false}, + {1, "xn--node", 1, false}, + {1, "xn--qxam", 1, false}, + {1, "xn--j6w193g", 1, false}, + {1, "xn--55qx5d.xn--j6w193g", 2, false}, + {1, "xn--wcvs22d.xn--j6w193g", 2, false}, + {1, "xn--mxtq1m.xn--j6w193g", 2, false}, + {1, "xn--gmqw5a.xn--j6w193g", 2, false}, + {1, "xn--od0alg.xn--j6w193g", 2, false}, + {1, "xn--uc0atv.xn--j6w193g", 2, false}, + {1, "xn--2scrj9c", 1, false}, + {1, "xn--3hcrj9c", 1, false}, + {1, "xn--45br5cyl", 1, false}, + {1, "xn--h2breg3eve", 1, false}, + {1, "xn--h2brj9c8c", 1, false}, + {1, "xn--mgbgu82a", 1, false}, + {1, "xn--rvc1e0am3e", 1, false}, + {1, "xn--h2brj9c", 1, false}, + {1, "xn--mgbbh1a", 1, false}, + {1, "xn--mgbbh1a71e", 1, false}, + {1, "xn--fpcrj9c3d", 1, false}, + {1, "xn--gecrj9c", 1, false}, + {1, "xn--s9brj9c", 1, false}, + {1, "xn--45brj9c", 1, false}, + {1, "xn--xkc2dl3a5ee0h", 1, false}, + {1, "xn--mgba3a4f16a", 1, false}, + {1, "xn--mgba3a4fra", 1, false}, + {1, "xn--mgbtx2b", 1, false}, + {1, "xn--mgbayh7gpa", 1, false}, + {1, "xn--3e0b707e", 1, false}, + {1, "xn--80ao21a", 1, false}, + {1, "xn--fzc2c9e2c", 1, false}, + {1, "xn--xkc2al3hye2a", 1, false}, + {1, "xn--mgbc0a9azcg", 1, false}, + {1, "xn--d1alf", 1, false}, + {1, "xn--l1acc", 1, false}, + {1, "xn--mix891f", 1, false}, + 
{1, "xn--mix082f", 1, false}, + {1, "xn--mgbx4cd0ab", 1, false}, + {1, "xn--mgb9awbf", 1, false}, + {1, "xn--mgbai9azgqp6j", 1, false}, + {1, "xn--mgbai9a5eva00b", 1, false}, + {1, "xn--ygbi2ammx", 1, false}, + {1, "xn--90a3ac", 1, false}, + {1, "xn--o1ac.xn--90a3ac", 2, false}, + {1, "xn--c1avg.xn--90a3ac", 2, false}, + {1, "xn--90azh.xn--90a3ac", 2, false}, + {1, "xn--d1at.xn--90a3ac", 2, false}, + {1, "xn--o1ach.xn--90a3ac", 2, false}, + {1, "xn--80au.xn--90a3ac", 2, false}, + {1, "xn--p1ai", 1, false}, + {1, "xn--wgbl6a", 1, false}, + {1, "xn--mgberp4a5d4ar", 1, false}, + {1, "xn--mgberp4a5d4a87g", 1, false}, + {1, "xn--mgbqly7c0a67fbc", 1, false}, + {1, "xn--mgbqly7cvafr", 1, false}, + {1, "xn--mgbpl2fh", 1, false}, + {1, "xn--yfro4i67o", 1, false}, + {1, "xn--clchc0ea0b2g2a9gcd", 1, false}, + {1, "xn--ogbpf8fl", 1, false}, + {1, "xn--mgbtf8fl", 1, false}, + {1, "xn--o3cw4h", 1, false}, + {1, "xn--12c1fe0br.xn--o3cw4h", 2, false}, + {1, "xn--12co0c3b4eva.xn--o3cw4h", 2, false}, + {1, "xn--h3cuzk1di.xn--o3cw4h", 2, false}, + {1, "xn--o3cyx2a.xn--o3cw4h", 2, false}, + {1, "xn--m3ch0j3a.xn--o3cw4h", 2, false}, + {1, "xn--12cfi8ixb8l.xn--o3cw4h", 2, false}, + {1, "xn--pgbs0dh", 1, false}, + {1, "xn--kpry57d", 1, false}, + {1, "xn--kprw13d", 1, false}, + {1, "xn--nnx388a", 1, false}, + {1, "xn--j1amh", 1, false}, + {1, "xn--mgb2ddes", 1, false}, + {1, "xxx", 1, false}, + {2, "ye", 2, false}, + {1, "ac.za", 2, false}, + {1, "agric.za", 2, false}, + {1, "alt.za", 2, false}, + {1, "co.za", 2, false}, + {1, "edu.za", 2, false}, + {1, "gov.za", 2, false}, + {1, "grondar.za", 2, false}, + {1, "law.za", 2, false}, + {1, "mil.za", 2, false}, + {1, "net.za", 2, false}, + {1, "ngo.za", 2, false}, + {1, "nic.za", 2, false}, + {1, "nis.za", 2, false}, + {1, "nom.za", 2, false}, + {1, "org.za", 2, false}, + {1, "school.za", 2, false}, + {1, "tm.za", 2, false}, + {1, "web.za", 2, false}, + {1, "zm", 1, false}, + {1, "ac.zm", 2, false}, + {1, "biz.zm", 2, false}, + {1, "co.zm", 
2, false}, + {1, "com.zm", 2, false}, + {1, "edu.zm", 2, false}, + {1, "gov.zm", 2, false}, + {1, "info.zm", 2, false}, + {1, "mil.zm", 2, false}, + {1, "net.zm", 2, false}, + {1, "org.zm", 2, false}, + {1, "sch.zm", 2, false}, + {1, "zw", 1, false}, + {1, "ac.zw", 2, false}, + {1, "co.zw", 2, false}, + {1, "gov.zw", 2, false}, + {1, "mil.zw", 2, false}, + {1, "org.zw", 2, false}, + {1, "aaa", 1, false}, + {1, "aarp", 1, false}, + {1, "abarth", 1, false}, + {1, "abb", 1, false}, + {1, "abbott", 1, false}, + {1, "abbvie", 1, false}, + {1, "abc", 1, false}, + {1, "able", 1, false}, + {1, "abogado", 1, false}, + {1, "abudhabi", 1, false}, + {1, "academy", 1, false}, + {1, "accenture", 1, false}, + {1, "accountant", 1, false}, + {1, "accountants", 1, false}, + {1, "aco", 1, false}, + {1, "actor", 1, false}, + {1, "adac", 1, false}, + {1, "ads", 1, false}, + {1, "adult", 1, false}, + {1, "aeg", 1, false}, + {1, "aetna", 1, false}, + {1, "afamilycompany", 1, false}, + {1, "afl", 1, false}, + {1, "africa", 1, false}, + {1, "agakhan", 1, false}, + {1, "agency", 1, false}, + {1, "aig", 1, false}, + {1, "aigo", 1, false}, + {1, "airbus", 1, false}, + {1, "airforce", 1, false}, + {1, "airtel", 1, false}, + {1, "akdn", 1, false}, + {1, "alfaromeo", 1, false}, + {1, "alibaba", 1, false}, + {1, "alipay", 1, false}, + {1, "allfinanz", 1, false}, + {1, "allstate", 1, false}, + {1, "ally", 1, false}, + {1, "alsace", 1, false}, + {1, "alstom", 1, false}, + {1, "amazon", 1, false}, + {1, "americanexpress", 1, false}, + {1, "americanfamily", 1, false}, + {1, "amex", 1, false}, + {1, "amfam", 1, false}, + {1, "amica", 1, false}, + {1, "amsterdam", 1, false}, + {1, "analytics", 1, false}, + {1, "android", 1, false}, + {1, "anquan", 1, false}, + {1, "anz", 1, false}, + {1, "aol", 1, false}, + {1, "apartments", 1, false}, + {1, "app", 1, false}, + {1, "apple", 1, false}, + {1, "aquarelle", 1, false}, + {1, "arab", 1, false}, + {1, "aramco", 1, false}, + {1, "archi", 1, false}, + {1, 
"army", 1, false}, + {1, "art", 1, false}, + {1, "arte", 1, false}, + {1, "asda", 1, false}, + {1, "associates", 1, false}, + {1, "athleta", 1, false}, + {1, "attorney", 1, false}, + {1, "auction", 1, false}, + {1, "audi", 1, false}, + {1, "audible", 1, false}, + {1, "audio", 1, false}, + {1, "auspost", 1, false}, + {1, "author", 1, false}, + {1, "auto", 1, false}, + {1, "autos", 1, false}, + {1, "avianca", 1, false}, + {1, "aws", 1, false}, + {1, "axa", 1, false}, + {1, "azure", 1, false}, + {1, "baby", 1, false}, + {1, "baidu", 1, false}, + {1, "banamex", 1, false}, + {1, "bananarepublic", 1, false}, + {1, "band", 1, false}, + {1, "bank", 1, false}, + {1, "bar", 1, false}, + {1, "barcelona", 1, false}, + {1, "barclaycard", 1, false}, + {1, "barclays", 1, false}, + {1, "barefoot", 1, false}, + {1, "bargains", 1, false}, + {1, "baseball", 1, false}, + {1, "basketball", 1, false}, + {1, "bauhaus", 1, false}, + {1, "bayern", 1, false}, + {1, "bbc", 1, false}, + {1, "bbt", 1, false}, + {1, "bbva", 1, false}, + {1, "bcg", 1, false}, + {1, "bcn", 1, false}, + {1, "beats", 1, false}, + {1, "beauty", 1, false}, + {1, "beer", 1, false}, + {1, "bentley", 1, false}, + {1, "berlin", 1, false}, + {1, "best", 1, false}, + {1, "bestbuy", 1, false}, + {1, "bet", 1, false}, + {1, "bharti", 1, false}, + {1, "bible", 1, false}, + {1, "bid", 1, false}, + {1, "bike", 1, false}, + {1, "bing", 1, false}, + {1, "bingo", 1, false}, + {1, "bio", 1, false}, + {1, "black", 1, false}, + {1, "blackfriday", 1, false}, + {1, "blockbuster", 1, false}, + {1, "blog", 1, false}, + {1, "bloomberg", 1, false}, + {1, "blue", 1, false}, + {1, "bms", 1, false}, + {1, "bmw", 1, false}, + {1, "bnpparibas", 1, false}, + {1, "boats", 1, false}, + {1, "boehringer", 1, false}, + {1, "bofa", 1, false}, + {1, "bom", 1, false}, + {1, "bond", 1, false}, + {1, "boo", 1, false}, + {1, "book", 1, false}, + {1, "booking", 1, false}, + {1, "bosch", 1, false}, + {1, "bostik", 1, false}, + {1, "boston", 1, false}, + {1, 
"bot", 1, false}, + {1, "boutique", 1, false}, + {1, "box", 1, false}, + {1, "bradesco", 1, false}, + {1, "bridgestone", 1, false}, + {1, "broadway", 1, false}, + {1, "broker", 1, false}, + {1, "brother", 1, false}, + {1, "brussels", 1, false}, + {1, "budapest", 1, false}, + {1, "bugatti", 1, false}, + {1, "build", 1, false}, + {1, "builders", 1, false}, + {1, "business", 1, false}, + {1, "buy", 1, false}, + {1, "buzz", 1, false}, + {1, "bzh", 1, false}, + {1, "cab", 1, false}, + {1, "cafe", 1, false}, + {1, "cal", 1, false}, + {1, "call", 1, false}, + {1, "calvinklein", 1, false}, + {1, "cam", 1, false}, + {1, "camera", 1, false}, + {1, "camp", 1, false}, + {1, "cancerresearch", 1, false}, + {1, "canon", 1, false}, + {1, "capetown", 1, false}, + {1, "capital", 1, false}, + {1, "capitalone", 1, false}, + {1, "car", 1, false}, + {1, "caravan", 1, false}, + {1, "cards", 1, false}, + {1, "care", 1, false}, + {1, "career", 1, false}, + {1, "careers", 1, false}, + {1, "cars", 1, false}, + {1, "casa", 1, false}, + {1, "case", 1, false}, + {1, "caseih", 1, false}, + {1, "cash", 1, false}, + {1, "casino", 1, false}, + {1, "catering", 1, false}, + {1, "catholic", 1, false}, + {1, "cba", 1, false}, + {1, "cbn", 1, false}, + {1, "cbre", 1, false}, + {1, "cbs", 1, false}, + {1, "ceb", 1, false}, + {1, "center", 1, false}, + {1, "ceo", 1, false}, + {1, "cern", 1, false}, + {1, "cfa", 1, false}, + {1, "cfd", 1, false}, + {1, "chanel", 1, false}, + {1, "channel", 1, false}, + {1, "charity", 1, false}, + {1, "chase", 1, false}, + {1, "chat", 1, false}, + {1, "cheap", 1, false}, + {1, "chintai", 1, false}, + {1, "christmas", 1, false}, + {1, "chrome", 1, false}, + {1, "church", 1, false}, + {1, "cipriani", 1, false}, + {1, "circle", 1, false}, + {1, "cisco", 1, false}, + {1, "citadel", 1, false}, + {1, "citi", 1, false}, + {1, "citic", 1, false}, + {1, "city", 1, false}, + {1, "cityeats", 1, false}, + {1, "claims", 1, false}, + {1, "cleaning", 1, false}, + {1, "click", 1, false}, + 
{1, "clinic", 1, false}, + {1, "clinique", 1, false}, + {1, "clothing", 1, false}, + {1, "cloud", 1, false}, + {1, "club", 1, false}, + {1, "clubmed", 1, false}, + {1, "coach", 1, false}, + {1, "codes", 1, false}, + {1, "coffee", 1, false}, + {1, "college", 1, false}, + {1, "cologne", 1, false}, + {1, "comcast", 1, false}, + {1, "commbank", 1, false}, + {1, "community", 1, false}, + {1, "company", 1, false}, + {1, "compare", 1, false}, + {1, "computer", 1, false}, + {1, "comsec", 1, false}, + {1, "condos", 1, false}, + {1, "construction", 1, false}, + {1, "consulting", 1, false}, + {1, "contact", 1, false}, + {1, "contractors", 1, false}, + {1, "cooking", 1, false}, + {1, "cookingchannel", 1, false}, + {1, "cool", 1, false}, + {1, "corsica", 1, false}, + {1, "country", 1, false}, + {1, "coupon", 1, false}, + {1, "coupons", 1, false}, + {1, "courses", 1, false}, + {1, "cpa", 1, false}, + {1, "credit", 1, false}, + {1, "creditcard", 1, false}, + {1, "creditunion", 1, false}, + {1, "cricket", 1, false}, + {1, "crown", 1, false}, + {1, "crs", 1, false}, + {1, "cruise", 1, false}, + {1, "cruises", 1, false}, + {1, "csc", 1, false}, + {1, "cuisinella", 1, false}, + {1, "cymru", 1, false}, + {1, "cyou", 1, false}, + {1, "dabur", 1, false}, + {1, "dad", 1, false}, + {1, "dance", 1, false}, + {1, "data", 1, false}, + {1, "date", 1, false}, + {1, "dating", 1, false}, + {1, "datsun", 1, false}, + {1, "day", 1, false}, + {1, "dclk", 1, false}, + {1, "dds", 1, false}, + {1, "deal", 1, false}, + {1, "dealer", 1, false}, + {1, "deals", 1, false}, + {1, "degree", 1, false}, + {1, "delivery", 1, false}, + {1, "dell", 1, false}, + {1, "deloitte", 1, false}, + {1, "delta", 1, false}, + {1, "democrat", 1, false}, + {1, "dental", 1, false}, + {1, "dentist", 1, false}, + {1, "desi", 1, false}, + {1, "design", 1, false}, + {1, "dev", 1, false}, + {1, "dhl", 1, false}, + {1, "diamonds", 1, false}, + {1, "diet", 1, false}, + {1, "digital", 1, false}, + {1, "direct", 1, false}, + {1, 
"directory", 1, false}, + {1, "discount", 1, false}, + {1, "discover", 1, false}, + {1, "dish", 1, false}, + {1, "diy", 1, false}, + {1, "dnp", 1, false}, + {1, "docs", 1, false}, + {1, "doctor", 1, false}, + {1, "dog", 1, false}, + {1, "domains", 1, false}, + {1, "dot", 1, false}, + {1, "download", 1, false}, + {1, "drive", 1, false}, + {1, "dtv", 1, false}, + {1, "dubai", 1, false}, + {1, "duck", 1, false}, + {1, "dunlop", 1, false}, + {1, "dupont", 1, false}, + {1, "durban", 1, false}, + {1, "dvag", 1, false}, + {1, "dvr", 1, false}, + {1, "earth", 1, false}, + {1, "eat", 1, false}, + {1, "eco", 1, false}, + {1, "edeka", 1, false}, + {1, "education", 1, false}, + {1, "email", 1, false}, + {1, "emerck", 1, false}, + {1, "energy", 1, false}, + {1, "engineer", 1, false}, + {1, "engineering", 1, false}, + {1, "enterprises", 1, false}, + {1, "epson", 1, false}, + {1, "equipment", 1, false}, + {1, "ericsson", 1, false}, + {1, "erni", 1, false}, + {1, "esq", 1, false}, + {1, "estate", 1, false}, + {1, "esurance", 1, false}, + {1, "etisalat", 1, false}, + {1, "eurovision", 1, false}, + {1, "eus", 1, false}, + {1, "events", 1, false}, + {1, "exchange", 1, false}, + {1, "expert", 1, false}, + {1, "exposed", 1, false}, + {1, "express", 1, false}, + {1, "extraspace", 1, false}, + {1, "fage", 1, false}, + {1, "fail", 1, false}, + {1, "fairwinds", 1, false}, + {1, "faith", 1, false}, + {1, "family", 1, false}, + {1, "fan", 1, false}, + {1, "fans", 1, false}, + {1, "farm", 1, false}, + {1, "farmers", 1, false}, + {1, "fashion", 1, false}, + {1, "fast", 1, false}, + {1, "fedex", 1, false}, + {1, "feedback", 1, false}, + {1, "ferrari", 1, false}, + {1, "ferrero", 1, false}, + {1, "fiat", 1, false}, + {1, "fidelity", 1, false}, + {1, "fido", 1, false}, + {1, "film", 1, false}, + {1, "final", 1, false}, + {1, "finance", 1, false}, + {1, "financial", 1, false}, + {1, "fire", 1, false}, + {1, "firestone", 1, false}, + {1, "firmdale", 1, false}, + {1, "fish", 1, false}, + {1, 
"fishing", 1, false}, + {1, "fit", 1, false}, + {1, "fitness", 1, false}, + {1, "flickr", 1, false}, + {1, "flights", 1, false}, + {1, "flir", 1, false}, + {1, "florist", 1, false}, + {1, "flowers", 1, false}, + {1, "fly", 1, false}, + {1, "foo", 1, false}, + {1, "food", 1, false}, + {1, "foodnetwork", 1, false}, + {1, "football", 1, false}, + {1, "ford", 1, false}, + {1, "forex", 1, false}, + {1, "forsale", 1, false}, + {1, "forum", 1, false}, + {1, "foundation", 1, false}, + {1, "fox", 1, false}, + {1, "free", 1, false}, + {1, "fresenius", 1, false}, + {1, "frl", 1, false}, + {1, "frogans", 1, false}, + {1, "frontdoor", 1, false}, + {1, "frontier", 1, false}, + {1, "ftr", 1, false}, + {1, "fujitsu", 1, false}, + {1, "fujixerox", 1, false}, + {1, "fun", 1, false}, + {1, "fund", 1, false}, + {1, "furniture", 1, false}, + {1, "futbol", 1, false}, + {1, "fyi", 1, false}, + {1, "gal", 1, false}, + {1, "gallery", 1, false}, + {1, "gallo", 1, false}, + {1, "gallup", 1, false}, + {1, "game", 1, false}, + {1, "games", 1, false}, + {1, "gap", 1, false}, + {1, "garden", 1, false}, + {1, "gay", 1, false}, + {1, "gbiz", 1, false}, + {1, "gdn", 1, false}, + {1, "gea", 1, false}, + {1, "gent", 1, false}, + {1, "genting", 1, false}, + {1, "george", 1, false}, + {1, "ggee", 1, false}, + {1, "gift", 1, false}, + {1, "gifts", 1, false}, + {1, "gives", 1, false}, + {1, "giving", 1, false}, + {1, "glade", 1, false}, + {1, "glass", 1, false}, + {1, "gle", 1, false}, + {1, "global", 1, false}, + {1, "globo", 1, false}, + {1, "gmail", 1, false}, + {1, "gmbh", 1, false}, + {1, "gmo", 1, false}, + {1, "gmx", 1, false}, + {1, "godaddy", 1, false}, + {1, "gold", 1, false}, + {1, "goldpoint", 1, false}, + {1, "golf", 1, false}, + {1, "goo", 1, false}, + {1, "goodyear", 1, false}, + {1, "goog", 1, false}, + {1, "google", 1, false}, + {1, "gop", 1, false}, + {1, "got", 1, false}, + {1, "grainger", 1, false}, + {1, "graphics", 1, false}, + {1, "gratis", 1, false}, + {1, "green", 1, false}, + 
{1, "gripe", 1, false}, + {1, "grocery", 1, false}, + {1, "group", 1, false}, + {1, "guardian", 1, false}, + {1, "gucci", 1, false}, + {1, "guge", 1, false}, + {1, "guide", 1, false}, + {1, "guitars", 1, false}, + {1, "guru", 1, false}, + {1, "hair", 1, false}, + {1, "hamburg", 1, false}, + {1, "hangout", 1, false}, + {1, "haus", 1, false}, + {1, "hbo", 1, false}, + {1, "hdfc", 1, false}, + {1, "hdfcbank", 1, false}, + {1, "health", 1, false}, + {1, "healthcare", 1, false}, + {1, "help", 1, false}, + {1, "helsinki", 1, false}, + {1, "here", 1, false}, + {1, "hermes", 1, false}, + {1, "hgtv", 1, false}, + {1, "hiphop", 1, false}, + {1, "hisamitsu", 1, false}, + {1, "hitachi", 1, false}, + {1, "hiv", 1, false}, + {1, "hkt", 1, false}, + {1, "hockey", 1, false}, + {1, "holdings", 1, false}, + {1, "holiday", 1, false}, + {1, "homedepot", 1, false}, + {1, "homegoods", 1, false}, + {1, "homes", 1, false}, + {1, "homesense", 1, false}, + {1, "honda", 1, false}, + {1, "horse", 1, false}, + {1, "hospital", 1, false}, + {1, "host", 1, false}, + {1, "hosting", 1, false}, + {1, "hot", 1, false}, + {1, "hoteles", 1, false}, + {1, "hotels", 1, false}, + {1, "hotmail", 1, false}, + {1, "house", 1, false}, + {1, "how", 1, false}, + {1, "hsbc", 1, false}, + {1, "hughes", 1, false}, + {1, "hyatt", 1, false}, + {1, "hyundai", 1, false}, + {1, "ibm", 1, false}, + {1, "icbc", 1, false}, + {1, "ice", 1, false}, + {1, "icu", 1, false}, + {1, "ieee", 1, false}, + {1, "ifm", 1, false}, + {1, "ikano", 1, false}, + {1, "imamat", 1, false}, + {1, "imdb", 1, false}, + {1, "immo", 1, false}, + {1, "immobilien", 1, false}, + {1, "inc", 1, false}, + {1, "industries", 1, false}, + {1, "infiniti", 1, false}, + {1, "ing", 1, false}, + {1, "ink", 1, false}, + {1, "institute", 1, false}, + {1, "insurance", 1, false}, + {1, "insure", 1, false}, + {1, "intel", 1, false}, + {1, "international", 1, false}, + {1, "intuit", 1, false}, + {1, "investments", 1, false}, + {1, "ipiranga", 1, false}, + {1, 
"irish", 1, false}, + {1, "ismaili", 1, false}, + {1, "ist", 1, false}, + {1, "istanbul", 1, false}, + {1, "itau", 1, false}, + {1, "itv", 1, false}, + {1, "iveco", 1, false}, + {1, "jaguar", 1, false}, + {1, "java", 1, false}, + {1, "jcb", 1, false}, + {1, "jcp", 1, false}, + {1, "jeep", 1, false}, + {1, "jetzt", 1, false}, + {1, "jewelry", 1, false}, + {1, "jio", 1, false}, + {1, "jll", 1, false}, + {1, "jmp", 1, false}, + {1, "jnj", 1, false}, + {1, "joburg", 1, false}, + {1, "jot", 1, false}, + {1, "joy", 1, false}, + {1, "jpmorgan", 1, false}, + {1, "jprs", 1, false}, + {1, "juegos", 1, false}, + {1, "juniper", 1, false}, + {1, "kaufen", 1, false}, + {1, "kddi", 1, false}, + {1, "kerryhotels", 1, false}, + {1, "kerrylogistics", 1, false}, + {1, "kerryproperties", 1, false}, + {1, "kfh", 1, false}, + {1, "kia", 1, false}, + {1, "kim", 1, false}, + {1, "kinder", 1, false}, + {1, "kindle", 1, false}, + {1, "kitchen", 1, false}, + {1, "kiwi", 1, false}, + {1, "koeln", 1, false}, + {1, "komatsu", 1, false}, + {1, "kosher", 1, false}, + {1, "kpmg", 1, false}, + {1, "kpn", 1, false}, + {1, "krd", 1, false}, + {1, "kred", 1, false}, + {1, "kuokgroup", 1, false}, + {1, "kyoto", 1, false}, + {1, "lacaixa", 1, false}, + {1, "lamborghini", 1, false}, + {1, "lamer", 1, false}, + {1, "lancaster", 1, false}, + {1, "lancia", 1, false}, + {1, "land", 1, false}, + {1, "landrover", 1, false}, + {1, "lanxess", 1, false}, + {1, "lasalle", 1, false}, + {1, "lat", 1, false}, + {1, "latino", 1, false}, + {1, "latrobe", 1, false}, + {1, "law", 1, false}, + {1, "lawyer", 1, false}, + {1, "lds", 1, false}, + {1, "lease", 1, false}, + {1, "leclerc", 1, false}, + {1, "lefrak", 1, false}, + {1, "legal", 1, false}, + {1, "lego", 1, false}, + {1, "lexus", 1, false}, + {1, "lgbt", 1, false}, + {1, "lidl", 1, false}, + {1, "life", 1, false}, + {1, "lifeinsurance", 1, false}, + {1, "lifestyle", 1, false}, + {1, "lighting", 1, false}, + {1, "like", 1, false}, + {1, "lilly", 1, false}, + {1, 
"limited", 1, false}, + {1, "limo", 1, false}, + {1, "lincoln", 1, false}, + {1, "linde", 1, false}, + {1, "link", 1, false}, + {1, "lipsy", 1, false}, + {1, "live", 1, false}, + {1, "living", 1, false}, + {1, "lixil", 1, false}, + {1, "llc", 1, false}, + {1, "llp", 1, false}, + {1, "loan", 1, false}, + {1, "loans", 1, false}, + {1, "locker", 1, false}, + {1, "locus", 1, false}, + {1, "loft", 1, false}, + {1, "lol", 1, false}, + {1, "london", 1, false}, + {1, "lotte", 1, false}, + {1, "lotto", 1, false}, + {1, "love", 1, false}, + {1, "lpl", 1, false}, + {1, "lplfinancial", 1, false}, + {1, "ltd", 1, false}, + {1, "ltda", 1, false}, + {1, "lundbeck", 1, false}, + {1, "lupin", 1, false}, + {1, "luxe", 1, false}, + {1, "luxury", 1, false}, + {1, "macys", 1, false}, + {1, "madrid", 1, false}, + {1, "maif", 1, false}, + {1, "maison", 1, false}, + {1, "makeup", 1, false}, + {1, "man", 1, false}, + {1, "management", 1, false}, + {1, "mango", 1, false}, + {1, "map", 1, false}, + {1, "market", 1, false}, + {1, "marketing", 1, false}, + {1, "markets", 1, false}, + {1, "marriott", 1, false}, + {1, "marshalls", 1, false}, + {1, "maserati", 1, false}, + {1, "mattel", 1, false}, + {1, "mba", 1, false}, + {1, "mckinsey", 1, false}, + {1, "med", 1, false}, + {1, "media", 1, false}, + {1, "meet", 1, false}, + {1, "melbourne", 1, false}, + {1, "meme", 1, false}, + {1, "memorial", 1, false}, + {1, "men", 1, false}, + {1, "menu", 1, false}, + {1, "merckmsd", 1, false}, + {1, "metlife", 1, false}, + {1, "miami", 1, false}, + {1, "microsoft", 1, false}, + {1, "mini", 1, false}, + {1, "mint", 1, false}, + {1, "mit", 1, false}, + {1, "mitsubishi", 1, false}, + {1, "mlb", 1, false}, + {1, "mls", 1, false}, + {1, "mma", 1, false}, + {1, "mobile", 1, false}, + {1, "moda", 1, false}, + {1, "moe", 1, false}, + {1, "moi", 1, false}, + {1, "mom", 1, false}, + {1, "monash", 1, false}, + {1, "money", 1, false}, + {1, "monster", 1, false}, + {1, "mormon", 1, false}, + {1, "mortgage", 1, false}, + 
{1, "moscow", 1, false}, + {1, "moto", 1, false}, + {1, "motorcycles", 1, false}, + {1, "mov", 1, false}, + {1, "movie", 1, false}, + {1, "msd", 1, false}, + {1, "mtn", 1, false}, + {1, "mtr", 1, false}, + {1, "mutual", 1, false}, + {1, "nab", 1, false}, + {1, "nadex", 1, false}, + {1, "nagoya", 1, false}, + {1, "nationwide", 1, false}, + {1, "natura", 1, false}, + {1, "navy", 1, false}, + {1, "nba", 1, false}, + {1, "nec", 1, false}, + {1, "netbank", 1, false}, + {1, "netflix", 1, false}, + {1, "network", 1, false}, + {1, "neustar", 1, false}, + {1, "new", 1, false}, + {1, "newholland", 1, false}, + {1, "news", 1, false}, + {1, "next", 1, false}, + {1, "nextdirect", 1, false}, + {1, "nexus", 1, false}, + {1, "nfl", 1, false}, + {1, "ngo", 1, false}, + {1, "nhk", 1, false}, + {1, "nico", 1, false}, + {1, "nike", 1, false}, + {1, "nikon", 1, false}, + {1, "ninja", 1, false}, + {1, "nissan", 1, false}, + {1, "nissay", 1, false}, + {1, "nokia", 1, false}, + {1, "northwesternmutual", 1, false}, + {1, "norton", 1, false}, + {1, "now", 1, false}, + {1, "nowruz", 1, false}, + {1, "nowtv", 1, false}, + {1, "nra", 1, false}, + {1, "nrw", 1, false}, + {1, "ntt", 1, false}, + {1, "nyc", 1, false}, + {1, "obi", 1, false}, + {1, "observer", 1, false}, + {1, "off", 1, false}, + {1, "office", 1, false}, + {1, "okinawa", 1, false}, + {1, "olayan", 1, false}, + {1, "olayangroup", 1, false}, + {1, "oldnavy", 1, false}, + {1, "ollo", 1, false}, + {1, "omega", 1, false}, + {1, "one", 1, false}, + {1, "ong", 1, false}, + {1, "onl", 1, false}, + {1, "online", 1, false}, + {1, "onyourside", 1, false}, + {1, "ooo", 1, false}, + {1, "open", 1, false}, + {1, "oracle", 1, false}, + {1, "orange", 1, false}, + {1, "organic", 1, false}, + {1, "origins", 1, false}, + {1, "osaka", 1, false}, + {1, "otsuka", 1, false}, + {1, "ott", 1, false}, + {1, "ovh", 1, false}, + {1, "page", 1, false}, + {1, "panasonic", 1, false}, + {1, "paris", 1, false}, + {1, "pars", 1, false}, + {1, "partners", 1, 
false}, + {1, "parts", 1, false}, + {1, "party", 1, false}, + {1, "passagens", 1, false}, + {1, "pay", 1, false}, + {1, "pccw", 1, false}, + {1, "pet", 1, false}, + {1, "pfizer", 1, false}, + {1, "pharmacy", 1, false}, + {1, "phd", 1, false}, + {1, "philips", 1, false}, + {1, "phone", 1, false}, + {1, "photo", 1, false}, + {1, "photography", 1, false}, + {1, "photos", 1, false}, + {1, "physio", 1, false}, + {1, "pics", 1, false}, + {1, "pictet", 1, false}, + {1, "pictures", 1, false}, + {1, "pid", 1, false}, + {1, "pin", 1, false}, + {1, "ping", 1, false}, + {1, "pink", 1, false}, + {1, "pioneer", 1, false}, + {1, "pizza", 1, false}, + {1, "place", 1, false}, + {1, "play", 1, false}, + {1, "playstation", 1, false}, + {1, "plumbing", 1, false}, + {1, "plus", 1, false}, + {1, "pnc", 1, false}, + {1, "pohl", 1, false}, + {1, "poker", 1, false}, + {1, "politie", 1, false}, + {1, "porn", 1, false}, + {1, "pramerica", 1, false}, + {1, "praxi", 1, false}, + {1, "press", 1, false}, + {1, "prime", 1, false}, + {1, "prod", 1, false}, + {1, "productions", 1, false}, + {1, "prof", 1, false}, + {1, "progressive", 1, false}, + {1, "promo", 1, false}, + {1, "properties", 1, false}, + {1, "property", 1, false}, + {1, "protection", 1, false}, + {1, "pru", 1, false}, + {1, "prudential", 1, false}, + {1, "pub", 1, false}, + {1, "pwc", 1, false}, + {1, "qpon", 1, false}, + {1, "quebec", 1, false}, + {1, "quest", 1, false}, + {1, "qvc", 1, false}, + {1, "racing", 1, false}, + {1, "radio", 1, false}, + {1, "raid", 1, false}, + {1, "read", 1, false}, + {1, "realestate", 1, false}, + {1, "realtor", 1, false}, + {1, "realty", 1, false}, + {1, "recipes", 1, false}, + {1, "red", 1, false}, + {1, "redstone", 1, false}, + {1, "redumbrella", 1, false}, + {1, "rehab", 1, false}, + {1, "reise", 1, false}, + {1, "reisen", 1, false}, + {1, "reit", 1, false}, + {1, "reliance", 1, false}, + {1, "ren", 1, false}, + {1, "rent", 1, false}, + {1, "rentals", 1, false}, + {1, "repair", 1, false}, + {1, 
"report", 1, false}, + {1, "republican", 1, false}, + {1, "rest", 1, false}, + {1, "restaurant", 1, false}, + {1, "review", 1, false}, + {1, "reviews", 1, false}, + {1, "rexroth", 1, false}, + {1, "rich", 1, false}, + {1, "richardli", 1, false}, + {1, "ricoh", 1, false}, + {1, "rightathome", 1, false}, + {1, "ril", 1, false}, + {1, "rio", 1, false}, + {1, "rip", 1, false}, + {1, "rmit", 1, false}, + {1, "rocher", 1, false}, + {1, "rocks", 1, false}, + {1, "rodeo", 1, false}, + {1, "rogers", 1, false}, + {1, "room", 1, false}, + {1, "rsvp", 1, false}, + {1, "rugby", 1, false}, + {1, "ruhr", 1, false}, + {1, "run", 1, false}, + {1, "rwe", 1, false}, + {1, "ryukyu", 1, false}, + {1, "saarland", 1, false}, + {1, "safe", 1, false}, + {1, "safety", 1, false}, + {1, "sakura", 1, false}, + {1, "sale", 1, false}, + {1, "salon", 1, false}, + {1, "samsclub", 1, false}, + {1, "samsung", 1, false}, + {1, "sandvik", 1, false}, + {1, "sandvikcoromant", 1, false}, + {1, "sanofi", 1, false}, + {1, "sap", 1, false}, + {1, "sarl", 1, false}, + {1, "sas", 1, false}, + {1, "save", 1, false}, + {1, "saxo", 1, false}, + {1, "sbi", 1, false}, + {1, "sbs", 1, false}, + {1, "sca", 1, false}, + {1, "scb", 1, false}, + {1, "schaeffler", 1, false}, + {1, "schmidt", 1, false}, + {1, "scholarships", 1, false}, + {1, "school", 1, false}, + {1, "schule", 1, false}, + {1, "schwarz", 1, false}, + {1, "science", 1, false}, + {1, "scjohnson", 1, false}, + {1, "scor", 1, false}, + {1, "scot", 1, false}, + {1, "search", 1, false}, + {1, "seat", 1, false}, + {1, "secure", 1, false}, + {1, "security", 1, false}, + {1, "seek", 1, false}, + {1, "select", 1, false}, + {1, "sener", 1, false}, + {1, "services", 1, false}, + {1, "ses", 1, false}, + {1, "seven", 1, false}, + {1, "sew", 1, false}, + {1, "sex", 1, false}, + {1, "sexy", 1, false}, + {1, "sfr", 1, false}, + {1, "shangrila", 1, false}, + {1, "sharp", 1, false}, + {1, "shaw", 1, false}, + {1, "shell", 1, false}, + {1, "shia", 1, false}, + {1, 
"shiksha", 1, false}, + {1, "shoes", 1, false}, + {1, "shop", 1, false}, + {1, "shopping", 1, false}, + {1, "shouji", 1, false}, + {1, "show", 1, false}, + {1, "showtime", 1, false}, + {1, "shriram", 1, false}, + {1, "silk", 1, false}, + {1, "sina", 1, false}, + {1, "singles", 1, false}, + {1, "site", 1, false}, + {1, "ski", 1, false}, + {1, "skin", 1, false}, + {1, "sky", 1, false}, + {1, "skype", 1, false}, + {1, "sling", 1, false}, + {1, "smart", 1, false}, + {1, "smile", 1, false}, + {1, "sncf", 1, false}, + {1, "soccer", 1, false}, + {1, "social", 1, false}, + {1, "softbank", 1, false}, + {1, "software", 1, false}, + {1, "sohu", 1, false}, + {1, "solar", 1, false}, + {1, "solutions", 1, false}, + {1, "song", 1, false}, + {1, "sony", 1, false}, + {1, "soy", 1, false}, + {1, "spa", 1, false}, + {1, "space", 1, false}, + {1, "sport", 1, false}, + {1, "spot", 1, false}, + {1, "spreadbetting", 1, false}, + {1, "srl", 1, false}, + {1, "stada", 1, false}, + {1, "staples", 1, false}, + {1, "star", 1, false}, + {1, "statebank", 1, false}, + {1, "statefarm", 1, false}, + {1, "stc", 1, false}, + {1, "stcgroup", 1, false}, + {1, "stockholm", 1, false}, + {1, "storage", 1, false}, + {1, "store", 1, false}, + {1, "stream", 1, false}, + {1, "studio", 1, false}, + {1, "study", 1, false}, + {1, "style", 1, false}, + {1, "sucks", 1, false}, + {1, "supplies", 1, false}, + {1, "supply", 1, false}, + {1, "support", 1, false}, + {1, "surf", 1, false}, + {1, "surgery", 1, false}, + {1, "suzuki", 1, false}, + {1, "swatch", 1, false}, + {1, "swiftcover", 1, false}, + {1, "swiss", 1, false}, + {1, "sydney", 1, false}, + {1, "symantec", 1, false}, + {1, "systems", 1, false}, + {1, "tab", 1, false}, + {1, "taipei", 1, false}, + {1, "talk", 1, false}, + {1, "taobao", 1, false}, + {1, "target", 1, false}, + {1, "tatamotors", 1, false}, + {1, "tatar", 1, false}, + {1, "tattoo", 1, false}, + {1, "tax", 1, false}, + {1, "taxi", 1, false}, + {1, "tci", 1, false}, + {1, "tdk", 1, false}, + {1, 
"team", 1, false}, + {1, "tech", 1, false}, + {1, "technology", 1, false}, + {1, "temasek", 1, false}, + {1, "tennis", 1, false}, + {1, "teva", 1, false}, + {1, "thd", 1, false}, + {1, "theater", 1, false}, + {1, "theatre", 1, false}, + {1, "tiaa", 1, false}, + {1, "tickets", 1, false}, + {1, "tienda", 1, false}, + {1, "tiffany", 1, false}, + {1, "tips", 1, false}, + {1, "tires", 1, false}, + {1, "tirol", 1, false}, + {1, "tjmaxx", 1, false}, + {1, "tjx", 1, false}, + {1, "tkmaxx", 1, false}, + {1, "tmall", 1, false}, + {1, "today", 1, false}, + {1, "tokyo", 1, false}, + {1, "tools", 1, false}, + {1, "top", 1, false}, + {1, "toray", 1, false}, + {1, "toshiba", 1, false}, + {1, "total", 1, false}, + {1, "tours", 1, false}, + {1, "town", 1, false}, + {1, "toyota", 1, false}, + {1, "toys", 1, false}, + {1, "trade", 1, false}, + {1, "trading", 1, false}, + {1, "training", 1, false}, + {1, "travel", 1, false}, + {1, "travelchannel", 1, false}, + {1, "travelers", 1, false}, + {1, "travelersinsurance", 1, false}, + {1, "trust", 1, false}, + {1, "trv", 1, false}, + {1, "tube", 1, false}, + {1, "tui", 1, false}, + {1, "tunes", 1, false}, + {1, "tushu", 1, false}, + {1, "tvs", 1, false}, + {1, "ubank", 1, false}, + {1, "ubs", 1, false}, + {1, "unicom", 1, false}, + {1, "university", 1, false}, + {1, "uno", 1, false}, + {1, "uol", 1, false}, + {1, "ups", 1, false}, + {1, "vacations", 1, false}, + {1, "vana", 1, false}, + {1, "vanguard", 1, false}, + {1, "vegas", 1, false}, + {1, "ventures", 1, false}, + {1, "verisign", 1, false}, + {1, "versicherung", 1, false}, + {1, "vet", 1, false}, + {1, "viajes", 1, false}, + {1, "video", 1, false}, + {1, "vig", 1, false}, + {1, "viking", 1, false}, + {1, "villas", 1, false}, + {1, "vin", 1, false}, + {1, "vip", 1, false}, + {1, "virgin", 1, false}, + {1, "visa", 1, false}, + {1, "vision", 1, false}, + {1, "vistaprint", 1, false}, + {1, "viva", 1, false}, + {1, "vivo", 1, false}, + {1, "vlaanderen", 1, false}, + {1, "vodka", 1, false}, + 
{1, "volkswagen", 1, false}, + {1, "volvo", 1, false}, + {1, "vote", 1, false}, + {1, "voting", 1, false}, + {1, "voto", 1, false}, + {1, "voyage", 1, false}, + {1, "vuelos", 1, false}, + {1, "wales", 1, false}, + {1, "walmart", 1, false}, + {1, "walter", 1, false}, + {1, "wang", 1, false}, + {1, "wanggou", 1, false}, + {1, "watch", 1, false}, + {1, "watches", 1, false}, + {1, "weather", 1, false}, + {1, "weatherchannel", 1, false}, + {1, "webcam", 1, false}, + {1, "weber", 1, false}, + {1, "website", 1, false}, + {1, "wed", 1, false}, + {1, "wedding", 1, false}, + {1, "weibo", 1, false}, + {1, "weir", 1, false}, + {1, "whoswho", 1, false}, + {1, "wien", 1, false}, + {1, "wiki", 1, false}, + {1, "williamhill", 1, false}, + {1, "win", 1, false}, + {1, "windows", 1, false}, + {1, "wine", 1, false}, + {1, "winners", 1, false}, + {1, "wme", 1, false}, + {1, "wolterskluwer", 1, false}, + {1, "woodside", 1, false}, + {1, "work", 1, false}, + {1, "works", 1, false}, + {1, "world", 1, false}, + {1, "wow", 1, false}, + {1, "wtc", 1, false}, + {1, "wtf", 1, false}, + {1, "xbox", 1, false}, + {1, "xerox", 1, false}, + {1, "xfinity", 1, false}, + {1, "xihuan", 1, false}, + {1, "xin", 1, false}, + {1, "xn--11b4c3d", 1, false}, + {1, "xn--1ck2e1b", 1, false}, + {1, "xn--1qqw23a", 1, false}, + {1, "xn--30rr7y", 1, false}, + {1, "xn--3bst00m", 1, false}, + {1, "xn--3ds443g", 1, false}, + {1, "xn--3oq18vl8pn36a", 1, false}, + {1, "xn--3pxu8k", 1, false}, + {1, "xn--42c2d9a", 1, false}, + {1, "xn--45q11c", 1, false}, + {1, "xn--4gbrim", 1, false}, + {1, "xn--55qw42g", 1, false}, + {1, "xn--55qx5d", 1, false}, + {1, "xn--5su34j936bgsg", 1, false}, + {1, "xn--5tzm5g", 1, false}, + {1, "xn--6frz82g", 1, false}, + {1, "xn--6qq986b3xl", 1, false}, + {1, "xn--80adxhks", 1, false}, + {1, "xn--80aqecdr1a", 1, false}, + {1, "xn--80asehdb", 1, false}, + {1, "xn--80aswg", 1, false}, + {1, "xn--8y0a063a", 1, false}, + {1, "xn--9dbq2a", 1, false}, + {1, "xn--9et52u", 1, false}, + {1, 
"xn--9krt00a", 1, false}, + {1, "xn--b4w605ferd", 1, false}, + {1, "xn--bck1b9a5dre4c", 1, false}, + {1, "xn--c1avg", 1, false}, + {1, "xn--c2br7g", 1, false}, + {1, "xn--cck2b3b", 1, false}, + {1, "xn--cckwcxetd", 1, false}, + {1, "xn--cg4bki", 1, false}, + {1, "xn--czr694b", 1, false}, + {1, "xn--czrs0t", 1, false}, + {1, "xn--czru2d", 1, false}, + {1, "xn--d1acj3b", 1, false}, + {1, "xn--eckvdtc9d", 1, false}, + {1, "xn--efvy88h", 1, false}, + {1, "xn--estv75g", 1, false}, + {1, "xn--fct429k", 1, false}, + {1, "xn--fhbei", 1, false}, + {1, "xn--fiq228c5hs", 1, false}, + {1, "xn--fiq64b", 1, false}, + {1, "xn--fjq720a", 1, false}, + {1, "xn--flw351e", 1, false}, + {1, "xn--fzys8d69uvgm", 1, false}, + {1, "xn--g2xx48c", 1, false}, + {1, "xn--gckr3f0f", 1, false}, + {1, "xn--gk3at1e", 1, false}, + {1, "xn--hxt814e", 1, false}, + {1, "xn--i1b6b1a6a2e", 1, false}, + {1, "xn--imr513n", 1, false}, + {1, "xn--io0a7i", 1, false}, + {1, "xn--j1aef", 1, false}, + {1, "xn--jlq480n2rg", 1, false}, + {1, "xn--jlq61u9w7b", 1, false}, + {1, "xn--jvr189m", 1, false}, + {1, "xn--kcrx77d1x4a", 1, false}, + {1, "xn--kpu716f", 1, false}, + {1, "xn--kput3i", 1, false}, + {1, "xn--mgba3a3ejt", 1, false}, + {1, "xn--mgba7c0bbn0a", 1, false}, + {1, "xn--mgbaakc7dvf", 1, false}, + {1, "xn--mgbab2bd", 1, false}, + {1, "xn--mgbca7dzdo", 1, false}, + {1, "xn--mgbi4ecexp", 1, false}, + {1, "xn--mgbt3dhd", 1, false}, + {1, "xn--mk1bu44c", 1, false}, + {1, "xn--mxtq1m", 1, false}, + {1, "xn--ngbc5azd", 1, false}, + {1, "xn--ngbe9e0a", 1, false}, + {1, "xn--ngbrx", 1, false}, + {1, "xn--nqv7f", 1, false}, + {1, "xn--nqv7fs00ema", 1, false}, + {1, "xn--nyqy26a", 1, false}, + {1, "xn--otu796d", 1, false}, + {1, "xn--p1acf", 1, false}, + {1, "xn--pbt977c", 1, false}, + {1, "xn--pssy2u", 1, false}, + {1, "xn--q9jyb4c", 1, false}, + {1, "xn--qcka1pmc", 1, false}, + {1, "xn--rhqv96g", 1, false}, + {1, "xn--rovu88b", 1, false}, + {1, "xn--ses554g", 1, false}, + {1, "xn--t60b56a", 1, false}, + {1, 
"xn--tckwe", 1, false}, + {1, "xn--tiq49xqyj", 1, false}, + {1, "xn--unup4y", 1, false}, + {1, "xn--vermgensberater-ctb", 1, false}, + {1, "xn--vermgensberatung-pwb", 1, false}, + {1, "xn--vhquv", 1, false}, + {1, "xn--vuq861b", 1, false}, + {1, "xn--w4r85el8fhu5dnra", 1, false}, + {1, "xn--w4rs40l", 1, false}, + {1, "xn--xhq521b", 1, false}, + {1, "xn--zfr164b", 1, false}, + {1, "xyz", 1, false}, + {1, "yachts", 1, false}, + {1, "yahoo", 1, false}, + {1, "yamaxun", 1, false}, + {1, "yandex", 1, false}, + {1, "yodobashi", 1, false}, + {1, "yoga", 1, false}, + {1, "yokohama", 1, false}, + {1, "you", 1, false}, + {1, "youtube", 1, false}, + {1, "yun", 1, false}, + {1, "zappos", 1, false}, + {1, "zara", 1, false}, + {1, "zero", 1, false}, + {1, "zip", 1, false}, + {1, "zone", 1, false}, + {1, "zuerich", 1, false}, + {1, "cc.ua", 2, true}, + {1, "inf.ua", 2, true}, + {1, "ltd.ua", 2, true}, + {1, "adobeaemcloud.com", 2, true}, + {1, "adobeaemcloud.net", 2, true}, + {2, "dev.adobeaemcloud.com", 4, true}, + {1, "beep.pl", 2, true}, + {1, "barsy.ca", 2, true}, + {2, "compute.estate", 3, true}, + {2, "alces.network", 3, true}, + {1, "altervista.org", 2, true}, + {1, "alwaysdata.net", 2, true}, + {1, "cloudfront.net", 2, true}, + {2, "compute.amazonaws.com", 4, true}, + {2, "compute-1.amazonaws.com", 4, true}, + {2, "compute.amazonaws.com.cn", 5, true}, + {1, "us-east-1.amazonaws.com", 3, true}, + {1, "cn-north-1.eb.amazonaws.com.cn", 5, true}, + {1, "cn-northwest-1.eb.amazonaws.com.cn", 5, true}, + {1, "elasticbeanstalk.com", 2, true}, + {1, "ap-northeast-1.elasticbeanstalk.com", 3, true}, + {1, "ap-northeast-2.elasticbeanstalk.com", 3, true}, + {1, "ap-northeast-3.elasticbeanstalk.com", 3, true}, + {1, "ap-south-1.elasticbeanstalk.com", 3, true}, + {1, "ap-southeast-1.elasticbeanstalk.com", 3, true}, + {1, "ap-southeast-2.elasticbeanstalk.com", 3, true}, + {1, "ca-central-1.elasticbeanstalk.com", 3, true}, + {1, "eu-central-1.elasticbeanstalk.com", 3, true}, + {1, 
"eu-west-1.elasticbeanstalk.com", 3, true}, + {1, "eu-west-2.elasticbeanstalk.com", 3, true}, + {1, "eu-west-3.elasticbeanstalk.com", 3, true}, + {1, "sa-east-1.elasticbeanstalk.com", 3, true}, + {1, "us-east-1.elasticbeanstalk.com", 3, true}, + {1, "us-east-2.elasticbeanstalk.com", 3, true}, + {1, "us-gov-west-1.elasticbeanstalk.com", 3, true}, + {1, "us-west-1.elasticbeanstalk.com", 3, true}, + {1, "us-west-2.elasticbeanstalk.com", 3, true}, + {2, "elb.amazonaws.com", 4, true}, + {2, "elb.amazonaws.com.cn", 5, true}, + {1, "s3.amazonaws.com", 3, true}, + {1, "s3-ap-northeast-1.amazonaws.com", 3, true}, + {1, "s3-ap-northeast-2.amazonaws.com", 3, true}, + {1, "s3-ap-south-1.amazonaws.com", 3, true}, + {1, "s3-ap-southeast-1.amazonaws.com", 3, true}, + {1, "s3-ap-southeast-2.amazonaws.com", 3, true}, + {1, "s3-ca-central-1.amazonaws.com", 3, true}, + {1, "s3-eu-central-1.amazonaws.com", 3, true}, + {1, "s3-eu-west-1.amazonaws.com", 3, true}, + {1, "s3-eu-west-2.amazonaws.com", 3, true}, + {1, "s3-eu-west-3.amazonaws.com", 3, true}, + {1, "s3-external-1.amazonaws.com", 3, true}, + {1, "s3-fips-us-gov-west-1.amazonaws.com", 3, true}, + {1, "s3-sa-east-1.amazonaws.com", 3, true}, + {1, "s3-us-gov-west-1.amazonaws.com", 3, true}, + {1, "s3-us-east-2.amazonaws.com", 3, true}, + {1, "s3-us-west-1.amazonaws.com", 3, true}, + {1, "s3-us-west-2.amazonaws.com", 3, true}, + {1, "s3.ap-northeast-2.amazonaws.com", 4, true}, + {1, "s3.ap-south-1.amazonaws.com", 4, true}, + {1, "s3.cn-north-1.amazonaws.com.cn", 5, true}, + {1, "s3.ca-central-1.amazonaws.com", 4, true}, + {1, "s3.eu-central-1.amazonaws.com", 4, true}, + {1, "s3.eu-west-2.amazonaws.com", 4, true}, + {1, "s3.eu-west-3.amazonaws.com", 4, true}, + {1, "s3.us-east-2.amazonaws.com", 4, true}, + {1, "s3.dualstack.ap-northeast-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-northeast-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-south-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-southeast-1.amazonaws.com", 
5, true}, + {1, "s3.dualstack.ap-southeast-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.ca-central-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-central-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-3.amazonaws.com", 5, true}, + {1, "s3.dualstack.sa-east-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.us-east-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.us-east-2.amazonaws.com", 5, true}, + {1, "s3-website-us-east-1.amazonaws.com", 3, true}, + {1, "s3-website-us-west-1.amazonaws.com", 3, true}, + {1, "s3-website-us-west-2.amazonaws.com", 3, true}, + {1, "s3-website-ap-northeast-1.amazonaws.com", 3, true}, + {1, "s3-website-ap-southeast-1.amazonaws.com", 3, true}, + {1, "s3-website-ap-southeast-2.amazonaws.com", 3, true}, + {1, "s3-website-eu-west-1.amazonaws.com", 3, true}, + {1, "s3-website-sa-east-1.amazonaws.com", 3, true}, + {1, "s3-website.ap-northeast-2.amazonaws.com", 4, true}, + {1, "s3-website.ap-south-1.amazonaws.com", 4, true}, + {1, "s3-website.ca-central-1.amazonaws.com", 4, true}, + {1, "s3-website.eu-central-1.amazonaws.com", 4, true}, + {1, "s3-website.eu-west-2.amazonaws.com", 4, true}, + {1, "s3-website.eu-west-3.amazonaws.com", 4, true}, + {1, "s3-website.us-east-2.amazonaws.com", 4, true}, + {1, "amsw.nl", 2, true}, + {1, "t3l3p0rt.net", 2, true}, + {1, "tele.amune.org", 3, true}, + {1, "apigee.io", 2, true}, + {1, "on-aptible.com", 2, true}, + {1, "user.aseinet.ne.jp", 4, true}, + {1, "gv.vc", 2, true}, + {1, "d.gv.vc", 3, true}, + {1, "user.party.eus", 3, true}, + {1, "pimienta.org", 2, true}, + {1, "poivron.org", 2, true}, + {1, "potager.org", 2, true}, + {1, "sweetpepper.org", 2, true}, + {1, "myasustor.com", 2, true}, + {1, "myfritz.net", 2, true}, + {2, "awdev.ca", 3, true}, + {2, "advisor.ws", 3, true}, + {1, "b-data.io", 2, true}, + {1, "backplaneapp.io", 2, true}, + {1, "balena-devices.com", 2, true}, + 
{1, "app.banzaicloud.io", 3, true}, + {1, "betainabox.com", 2, true}, + {1, "bnr.la", 2, true}, + {1, "blackbaudcdn.net", 2, true}, + {1, "boomla.net", 2, true}, + {1, "boxfuse.io", 2, true}, + {1, "square7.ch", 2, true}, + {1, "bplaced.com", 2, true}, + {1, "bplaced.de", 2, true}, + {1, "square7.de", 2, true}, + {1, "bplaced.net", 2, true}, + {1, "square7.net", 2, true}, + {1, "browsersafetymark.io", 2, true}, + {1, "uk0.bigv.io", 3, true}, + {1, "dh.bytemark.co.uk", 4, true}, + {1, "vm.bytemark.co.uk", 4, true}, + {1, "mycd.eu", 2, true}, + {1, "carrd.co", 2, true}, + {1, "crd.co", 2, true}, + {1, "uwu.ai", 2, true}, + {1, "ae.org", 2, true}, + {1, "ar.com", 2, true}, + {1, "br.com", 2, true}, + {1, "cn.com", 2, true}, + {1, "com.de", 2, true}, + {1, "com.se", 2, true}, + {1, "de.com", 2, true}, + {1, "eu.com", 2, true}, + {1, "gb.com", 2, true}, + {1, "gb.net", 2, true}, + {1, "hu.com", 2, true}, + {1, "hu.net", 2, true}, + {1, "jp.net", 2, true}, + {1, "jpn.com", 2, true}, + {1, "kr.com", 2, true}, + {1, "mex.com", 2, true}, + {1, "no.com", 2, true}, + {1, "qc.com", 2, true}, + {1, "ru.com", 2, true}, + {1, "sa.com", 2, true}, + {1, "se.net", 2, true}, + {1, "uk.com", 2, true}, + {1, "uk.net", 2, true}, + {1, "us.com", 2, true}, + {1, "uy.com", 2, true}, + {1, "za.bz", 2, true}, + {1, "za.com", 2, true}, + {1, "africa.com", 2, true}, + {1, "gr.com", 2, true}, + {1, "in.net", 2, true}, + {1, "us.org", 2, true}, + {1, "co.com", 2, true}, + {1, "c.la", 2, true}, + {1, "certmgr.org", 2, true}, + {1, "xenapponazure.com", 2, true}, + {1, "discourse.group", 2, true}, + {1, "discourse.team", 2, true}, + {1, "virtueeldomein.nl", 2, true}, + {1, "cleverapps.io", 2, true}, + {2, "lcl.dev", 3, true}, + {2, "stg.dev", 3, true}, + {1, "c66.me", 2, true}, + {1, "cloud66.ws", 2, true}, + {1, "cloud66.zone", 2, true}, + {1, "jdevcloud.com", 2, true}, + {1, "wpdevcloud.com", 2, true}, + {1, "cloudaccess.host", 2, true}, + {1, "freesite.host", 2, true}, + {1, "cloudaccess.net", 
2, true}, + {1, "cloudcontrolled.com", 2, true}, + {1, "cloudcontrolapp.com", 2, true}, + {1, "cloudera.site", 2, true}, + {1, "trycloudflare.com", 2, true}, + {1, "workers.dev", 2, true}, + {1, "wnext.app", 2, true}, + {1, "co.ca", 2, true}, + {2, "otap.co", 3, true}, + {1, "co.cz", 2, true}, + {1, "c.cdn77.org", 3, true}, + {1, "cdn77-ssl.net", 2, true}, + {1, "r.cdn77.net", 3, true}, + {1, "rsc.cdn77.org", 3, true}, + {1, "ssl.origin.cdn77-secure.org", 4, true}, + {1, "cloudns.asia", 2, true}, + {1, "cloudns.biz", 2, true}, + {1, "cloudns.club", 2, true}, + {1, "cloudns.cc", 2, true}, + {1, "cloudns.eu", 2, true}, + {1, "cloudns.in", 2, true}, + {1, "cloudns.info", 2, true}, + {1, "cloudns.org", 2, true}, + {1, "cloudns.pro", 2, true}, + {1, "cloudns.pw", 2, true}, + {1, "cloudns.us", 2, true}, + {1, "cloudeity.net", 2, true}, + {1, "cnpy.gdn", 2, true}, + {1, "co.nl", 2, true}, + {1, "co.no", 2, true}, + {1, "webhosting.be", 2, true}, + {1, "hosting-cluster.nl", 2, true}, + {1, "ac.ru", 2, true}, + {1, "edu.ru", 2, true}, + {1, "gov.ru", 2, true}, + {1, "int.ru", 2, true}, + {1, "mil.ru", 2, true}, + {1, "test.ru", 2, true}, + {1, "dyn.cosidns.de", 3, true}, + {1, "dynamisches-dns.de", 2, true}, + {1, "dnsupdater.de", 2, true}, + {1, "internet-dns.de", 2, true}, + {1, "l-o-g-i-n.de", 2, true}, + {1, "dynamic-dns.info", 2, true}, + {1, "feste-ip.net", 2, true}, + {1, "knx-server.net", 2, true}, + {1, "static-access.net", 2, true}, + {1, "realm.cz", 2, true}, + {2, "cryptonomic.net", 3, true}, + {1, "cupcake.is", 2, true}, + {2, "customer-oci.com", 3, true}, + {2, "oci.customer-oci.com", 4, true}, + {2, "ocp.customer-oci.com", 4, true}, + {2, "ocs.customer-oci.com", 4, true}, + {1, "cyon.link", 2, true}, + {1, "cyon.site", 2, true}, + {1, "daplie.me", 2, true}, + {1, "localhost.daplie.me", 3, true}, + {1, "dattolocal.com", 2, true}, + {1, "dattorelay.com", 2, true}, + {1, "dattoweb.com", 2, true}, + {1, "mydatto.com", 2, true}, + {1, "dattolocal.net", 2, true}, + 
{1, "mydatto.net", 2, true}, + {1, "biz.dk", 2, true}, + {1, "co.dk", 2, true}, + {1, "firm.dk", 2, true}, + {1, "reg.dk", 2, true}, + {1, "store.dk", 2, true}, + {2, "dapps.earth", 3, true}, + {2, "bzz.dapps.earth", 4, true}, + {1, "builtwithdark.com", 2, true}, + {1, "edgestack.me", 2, true}, + {1, "debian.net", 2, true}, + {1, "dedyn.io", 2, true}, + {1, "dnshome.de", 2, true}, + {1, "online.th", 2, true}, + {1, "shop.th", 2, true}, + {1, "drayddns.com", 2, true}, + {1, "dreamhosters.com", 2, true}, + {1, "mydrobo.com", 2, true}, + {1, "drud.io", 2, true}, + {1, "drud.us", 2, true}, + {1, "duckdns.org", 2, true}, + {1, "dy.fi", 2, true}, + {1, "tunk.org", 2, true}, + {1, "dyndns-at-home.com", 2, true}, + {1, "dyndns-at-work.com", 2, true}, + {1, "dyndns-blog.com", 2, true}, + {1, "dyndns-free.com", 2, true}, + {1, "dyndns-home.com", 2, true}, + {1, "dyndns-ip.com", 2, true}, + {1, "dyndns-mail.com", 2, true}, + {1, "dyndns-office.com", 2, true}, + {1, "dyndns-pics.com", 2, true}, + {1, "dyndns-remote.com", 2, true}, + {1, "dyndns-server.com", 2, true}, + {1, "dyndns-web.com", 2, true}, + {1, "dyndns-wiki.com", 2, true}, + {1, "dyndns-work.com", 2, true}, + {1, "dyndns.biz", 2, true}, + {1, "dyndns.info", 2, true}, + {1, "dyndns.org", 2, true}, + {1, "dyndns.tv", 2, true}, + {1, "at-band-camp.net", 2, true}, + {1, "ath.cx", 2, true}, + {1, "barrel-of-knowledge.info", 2, true}, + {1, "barrell-of-knowledge.info", 2, true}, + {1, "better-than.tv", 2, true}, + {1, "blogdns.com", 2, true}, + {1, "blogdns.net", 2, true}, + {1, "blogdns.org", 2, true}, + {1, "blogsite.org", 2, true}, + {1, "boldlygoingnowhere.org", 2, true}, + {1, "broke-it.net", 2, true}, + {1, "buyshouses.net", 2, true}, + {1, "cechire.com", 2, true}, + {1, "dnsalias.com", 2, true}, + {1, "dnsalias.net", 2, true}, + {1, "dnsalias.org", 2, true}, + {1, "dnsdojo.com", 2, true}, + {1, "dnsdojo.net", 2, true}, + {1, "dnsdojo.org", 2, true}, + {1, "does-it.net", 2, true}, + {1, "doesntexist.com", 2, true}, 
+ {1, "doesntexist.org", 2, true}, + {1, "dontexist.com", 2, true}, + {1, "dontexist.net", 2, true}, + {1, "dontexist.org", 2, true}, + {1, "doomdns.com", 2, true}, + {1, "doomdns.org", 2, true}, + {1, "dvrdns.org", 2, true}, + {1, "dyn-o-saur.com", 2, true}, + {1, "dynalias.com", 2, true}, + {1, "dynalias.net", 2, true}, + {1, "dynalias.org", 2, true}, + {1, "dynathome.net", 2, true}, + {1, "dyndns.ws", 2, true}, + {1, "endofinternet.net", 2, true}, + {1, "endofinternet.org", 2, true}, + {1, "endoftheinternet.org", 2, true}, + {1, "est-a-la-maison.com", 2, true}, + {1, "est-a-la-masion.com", 2, true}, + {1, "est-le-patron.com", 2, true}, + {1, "est-mon-blogueur.com", 2, true}, + {1, "for-better.biz", 2, true}, + {1, "for-more.biz", 2, true}, + {1, "for-our.info", 2, true}, + {1, "for-some.biz", 2, true}, + {1, "for-the.biz", 2, true}, + {1, "forgot.her.name", 3, true}, + {1, "forgot.his.name", 3, true}, + {1, "from-ak.com", 2, true}, + {1, "from-al.com", 2, true}, + {1, "from-ar.com", 2, true}, + {1, "from-az.net", 2, true}, + {1, "from-ca.com", 2, true}, + {1, "from-co.net", 2, true}, + {1, "from-ct.com", 2, true}, + {1, "from-dc.com", 2, true}, + {1, "from-de.com", 2, true}, + {1, "from-fl.com", 2, true}, + {1, "from-ga.com", 2, true}, + {1, "from-hi.com", 2, true}, + {1, "from-ia.com", 2, true}, + {1, "from-id.com", 2, true}, + {1, "from-il.com", 2, true}, + {1, "from-in.com", 2, true}, + {1, "from-ks.com", 2, true}, + {1, "from-ky.com", 2, true}, + {1, "from-la.net", 2, true}, + {1, "from-ma.com", 2, true}, + {1, "from-md.com", 2, true}, + {1, "from-me.org", 2, true}, + {1, "from-mi.com", 2, true}, + {1, "from-mn.com", 2, true}, + {1, "from-mo.com", 2, true}, + {1, "from-ms.com", 2, true}, + {1, "from-mt.com", 2, true}, + {1, "from-nc.com", 2, true}, + {1, "from-nd.com", 2, true}, + {1, "from-ne.com", 2, true}, + {1, "from-nh.com", 2, true}, + {1, "from-nj.com", 2, true}, + {1, "from-nm.com", 2, true}, + {1, "from-nv.com", 2, true}, + {1, "from-ny.net", 2, 
true}, + {1, "from-oh.com", 2, true}, + {1, "from-ok.com", 2, true}, + {1, "from-or.com", 2, true}, + {1, "from-pa.com", 2, true}, + {1, "from-pr.com", 2, true}, + {1, "from-ri.com", 2, true}, + {1, "from-sc.com", 2, true}, + {1, "from-sd.com", 2, true}, + {1, "from-tn.com", 2, true}, + {1, "from-tx.com", 2, true}, + {1, "from-ut.com", 2, true}, + {1, "from-va.com", 2, true}, + {1, "from-vt.com", 2, true}, + {1, "from-wa.com", 2, true}, + {1, "from-wi.com", 2, true}, + {1, "from-wv.com", 2, true}, + {1, "from-wy.com", 2, true}, + {1, "ftpaccess.cc", 2, true}, + {1, "fuettertdasnetz.de", 2, true}, + {1, "game-host.org", 2, true}, + {1, "game-server.cc", 2, true}, + {1, "getmyip.com", 2, true}, + {1, "gets-it.net", 2, true}, + {1, "go.dyndns.org", 3, true}, + {1, "gotdns.com", 2, true}, + {1, "gotdns.org", 2, true}, + {1, "groks-the.info", 2, true}, + {1, "groks-this.info", 2, true}, + {1, "ham-radio-op.net", 2, true}, + {1, "here-for-more.info", 2, true}, + {1, "hobby-site.com", 2, true}, + {1, "hobby-site.org", 2, true}, + {1, "home.dyndns.org", 3, true}, + {1, "homedns.org", 2, true}, + {1, "homeftp.net", 2, true}, + {1, "homeftp.org", 2, true}, + {1, "homeip.net", 2, true}, + {1, "homelinux.com", 2, true}, + {1, "homelinux.net", 2, true}, + {1, "homelinux.org", 2, true}, + {1, "homeunix.com", 2, true}, + {1, "homeunix.net", 2, true}, + {1, "homeunix.org", 2, true}, + {1, "iamallama.com", 2, true}, + {1, "in-the-band.net", 2, true}, + {1, "is-a-anarchist.com", 2, true}, + {1, "is-a-blogger.com", 2, true}, + {1, "is-a-bookkeeper.com", 2, true}, + {1, "is-a-bruinsfan.org", 2, true}, + {1, "is-a-bulls-fan.com", 2, true}, + {1, "is-a-candidate.org", 2, true}, + {1, "is-a-caterer.com", 2, true}, + {1, "is-a-celticsfan.org", 2, true}, + {1, "is-a-chef.com", 2, true}, + {1, "is-a-chef.net", 2, true}, + {1, "is-a-chef.org", 2, true}, + {1, "is-a-conservative.com", 2, true}, + {1, "is-a-cpa.com", 2, true}, + {1, "is-a-cubicle-slave.com", 2, true}, + {1, 
"is-a-democrat.com", 2, true}, + {1, "is-a-designer.com", 2, true}, + {1, "is-a-doctor.com", 2, true}, + {1, "is-a-financialadvisor.com", 2, true}, + {1, "is-a-geek.com", 2, true}, + {1, "is-a-geek.net", 2, true}, + {1, "is-a-geek.org", 2, true}, + {1, "is-a-green.com", 2, true}, + {1, "is-a-guru.com", 2, true}, + {1, "is-a-hard-worker.com", 2, true}, + {1, "is-a-hunter.com", 2, true}, + {1, "is-a-knight.org", 2, true}, + {1, "is-a-landscaper.com", 2, true}, + {1, "is-a-lawyer.com", 2, true}, + {1, "is-a-liberal.com", 2, true}, + {1, "is-a-libertarian.com", 2, true}, + {1, "is-a-linux-user.org", 2, true}, + {1, "is-a-llama.com", 2, true}, + {1, "is-a-musician.com", 2, true}, + {1, "is-a-nascarfan.com", 2, true}, + {1, "is-a-nurse.com", 2, true}, + {1, "is-a-painter.com", 2, true}, + {1, "is-a-patsfan.org", 2, true}, + {1, "is-a-personaltrainer.com", 2, true}, + {1, "is-a-photographer.com", 2, true}, + {1, "is-a-player.com", 2, true}, + {1, "is-a-republican.com", 2, true}, + {1, "is-a-rockstar.com", 2, true}, + {1, "is-a-socialist.com", 2, true}, + {1, "is-a-soxfan.org", 2, true}, + {1, "is-a-student.com", 2, true}, + {1, "is-a-teacher.com", 2, true}, + {1, "is-a-techie.com", 2, true}, + {1, "is-a-therapist.com", 2, true}, + {1, "is-an-accountant.com", 2, true}, + {1, "is-an-actor.com", 2, true}, + {1, "is-an-actress.com", 2, true}, + {1, "is-an-anarchist.com", 2, true}, + {1, "is-an-artist.com", 2, true}, + {1, "is-an-engineer.com", 2, true}, + {1, "is-an-entertainer.com", 2, true}, + {1, "is-by.us", 2, true}, + {1, "is-certified.com", 2, true}, + {1, "is-found.org", 2, true}, + {1, "is-gone.com", 2, true}, + {1, "is-into-anime.com", 2, true}, + {1, "is-into-cars.com", 2, true}, + {1, "is-into-cartoons.com", 2, true}, + {1, "is-into-games.com", 2, true}, + {1, "is-leet.com", 2, true}, + {1, "is-lost.org", 2, true}, + {1, "is-not-certified.com", 2, true}, + {1, "is-saved.org", 2, true}, + {1, "is-slick.com", 2, true}, + {1, "is-uberleet.com", 2, true}, + {1, 
"is-very-bad.org", 2, true}, + {1, "is-very-evil.org", 2, true}, + {1, "is-very-good.org", 2, true}, + {1, "is-very-nice.org", 2, true}, + {1, "is-very-sweet.org", 2, true}, + {1, "is-with-theband.com", 2, true}, + {1, "isa-geek.com", 2, true}, + {1, "isa-geek.net", 2, true}, + {1, "isa-geek.org", 2, true}, + {1, "isa-hockeynut.com", 2, true}, + {1, "issmarterthanyou.com", 2, true}, + {1, "isteingeek.de", 2, true}, + {1, "istmein.de", 2, true}, + {1, "kicks-ass.net", 2, true}, + {1, "kicks-ass.org", 2, true}, + {1, "knowsitall.info", 2, true}, + {1, "land-4-sale.us", 2, true}, + {1, "lebtimnetz.de", 2, true}, + {1, "leitungsen.de", 2, true}, + {1, "likes-pie.com", 2, true}, + {1, "likescandy.com", 2, true}, + {1, "merseine.nu", 2, true}, + {1, "mine.nu", 2, true}, + {1, "misconfused.org", 2, true}, + {1, "mypets.ws", 2, true}, + {1, "myphotos.cc", 2, true}, + {1, "neat-url.com", 2, true}, + {1, "office-on-the.net", 2, true}, + {1, "on-the-web.tv", 2, true}, + {1, "podzone.net", 2, true}, + {1, "podzone.org", 2, true}, + {1, "readmyblog.org", 2, true}, + {1, "saves-the-whales.com", 2, true}, + {1, "scrapper-site.net", 2, true}, + {1, "scrapping.cc", 2, true}, + {1, "selfip.biz", 2, true}, + {1, "selfip.com", 2, true}, + {1, "selfip.info", 2, true}, + {1, "selfip.net", 2, true}, + {1, "selfip.org", 2, true}, + {1, "sells-for-less.com", 2, true}, + {1, "sells-for-u.com", 2, true}, + {1, "sells-it.net", 2, true}, + {1, "sellsyourhome.org", 2, true}, + {1, "servebbs.com", 2, true}, + {1, "servebbs.net", 2, true}, + {1, "servebbs.org", 2, true}, + {1, "serveftp.net", 2, true}, + {1, "serveftp.org", 2, true}, + {1, "servegame.org", 2, true}, + {1, "shacknet.nu", 2, true}, + {1, "simple-url.com", 2, true}, + {1, "space-to-rent.com", 2, true}, + {1, "stuff-4-sale.org", 2, true}, + {1, "stuff-4-sale.us", 2, true}, + {1, "teaches-yoga.com", 2, true}, + {1, "thruhere.net", 2, true}, + {1, "traeumtgerade.de", 2, true}, + {1, "webhop.biz", 2, true}, + {1, "webhop.info", 2, 
true}, + {1, "webhop.net", 2, true}, + {1, "webhop.org", 2, true}, + {1, "worse-than.tv", 2, true}, + {1, "writesthisblog.com", 2, true}, + {1, "ddnss.de", 2, true}, + {1, "dyn.ddnss.de", 3, true}, + {1, "dyndns.ddnss.de", 3, true}, + {1, "dyndns1.de", 2, true}, + {1, "dyn-ip24.de", 2, true}, + {1, "home-webserver.de", 2, true}, + {1, "dyn.home-webserver.de", 3, true}, + {1, "myhome-server.de", 2, true}, + {1, "ddnss.org", 2, true}, + {1, "definima.net", 2, true}, + {1, "definima.io", 2, true}, + {1, "bci.dnstrace.pro", 3, true}, + {1, "ddnsfree.com", 2, true}, + {1, "ddnsgeek.com", 2, true}, + {1, "giize.com", 2, true}, + {1, "gleeze.com", 2, true}, + {1, "kozow.com", 2, true}, + {1, "loseyourip.com", 2, true}, + {1, "ooguy.com", 2, true}, + {1, "theworkpc.com", 2, true}, + {1, "casacam.net", 2, true}, + {1, "dynu.net", 2, true}, + {1, "accesscam.org", 2, true}, + {1, "camdvr.org", 2, true}, + {1, "freeddns.org", 2, true}, + {1, "mywire.org", 2, true}, + {1, "webredirect.org", 2, true}, + {1, "myddns.rocks", 2, true}, + {1, "blogsite.xyz", 2, true}, + {1, "dynv6.net", 2, true}, + {1, "e4.cz", 2, true}, + {1, "en-root.fr", 2, true}, + {1, "mytuleap.com", 2, true}, + {1, "onred.one", 2, true}, + {1, "staging.onred.one", 3, true}, + {1, "enonic.io", 2, true}, + {1, "customer.enonic.io", 3, true}, + {1, "eu.org", 2, true}, + {1, "al.eu.org", 3, true}, + {1, "asso.eu.org", 3, true}, + {1, "at.eu.org", 3, true}, + {1, "au.eu.org", 3, true}, + {1, "be.eu.org", 3, true}, + {1, "bg.eu.org", 3, true}, + {1, "ca.eu.org", 3, true}, + {1, "cd.eu.org", 3, true}, + {1, "ch.eu.org", 3, true}, + {1, "cn.eu.org", 3, true}, + {1, "cy.eu.org", 3, true}, + {1, "cz.eu.org", 3, true}, + {1, "de.eu.org", 3, true}, + {1, "dk.eu.org", 3, true}, + {1, "edu.eu.org", 3, true}, + {1, "ee.eu.org", 3, true}, + {1, "es.eu.org", 3, true}, + {1, "fi.eu.org", 3, true}, + {1, "fr.eu.org", 3, true}, + {1, "gr.eu.org", 3, true}, + {1, "hr.eu.org", 3, true}, + {1, "hu.eu.org", 3, true}, + {1, 
"ie.eu.org", 3, true}, + {1, "il.eu.org", 3, true}, + {1, "in.eu.org", 3, true}, + {1, "int.eu.org", 3, true}, + {1, "is.eu.org", 3, true}, + {1, "it.eu.org", 3, true}, + {1, "jp.eu.org", 3, true}, + {1, "kr.eu.org", 3, true}, + {1, "lt.eu.org", 3, true}, + {1, "lu.eu.org", 3, true}, + {1, "lv.eu.org", 3, true}, + {1, "mc.eu.org", 3, true}, + {1, "me.eu.org", 3, true}, + {1, "mk.eu.org", 3, true}, + {1, "mt.eu.org", 3, true}, + {1, "my.eu.org", 3, true}, + {1, "net.eu.org", 3, true}, + {1, "ng.eu.org", 3, true}, + {1, "nl.eu.org", 3, true}, + {1, "no.eu.org", 3, true}, + {1, "nz.eu.org", 3, true}, + {1, "paris.eu.org", 3, true}, + {1, "pl.eu.org", 3, true}, + {1, "pt.eu.org", 3, true}, + {1, "q-a.eu.org", 3, true}, + {1, "ro.eu.org", 3, true}, + {1, "ru.eu.org", 3, true}, + {1, "se.eu.org", 3, true}, + {1, "si.eu.org", 3, true}, + {1, "sk.eu.org", 3, true}, + {1, "tr.eu.org", 3, true}, + {1, "uk.eu.org", 3, true}, + {1, "us.eu.org", 3, true}, + {1, "eu-1.evennode.com", 3, true}, + {1, "eu-2.evennode.com", 3, true}, + {1, "eu-3.evennode.com", 3, true}, + {1, "eu-4.evennode.com", 3, true}, + {1, "us-1.evennode.com", 3, true}, + {1, "us-2.evennode.com", 3, true}, + {1, "us-3.evennode.com", 3, true}, + {1, "us-4.evennode.com", 3, true}, + {1, "twmail.cc", 2, true}, + {1, "twmail.net", 2, true}, + {1, "twmail.org", 2, true}, + {1, "mymailer.com.tw", 3, true}, + {1, "url.tw", 2, true}, + {1, "apps.fbsbx.com", 3, true}, + {1, "ru.net", 2, true}, + {1, "adygeya.ru", 2, true}, + {1, "bashkiria.ru", 2, true}, + {1, "bir.ru", 2, true}, + {1, "cbg.ru", 2, true}, + {1, "com.ru", 2, true}, + {1, "dagestan.ru", 2, true}, + {1, "grozny.ru", 2, true}, + {1, "kalmykia.ru", 2, true}, + {1, "kustanai.ru", 2, true}, + {1, "marine.ru", 2, true}, + {1, "mordovia.ru", 2, true}, + {1, "msk.ru", 2, true}, + {1, "mytis.ru", 2, true}, + {1, "nalchik.ru", 2, true}, + {1, "nov.ru", 2, true}, + {1, "pyatigorsk.ru", 2, true}, + {1, "spb.ru", 2, true}, + {1, "vladikavkaz.ru", 2, true}, + {1, 
"vladimir.ru", 2, true}, + {1, "abkhazia.su", 2, true}, + {1, "adygeya.su", 2, true}, + {1, "aktyubinsk.su", 2, true}, + {1, "arkhangelsk.su", 2, true}, + {1, "armenia.su", 2, true}, + {1, "ashgabad.su", 2, true}, + {1, "azerbaijan.su", 2, true}, + {1, "balashov.su", 2, true}, + {1, "bashkiria.su", 2, true}, + {1, "bryansk.su", 2, true}, + {1, "bukhara.su", 2, true}, + {1, "chimkent.su", 2, true}, + {1, "dagestan.su", 2, true}, + {1, "east-kazakhstan.su", 2, true}, + {1, "exnet.su", 2, true}, + {1, "georgia.su", 2, true}, + {1, "grozny.su", 2, true}, + {1, "ivanovo.su", 2, true}, + {1, "jambyl.su", 2, true}, + {1, "kalmykia.su", 2, true}, + {1, "kaluga.su", 2, true}, + {1, "karacol.su", 2, true}, + {1, "karaganda.su", 2, true}, + {1, "karelia.su", 2, true}, + {1, "khakassia.su", 2, true}, + {1, "krasnodar.su", 2, true}, + {1, "kurgan.su", 2, true}, + {1, "kustanai.su", 2, true}, + {1, "lenug.su", 2, true}, + {1, "mangyshlak.su", 2, true}, + {1, "mordovia.su", 2, true}, + {1, "msk.su", 2, true}, + {1, "murmansk.su", 2, true}, + {1, "nalchik.su", 2, true}, + {1, "navoi.su", 2, true}, + {1, "north-kazakhstan.su", 2, true}, + {1, "nov.su", 2, true}, + {1, "obninsk.su", 2, true}, + {1, "penza.su", 2, true}, + {1, "pokrovsk.su", 2, true}, + {1, "sochi.su", 2, true}, + {1, "spb.su", 2, true}, + {1, "tashkent.su", 2, true}, + {1, "termez.su", 2, true}, + {1, "togliatti.su", 2, true}, + {1, "troitsk.su", 2, true}, + {1, "tselinograd.su", 2, true}, + {1, "tula.su", 2, true}, + {1, "tuva.su", 2, true}, + {1, "vladikavkaz.su", 2, true}, + {1, "vladimir.su", 2, true}, + {1, "vologda.su", 2, true}, + {1, "channelsdvr.net", 2, true}, + {1, "u.channelsdvr.net", 3, true}, + {1, "fastly-terrarium.com", 2, true}, + {1, "fastlylb.net", 2, true}, + {1, "map.fastlylb.net", 3, true}, + {1, "freetls.fastly.net", 3, true}, + {1, "map.fastly.net", 3, true}, + {1, "a.prod.fastly.net", 4, true}, + {1, "global.prod.fastly.net", 4, true}, + {1, "a.ssl.fastly.net", 4, true}, + {1, 
"b.ssl.fastly.net", 4, true}, + {1, "global.ssl.fastly.net", 4, true}, + {1, "fastpanel.direct", 2, true}, + {1, "fastvps-server.com", 2, true}, + {1, "fhapp.xyz", 2, true}, + {1, "fedorainfracloud.org", 2, true}, + {1, "fedorapeople.org", 2, true}, + {1, "cloud.fedoraproject.org", 3, true}, + {1, "app.os.fedoraproject.org", 4, true}, + {1, "app.os.stg.fedoraproject.org", 5, true}, + {1, "mydobiss.com", 2, true}, + {1, "filegear.me", 2, true}, + {1, "filegear-au.me", 2, true}, + {1, "filegear-de.me", 2, true}, + {1, "filegear-gb.me", 2, true}, + {1, "filegear-ie.me", 2, true}, + {1, "filegear-jp.me", 2, true}, + {1, "filegear-sg.me", 2, true}, + {1, "firebaseapp.com", 2, true}, + {1, "flynnhub.com", 2, true}, + {1, "flynnhosting.net", 2, true}, + {1, "0e.vc", 2, true}, + {1, "freebox-os.com", 2, true}, + {1, "freeboxos.com", 2, true}, + {1, "fbx-os.fr", 2, true}, + {1, "fbxos.fr", 2, true}, + {1, "freebox-os.fr", 2, true}, + {1, "freeboxos.fr", 2, true}, + {1, "freedesktop.org", 2, true}, + {2, "futurecms.at", 3, true}, + {2, "ex.futurecms.at", 4, true}, + {2, "in.futurecms.at", 4, true}, + {1, "futurehosting.at", 2, true}, + {1, "futuremailing.at", 2, true}, + {2, "ex.ortsinfo.at", 4, true}, + {2, "kunden.ortsinfo.at", 4, true}, + {2, "statics.cloud", 3, true}, + {1, "service.gov.uk", 3, true}, + {1, "gehirn.ne.jp", 3, true}, + {1, "usercontent.jp", 2, true}, + {1, "gentapps.com", 2, true}, + {1, "lab.ms", 2, true}, + {1, "github.io", 2, true}, + {1, "githubusercontent.com", 2, true}, + {1, "gitlab.io", 2, true}, + {1, "glitch.me", 2, true}, + {1, "lolipop.io", 2, true}, + {1, "cloudapps.digital", 2, true}, + {1, "london.cloudapps.digital", 3, true}, + {1, "homeoffice.gov.uk", 3, true}, + {1, "ro.im", 2, true}, + {1, "shop.ro", 2, true}, + {1, "goip.de", 2, true}, + {1, "run.app", 2, true}, + {1, "a.run.app", 3, true}, + {1, "web.app", 2, true}, + {2, "0emm.com", 3, true}, + {1, "appspot.com", 2, true}, + {2, "r.appspot.com", 4, true}, + {1, "blogspot.ae", 2, 
true}, + {1, "blogspot.al", 2, true}, + {1, "blogspot.am", 2, true}, + {1, "blogspot.ba", 2, true}, + {1, "blogspot.be", 2, true}, + {1, "blogspot.bg", 2, true}, + {1, "blogspot.bj", 2, true}, + {1, "blogspot.ca", 2, true}, + {1, "blogspot.cf", 2, true}, + {1, "blogspot.ch", 2, true}, + {1, "blogspot.cl", 2, true}, + {1, "blogspot.co.at", 3, true}, + {1, "blogspot.co.id", 3, true}, + {1, "blogspot.co.il", 3, true}, + {1, "blogspot.co.ke", 3, true}, + {1, "blogspot.co.nz", 3, true}, + {1, "blogspot.co.uk", 3, true}, + {1, "blogspot.co.za", 3, true}, + {1, "blogspot.com", 2, true}, + {1, "blogspot.com.ar", 3, true}, + {1, "blogspot.com.au", 3, true}, + {1, "blogspot.com.br", 3, true}, + {1, "blogspot.com.by", 3, true}, + {1, "blogspot.com.co", 3, true}, + {1, "blogspot.com.cy", 3, true}, + {1, "blogspot.com.ee", 3, true}, + {1, "blogspot.com.eg", 3, true}, + {1, "blogspot.com.es", 3, true}, + {1, "blogspot.com.mt", 3, true}, + {1, "blogspot.com.ng", 3, true}, + {1, "blogspot.com.tr", 3, true}, + {1, "blogspot.com.uy", 3, true}, + {1, "blogspot.cv", 2, true}, + {1, "blogspot.cz", 2, true}, + {1, "blogspot.de", 2, true}, + {1, "blogspot.dk", 2, true}, + {1, "blogspot.fi", 2, true}, + {1, "blogspot.fr", 2, true}, + {1, "blogspot.gr", 2, true}, + {1, "blogspot.hk", 2, true}, + {1, "blogspot.hr", 2, true}, + {1, "blogspot.hu", 2, true}, + {1, "blogspot.ie", 2, true}, + {1, "blogspot.in", 2, true}, + {1, "blogspot.is", 2, true}, + {1, "blogspot.it", 2, true}, + {1, "blogspot.jp", 2, true}, + {1, "blogspot.kr", 2, true}, + {1, "blogspot.li", 2, true}, + {1, "blogspot.lt", 2, true}, + {1, "blogspot.lu", 2, true}, + {1, "blogspot.md", 2, true}, + {1, "blogspot.mk", 2, true}, + {1, "blogspot.mr", 2, true}, + {1, "blogspot.mx", 2, true}, + {1, "blogspot.my", 2, true}, + {1, "blogspot.nl", 2, true}, + {1, "blogspot.no", 2, true}, + {1, "blogspot.pe", 2, true}, + {1, "blogspot.pt", 2, true}, + {1, "blogspot.qa", 2, true}, + {1, "blogspot.re", 2, true}, + {1, "blogspot.ro", 2, 
true}, + {1, "blogspot.rs", 2, true}, + {1, "blogspot.ru", 2, true}, + {1, "blogspot.se", 2, true}, + {1, "blogspot.sg", 2, true}, + {1, "blogspot.si", 2, true}, + {1, "blogspot.sk", 2, true}, + {1, "blogspot.sn", 2, true}, + {1, "blogspot.td", 2, true}, + {1, "blogspot.tw", 2, true}, + {1, "blogspot.ug", 2, true}, + {1, "blogspot.vn", 2, true}, + {1, "cloudfunctions.net", 2, true}, + {1, "cloud.goog", 2, true}, + {1, "codespot.com", 2, true}, + {1, "googleapis.com", 2, true}, + {1, "googlecode.com", 2, true}, + {1, "pagespeedmobilizer.com", 2, true}, + {1, "publishproxy.com", 2, true}, + {1, "withgoogle.com", 2, true}, + {1, "withyoutube.com", 2, true}, + {1, "awsmppl.com", 2, true}, + {1, "fin.ci", 2, true}, + {1, "free.hr", 2, true}, + {1, "caa.li", 2, true}, + {1, "ua.rs", 2, true}, + {1, "conf.se", 2, true}, + {1, "hs.zone", 2, true}, + {1, "hs.run", 2, true}, + {1, "hashbang.sh", 2, true}, + {1, "hasura.app", 2, true}, + {1, "hasura-app.io", 2, true}, + {1, "hepforge.org", 2, true}, + {1, "herokuapp.com", 2, true}, + {1, "herokussl.com", 2, true}, + {1, "myravendb.com", 2, true}, + {1, "ravendb.community", 2, true}, + {1, "ravendb.me", 2, true}, + {1, "development.run", 2, true}, + {1, "ravendb.run", 2, true}, + {1, "bpl.biz", 2, true}, + {1, "orx.biz", 2, true}, + {1, "ng.city", 2, true}, + {1, "biz.gl", 2, true}, + {1, "ng.ink", 2, true}, + {1, "col.ng", 2, true}, + {1, "firm.ng", 2, true}, + {1, "gen.ng", 2, true}, + {1, "ltd.ng", 2, true}, + {1, "ngo.ng", 2, true}, + {1, "ng.school", 2, true}, + {1, "sch.so", 2, true}, + {1, "xn--hkkinen-5wa.fi", 2, true}, + {2, "moonscale.io", 3, true}, + {1, "moonscale.net", 2, true}, + {1, "iki.fi", 2, true}, + {1, "dyn-berlin.de", 2, true}, + {1, "in-berlin.de", 2, true}, + {1, "in-brb.de", 2, true}, + {1, "in-butter.de", 2, true}, + {1, "in-dsl.de", 2, true}, + {1, "in-dsl.net", 2, true}, + {1, "in-dsl.org", 2, true}, + {1, "in-vpn.de", 2, true}, + {1, "in-vpn.net", 2, true}, + {1, "in-vpn.org", 2, true}, + {1, 
"biz.at", 2, true}, + {1, "info.at", 2, true}, + {1, "info.cx", 2, true}, + {1, "ac.leg.br", 3, true}, + {1, "al.leg.br", 3, true}, + {1, "am.leg.br", 3, true}, + {1, "ap.leg.br", 3, true}, + {1, "ba.leg.br", 3, true}, + {1, "ce.leg.br", 3, true}, + {1, "df.leg.br", 3, true}, + {1, "es.leg.br", 3, true}, + {1, "go.leg.br", 3, true}, + {1, "ma.leg.br", 3, true}, + {1, "mg.leg.br", 3, true}, + {1, "ms.leg.br", 3, true}, + {1, "mt.leg.br", 3, true}, + {1, "pa.leg.br", 3, true}, + {1, "pb.leg.br", 3, true}, + {1, "pe.leg.br", 3, true}, + {1, "pi.leg.br", 3, true}, + {1, "pr.leg.br", 3, true}, + {1, "rj.leg.br", 3, true}, + {1, "rn.leg.br", 3, true}, + {1, "ro.leg.br", 3, true}, + {1, "rr.leg.br", 3, true}, + {1, "rs.leg.br", 3, true}, + {1, "sc.leg.br", 3, true}, + {1, "se.leg.br", 3, true}, + {1, "sp.leg.br", 3, true}, + {1, "to.leg.br", 3, true}, + {1, "pixolino.com", 2, true}, + {1, "ipifony.net", 2, true}, + {1, "mein-iserv.de", 2, true}, + {1, "test-iserv.de", 2, true}, + {1, "iserv.dev", 2, true}, + {1, "iobb.net", 2, true}, + {1, "myjino.ru", 2, true}, + {2, "hosting.myjino.ru", 4, true}, + {2, "landing.myjino.ru", 4, true}, + {2, "spectrum.myjino.ru", 4, true}, + {2, "vps.myjino.ru", 4, true}, + {2, "triton.zone", 3, true}, + {2, "cns.joyent.com", 4, true}, + {1, "js.org", 2, true}, + {1, "kaas.gg", 2, true}, + {1, "khplay.nl", 2, true}, + {1, "keymachine.de", 2, true}, + {1, "kinghost.net", 2, true}, + {1, "uni5.net", 2, true}, + {1, "knightpoint.systems", 2, true}, + {1, "oya.to", 2, true}, + {1, "co.krd", 2, true}, + {1, "edu.krd", 2, true}, + {1, "git-repos.de", 2, true}, + {1, "lcube-server.de", 2, true}, + {1, "svn-repos.de", 2, true}, + {1, "leadpages.co", 2, true}, + {1, "lpages.co", 2, true}, + {1, "lpusercontent.com", 2, true}, + {1, "lelux.site", 2, true}, + {1, "co.business", 2, true}, + {1, "co.education", 2, true}, + {1, "co.events", 2, true}, + {1, "co.financial", 2, true}, + {1, "co.network", 2, true}, + {1, "co.place", 2, true}, + {1, 
"co.technology", 2, true}, + {1, "app.lmpm.com", 3, true}, + {1, "linkitools.space", 2, true}, + {1, "linkyard.cloud", 2, true}, + {1, "linkyard-cloud.ch", 2, true}, + {1, "members.linode.com", 3, true}, + {1, "nodebalancer.linode.com", 3, true}, + {1, "we.bs", 2, true}, + {1, "loginline.app", 2, true}, + {1, "loginline.dev", 2, true}, + {1, "loginline.io", 2, true}, + {1, "loginline.services", 2, true}, + {1, "loginline.site", 2, true}, + {1, "krasnik.pl", 2, true}, + {1, "leczna.pl", 2, true}, + {1, "lubartow.pl", 2, true}, + {1, "lublin.pl", 2, true}, + {1, "poniatowa.pl", 2, true}, + {1, "swidnik.pl", 2, true}, + {1, "uklugs.org", 2, true}, + {1, "glug.org.uk", 3, true}, + {1, "lug.org.uk", 3, true}, + {1, "lugs.org.uk", 3, true}, + {1, "barsy.bg", 2, true}, + {1, "barsy.co.uk", 3, true}, + {1, "barsyonline.co.uk", 3, true}, + {1, "barsycenter.com", 2, true}, + {1, "barsyonline.com", 2, true}, + {1, "barsy.club", 2, true}, + {1, "barsy.de", 2, true}, + {1, "barsy.eu", 2, true}, + {1, "barsy.in", 2, true}, + {1, "barsy.info", 2, true}, + {1, "barsy.io", 2, true}, + {1, "barsy.me", 2, true}, + {1, "barsy.menu", 2, true}, + {1, "barsy.mobi", 2, true}, + {1, "barsy.net", 2, true}, + {1, "barsy.online", 2, true}, + {1, "barsy.org", 2, true}, + {1, "barsy.pro", 2, true}, + {1, "barsy.pub", 2, true}, + {1, "barsy.shop", 2, true}, + {1, "barsy.site", 2, true}, + {1, "barsy.support", 2, true}, + {1, "barsy.uk", 2, true}, + {2, "magentosite.cloud", 3, true}, + {1, "mayfirst.info", 2, true}, + {1, "mayfirst.org", 2, true}, + {1, "hb.cldmail.ru", 3, true}, + {1, "miniserver.com", 2, true}, + {1, "memset.net", 2, true}, + {1, "cloud.metacentrum.cz", 3, true}, + {1, "custom.metacentrum.cz", 3, true}, + {1, "flt.cloud.muni.cz", 4, true}, + {1, "usr.cloud.muni.cz", 4, true}, + {1, "meteorapp.com", 2, true}, + {1, "eu.meteorapp.com", 3, true}, + {1, "co.pl", 2, true}, + {1, "azurecontainer.io", 2, true}, + {1, "azurewebsites.net", 2, true}, + {1, "azure-mobile.net", 2, true}, + 
{1, "cloudapp.net", 2, true}, + {1, "mozilla-iot.org", 2, true}, + {1, "bmoattachments.org", 2, true}, + {1, "net.ru", 2, true}, + {1, "org.ru", 2, true}, + {1, "pp.ru", 2, true}, + {1, "ui.nabu.casa", 3, true}, + {1, "pony.club", 2, true}, + {1, "of.fashion", 2, true}, + {1, "on.fashion", 2, true}, + {1, "of.football", 2, true}, + {1, "in.london", 2, true}, + {1, "of.london", 2, true}, + {1, "for.men", 2, true}, + {1, "and.mom", 2, true}, + {1, "for.mom", 2, true}, + {1, "for.one", 2, true}, + {1, "for.sale", 2, true}, + {1, "of.work", 2, true}, + {1, "to.work", 2, true}, + {1, "nctu.me", 2, true}, + {1, "bitballoon.com", 2, true}, + {1, "netlify.com", 2, true}, + {1, "4u.com", 2, true}, + {1, "ngrok.io", 2, true}, + {1, "nh-serv.co.uk", 3, true}, + {1, "nfshost.com", 2, true}, + {1, "dnsking.ch", 2, true}, + {1, "mypi.co", 2, true}, + {1, "n4t.co", 2, true}, + {1, "001www.com", 2, true}, + {1, "ddnslive.com", 2, true}, + {1, "myiphost.com", 2, true}, + {1, "forumz.info", 2, true}, + {1, "16-b.it", 2, true}, + {1, "32-b.it", 2, true}, + {1, "64-b.it", 2, true}, + {1, "soundcast.me", 2, true}, + {1, "tcp4.me", 2, true}, + {1, "dnsup.net", 2, true}, + {1, "hicam.net", 2, true}, + {1, "now-dns.net", 2, true}, + {1, "ownip.net", 2, true}, + {1, "vpndns.net", 2, true}, + {1, "dynserv.org", 2, true}, + {1, "now-dns.org", 2, true}, + {1, "x443.pw", 2, true}, + {1, "now-dns.top", 2, true}, + {1, "ntdll.top", 2, true}, + {1, "freeddns.us", 2, true}, + {1, "crafting.xyz", 2, true}, + {1, "zapto.xyz", 2, true}, + {1, "nsupdate.info", 2, true}, + {1, "nerdpol.ovh", 2, true}, + {1, "blogsyte.com", 2, true}, + {1, "brasilia.me", 2, true}, + {1, "cable-modem.org", 2, true}, + {1, "ciscofreak.com", 2, true}, + {1, "collegefan.org", 2, true}, + {1, "couchpotatofries.org", 2, true}, + {1, "damnserver.com", 2, true}, + {1, "ddns.me", 2, true}, + {1, "ditchyourip.com", 2, true}, + {1, "dnsfor.me", 2, true}, + {1, "dnsiskinky.com", 2, true}, + {1, "dvrcam.info", 2, true}, + {1, 
"dynns.com", 2, true}, + {1, "eating-organic.net", 2, true}, + {1, "fantasyleague.cc", 2, true}, + {1, "geekgalaxy.com", 2, true}, + {1, "golffan.us", 2, true}, + {1, "health-carereform.com", 2, true}, + {1, "homesecuritymac.com", 2, true}, + {1, "homesecuritypc.com", 2, true}, + {1, "hopto.me", 2, true}, + {1, "ilovecollege.info", 2, true}, + {1, "loginto.me", 2, true}, + {1, "mlbfan.org", 2, true}, + {1, "mmafan.biz", 2, true}, + {1, "myactivedirectory.com", 2, true}, + {1, "mydissent.net", 2, true}, + {1, "myeffect.net", 2, true}, + {1, "mymediapc.net", 2, true}, + {1, "mypsx.net", 2, true}, + {1, "mysecuritycamera.com", 2, true}, + {1, "mysecuritycamera.net", 2, true}, + {1, "mysecuritycamera.org", 2, true}, + {1, "net-freaks.com", 2, true}, + {1, "nflfan.org", 2, true}, + {1, "nhlfan.net", 2, true}, + {1, "no-ip.ca", 2, true}, + {1, "no-ip.co.uk", 3, true}, + {1, "no-ip.net", 2, true}, + {1, "noip.us", 2, true}, + {1, "onthewifi.com", 2, true}, + {1, "pgafan.net", 2, true}, + {1, "point2this.com", 2, true}, + {1, "pointto.us", 2, true}, + {1, "privatizehealthinsurance.net", 2, true}, + {1, "quicksytes.com", 2, true}, + {1, "read-books.org", 2, true}, + {1, "securitytactics.com", 2, true}, + {1, "serveexchange.com", 2, true}, + {1, "servehumour.com", 2, true}, + {1, "servep2p.com", 2, true}, + {1, "servesarcasm.com", 2, true}, + {1, "stufftoread.com", 2, true}, + {1, "ufcfan.org", 2, true}, + {1, "unusualperson.com", 2, true}, + {1, "workisboring.com", 2, true}, + {1, "3utilities.com", 2, true}, + {1, "bounceme.net", 2, true}, + {1, "ddns.net", 2, true}, + {1, "ddnsking.com", 2, true}, + {1, "gotdns.ch", 2, true}, + {1, "hopto.org", 2, true}, + {1, "myftp.biz", 2, true}, + {1, "myftp.org", 2, true}, + {1, "myvnc.com", 2, true}, + {1, "no-ip.biz", 2, true}, + {1, "no-ip.info", 2, true}, + {1, "no-ip.org", 2, true}, + {1, "noip.me", 2, true}, + {1, "redirectme.net", 2, true}, + {1, "servebeer.com", 2, true}, + {1, "serveblog.net", 2, true}, + {1, 
"servecounterstrike.com", 2, true}, + {1, "serveftp.com", 2, true}, + {1, "servegame.com", 2, true}, + {1, "servehalflife.com", 2, true}, + {1, "servehttp.com", 2, true}, + {1, "serveirc.com", 2, true}, + {1, "serveminecraft.net", 2, true}, + {1, "servemp3.com", 2, true}, + {1, "servepics.com", 2, true}, + {1, "servequake.com", 2, true}, + {1, "sytes.net", 2, true}, + {1, "webhop.me", 2, true}, + {1, "zapto.org", 2, true}, + {1, "stage.nodeart.io", 3, true}, + {1, "nodum.co", 2, true}, + {1, "nodum.io", 2, true}, + {1, "pcloud.host", 2, true}, + {1, "nyc.mn", 2, true}, + {1, "nom.ae", 2, true}, + {1, "nom.af", 2, true}, + {1, "nom.ai", 2, true}, + {1, "nom.al", 2, true}, + {1, "nym.by", 2, true}, + {1, "nym.bz", 2, true}, + {1, "nom.cl", 2, true}, + {1, "nym.ec", 2, true}, + {1, "nom.gd", 2, true}, + {1, "nom.ge", 2, true}, + {1, "nom.gl", 2, true}, + {1, "nym.gr", 2, true}, + {1, "nom.gt", 2, true}, + {1, "nym.gy", 2, true}, + {1, "nym.hk", 2, true}, + {1, "nom.hn", 2, true}, + {1, "nym.ie", 2, true}, + {1, "nom.im", 2, true}, + {1, "nom.ke", 2, true}, + {1, "nym.kz", 2, true}, + {1, "nym.la", 2, true}, + {1, "nym.lc", 2, true}, + {1, "nom.li", 2, true}, + {1, "nym.li", 2, true}, + {1, "nym.lt", 2, true}, + {1, "nym.lu", 2, true}, + {1, "nym.me", 2, true}, + {1, "nom.mk", 2, true}, + {1, "nym.mn", 2, true}, + {1, "nym.mx", 2, true}, + {1, "nom.nu", 2, true}, + {1, "nym.nz", 2, true}, + {1, "nym.pe", 2, true}, + {1, "nym.pt", 2, true}, + {1, "nom.pw", 2, true}, + {1, "nom.qa", 2, true}, + {1, "nym.ro", 2, true}, + {1, "nom.rs", 2, true}, + {1, "nom.si", 2, true}, + {1, "nym.sk", 2, true}, + {1, "nom.st", 2, true}, + {1, "nym.su", 2, true}, + {1, "nym.sx", 2, true}, + {1, "nom.tj", 2, true}, + {1, "nym.tw", 2, true}, + {1, "nom.ug", 2, true}, + {1, "nom.uy", 2, true}, + {1, "nom.vc", 2, true}, + {1, "nom.vg", 2, true}, + {1, "static.observableusercontent.com", 3, true}, + {1, "cya.gg", 2, true}, + {1, "cloudycluster.net", 2, true}, + {1, "nid.io", 2, true}, + {1, 
"opencraft.hosting", 2, true}, + {1, "operaunite.com", 2, true}, + {1, "skygearapp.com", 2, true}, + {1, "outsystemscloud.com", 2, true}, + {1, "ownprovider.com", 2, true}, + {1, "own.pm", 2, true}, + {1, "ox.rs", 2, true}, + {1, "oy.lc", 2, true}, + {1, "pgfog.com", 2, true}, + {1, "pagefrontapp.com", 2, true}, + {1, "art.pl", 2, true}, + {1, "gliwice.pl", 2, true}, + {1, "krakow.pl", 2, true}, + {1, "poznan.pl", 2, true}, + {1, "wroc.pl", 2, true}, + {1, "zakopane.pl", 2, true}, + {1, "pantheonsite.io", 2, true}, + {1, "gotpantheon.com", 2, true}, + {1, "mypep.link", 2, true}, + {1, "perspecta.cloud", 2, true}, + {1, "on-web.fr", 2, true}, + {2, "platform.sh", 3, true}, + {2, "platformsh.site", 3, true}, + {1, "dyn53.io", 2, true}, + {1, "co.bn", 2, true}, + {1, "xen.prgmr.com", 3, true}, + {1, "priv.at", 2, true}, + {1, "prvcy.page", 2, true}, + {2, "dweb.link", 3, true}, + {1, "protonet.io", 2, true}, + {1, "chirurgiens-dentistes-en-france.fr", 2, true}, + {1, "byen.site", 2, true}, + {1, "pubtls.org", 2, true}, + {1, "qualifioapp.com", 2, true}, + {1, "qbuser.com", 2, true}, + {1, "instantcloud.cn", 2, true}, + {1, "ras.ru", 2, true}, + {1, "qa2.com", 2, true}, + {1, "qcx.io", 2, true}, + {2, "sys.qcx.io", 4, true}, + {1, "dev-myqnapcloud.com", 2, true}, + {1, "alpha-myqnapcloud.com", 2, true}, + {1, "myqnapcloud.com", 2, true}, + {2, "quipelements.com", 3, true}, + {1, "vapor.cloud", 2, true}, + {1, "vaporcloud.io", 2, true}, + {1, "rackmaze.com", 2, true}, + {1, "rackmaze.net", 2, true}, + {2, "on-k3s.io", 3, true}, + {2, "on-rancher.cloud", 3, true}, + {2, "on-rio.io", 3, true}, + {1, "readthedocs.io", 2, true}, + {1, "rhcloud.com", 2, true}, + {1, "app.render.com", 3, true}, + {1, "onrender.com", 2, true}, + {1, "repl.co", 2, true}, + {1, "repl.run", 2, true}, + {1, "resindevice.io", 2, true}, + {1, "devices.resinstaging.io", 3, true}, + {1, "hzc.io", 2, true}, + {1, "wellbeingzone.eu", 2, true}, + {1, "ptplus.fit", 2, true}, + {1, "wellbeingzone.co.uk", 
3, true}, + {1, "git-pages.rit.edu", 3, true}, + {1, "sandcats.io", 2, true}, + {1, "logoip.de", 2, true}, + {1, "logoip.com", 2, true}, + {1, "schokokeks.net", 2, true}, + {1, "gov.scot", 2, true}, + {1, "scrysec.com", 2, true}, + {1, "firewall-gateway.com", 2, true}, + {1, "firewall-gateway.de", 2, true}, + {1, "my-gateway.de", 2, true}, + {1, "my-router.de", 2, true}, + {1, "spdns.de", 2, true}, + {1, "spdns.eu", 2, true}, + {1, "firewall-gateway.net", 2, true}, + {1, "my-firewall.org", 2, true}, + {1, "myfirewall.org", 2, true}, + {1, "spdns.org", 2, true}, + {1, "senseering.net", 2, true}, + {1, "biz.ua", 2, true}, + {1, "co.ua", 2, true}, + {1, "pp.ua", 2, true}, + {1, "shiftedit.io", 2, true}, + {1, "myshopblocks.com", 2, true}, + {1, "shopitsite.com", 2, true}, + {1, "mo-siemens.io", 2, true}, + {1, "1kapp.com", 2, true}, + {1, "appchizi.com", 2, true}, + {1, "applinzi.com", 2, true}, + {1, "sinaapp.com", 2, true}, + {1, "vipsinaapp.com", 2, true}, + {1, "siteleaf.net", 2, true}, + {1, "bounty-full.com", 2, true}, + {1, "alpha.bounty-full.com", 3, true}, + {1, "beta.bounty-full.com", 3, true}, + {1, "stackhero-network.com", 2, true}, + {1, "static.land", 2, true}, + {1, "dev.static.land", 3, true}, + {1, "sites.static.land", 3, true}, + {1, "apps.lair.io", 3, true}, + {2, "stolos.io", 3, true}, + {1, "spacekit.io", 2, true}, + {1, "customer.speedpartner.de", 3, true}, + {1, "api.stdlib.com", 3, true}, + {1, "storj.farm", 2, true}, + {1, "utwente.io", 2, true}, + {1, "soc.srcf.net", 3, true}, + {1, "user.srcf.net", 3, true}, + {1, "temp-dns.com", 2, true}, + {1, "applicationcloud.io", 2, true}, + {1, "scapp.io", 2, true}, + {2, "s5y.io", 3, true}, + {2, "sensiosite.cloud", 3, true}, + {1, "syncloud.it", 2, true}, + {1, "diskstation.me", 2, true}, + {1, "dscloud.biz", 2, true}, + {1, "dscloud.me", 2, true}, + {1, "dscloud.mobi", 2, true}, + {1, "dsmynas.com", 2, true}, + {1, "dsmynas.net", 2, true}, + {1, "dsmynas.org", 2, true}, + {1, "familyds.com", 2, 
true}, + {1, "familyds.net", 2, true}, + {1, "familyds.org", 2, true}, + {1, "i234.me", 2, true}, + {1, "myds.me", 2, true}, + {1, "synology.me", 2, true}, + {1, "vpnplus.to", 2, true}, + {1, "direct.quickconnect.to", 3, true}, + {1, "taifun-dns.de", 2, true}, + {1, "gda.pl", 2, true}, + {1, "gdansk.pl", 2, true}, + {1, "gdynia.pl", 2, true}, + {1, "med.pl", 2, true}, + {1, "sopot.pl", 2, true}, + {1, "edugit.org", 2, true}, + {1, "telebit.app", 2, true}, + {1, "telebit.io", 2, true}, + {2, "telebit.xyz", 3, true}, + {1, "gwiddle.co.uk", 3, true}, + {1, "thingdustdata.com", 2, true}, + {1, "cust.dev.thingdust.io", 4, true}, + {1, "cust.disrec.thingdust.io", 4, true}, + {1, "cust.prod.thingdust.io", 4, true}, + {1, "cust.testing.thingdust.io", 4, true}, + {1, "arvo.network", 2, true}, + {1, "azimuth.network", 2, true}, + {1, "bloxcms.com", 2, true}, + {1, "townnews-staging.com", 2, true}, + {1, "12hp.at", 2, true}, + {1, "2ix.at", 2, true}, + {1, "4lima.at", 2, true}, + {1, "lima-city.at", 2, true}, + {1, "12hp.ch", 2, true}, + {1, "2ix.ch", 2, true}, + {1, "4lima.ch", 2, true}, + {1, "lima-city.ch", 2, true}, + {1, "trafficplex.cloud", 2, true}, + {1, "de.cool", 2, true}, + {1, "12hp.de", 2, true}, + {1, "2ix.de", 2, true}, + {1, "4lima.de", 2, true}, + {1, "lima-city.de", 2, true}, + {1, "1337.pictures", 2, true}, + {1, "clan.rip", 2, true}, + {1, "lima-city.rocks", 2, true}, + {1, "webspace.rocks", 2, true}, + {1, "lima.zone", 2, true}, + {2, "transurl.be", 3, true}, + {2, "transurl.eu", 3, true}, + {2, "transurl.nl", 3, true}, + {1, "tuxfamily.org", 2, true}, + {1, "dd-dns.de", 2, true}, + {1, "diskstation.eu", 2, true}, + {1, "diskstation.org", 2, true}, + {1, "dray-dns.de", 2, true}, + {1, "draydns.de", 2, true}, + {1, "dyn-vpn.de", 2, true}, + {1, "dynvpn.de", 2, true}, + {1, "mein-vigor.de", 2, true}, + {1, "my-vigor.de", 2, true}, + {1, "my-wan.de", 2, true}, + {1, "syno-ds.de", 2, true}, + {1, "synology-diskstation.de", 2, true}, + {1, "synology-ds.de", 2, 
true}, + {1, "uber.space", 2, true}, + {2, "uberspace.de", 3, true}, + {1, "hk.com", 2, true}, + {1, "hk.org", 2, true}, + {1, "ltd.hk", 2, true}, + {1, "inc.hk", 2, true}, + {1, "virtualuser.de", 2, true}, + {1, "virtual-user.de", 2, true}, + {1, "lib.de.us", 3, true}, + {1, "2038.io", 2, true}, + {1, "router.management", 2, true}, + {1, "v-info.info", 2, true}, + {1, "voorloper.cloud", 2, true}, + {1, "v.ua", 2, true}, + {1, "wafflecell.com", 2, true}, + {2, "webhare.dev", 3, true}, + {1, "wedeploy.io", 2, true}, + {1, "wedeploy.me", 2, true}, + {1, "wedeploy.sh", 2, true}, + {1, "remotewd.com", 2, true}, + {1, "wmflabs.org", 2, true}, + {1, "myforum.community", 2, true}, + {1, "community-pro.de", 2, true}, + {1, "diskussionsbereich.de", 2, true}, + {1, "community-pro.net", 2, true}, + {1, "meinforum.net", 2, true}, + {1, "half.host", 2, true}, + {1, "xnbay.com", 2, true}, + {1, "u2.xnbay.com", 3, true}, + {1, "u2-local.xnbay.com", 3, true}, + {1, "cistron.nl", 2, true}, + {1, "demon.nl", 2, true}, + {1, "xs4all.space", 2, true}, + {1, "yandexcloud.net", 2, true}, + {1, "storage.yandexcloud.net", 3, true}, + {1, "website.yandexcloud.net", 3, true}, + {1, "official.academy", 2, true}, + {1, "yolasite.com", 2, true}, + {1, "ybo.faith", 2, true}, + {1, "yombo.me", 2, true}, + {1, "homelink.one", 2, true}, + {1, "ybo.party", 2, true}, + {1, "ybo.review", 2, true}, + {1, "ybo.science", 2, true}, + {1, "ybo.trade", 2, true}, + {1, "nohost.me", 2, true}, + {1, "noho.st", 2, true}, + {1, "za.net", 2, true}, + {1, "za.org", 2, true}, + {1, "now.sh", 2, true}, + {1, "bss.design", 2, true}, + {1, "basicserver.io", 2, true}, + {1, "virtualserver.io", 2, true}, + {1, "site.builder.nu", 3, true}, + {1, "enterprisecloud.nu", 2, true}, +} + +func init() { + for i := range r { + DefaultList.AddRule(&r[i]) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4620037b..33013c20 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -85,6 +85,9 @@ 
github.com/datarhei/joy4/utils/bits/pio # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/eggsampler/acme/v3 v3.1.1 +## explicit; go 1.11 +github.com/eggsampler/acme/v3 # github.com/go-ole/go-ole v1.2.6 ## explicit; go 1.12 github.com/go-ole/go-ole @@ -171,6 +174,14 @@ github.com/labstack/gommon/random # github.com/leodido/go-urn v1.2.1 ## explicit; go 1.13 github.com/leodido/go-urn +# github.com/letsdebug/letsdebug v1.6.1 +## explicit; go 1.15 +github.com/letsdebug/letsdebug +# github.com/lib/pq v1.8.0 +## explicit; go 1.13 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram # github.com/libdns/libdns v0.2.1 ## explicit; go 1.14 github.com/libdns/libdns @@ -201,6 +212,9 @@ github.com/mholt/acmez/acme # github.com/miekg/dns v1.1.50 ## explicit; go 1.14 github.com/miekg/dns +# github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba +## explicit +github.com/miekg/unbound # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -279,6 +293,10 @@ github.com/vektah/gqlparser/v2/lexer github.com/vektah/gqlparser/v2/parser github.com/vektah/gqlparser/v2/validator github.com/vektah/gqlparser/v2/validator/rules +# github.com/weppos/publicsuffix-go v0.13.0 +## explicit +github.com/weppos/publicsuffix-go/net/publicsuffix +github.com/weppos/publicsuffix-go/publicsuffix # github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb ## explicit github.com/xeipuuv/gojsonpointer From ee2a188be812cbb4dc26e0f9fc6394bc66b15a38 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 27 Dec 2022 13:41:07 +0100 Subject: [PATCH 04/39] Allow defaults for template parameter --- restream/replace/replace.go | 49 +++++++++++++++++++++++--------- restream/replace/replace_test.go | 38 +++++++++++++++++++++++-- 2 files changed, 71 insertions(+), 16 deletions(-) diff --git a/restream/replace/replace.go b/restream/replace/replace.go index 47885a38..f87757eb 100644 --- 
a/restream/replace/replace.go +++ b/restream/replace/replace.go @@ -9,12 +9,13 @@ import ( type Replacer interface { // RegisterTemplate registers a template for a specific placeholder. Template // may contain placeholders as well of the form {name}. They will be replaced - // by the parameters of the placeholder (see Replace). - RegisterTemplate(placeholder, template string) + // by the parameters of the placeholder (see Replace). If a parameter is not of + // a template is not present, default values can be provided. + RegisterTemplate(placeholder, template string, defaults map[string]string) // RegisterTemplateFunc does the same as RegisterTemplate, but the template // is returned by the template function. - RegisterTemplateFunc(placeholder string, template func() string) + RegisterTemplateFunc(placeholder string, template func() string, defaults map[string]string) // Replace replaces all occurences of placeholder in str with value. The placeholder is of the // form {placeholder}. It is possible to escape a characters in value with \\ by appending a ^ @@ -28,8 +29,13 @@ type Replacer interface { Replace(str, placeholder, value string) string } +type template struct { + fn func() string + defaults map[string]string +} + type replacer struct { - templates map[string]func() string + templates map[string]template re *regexp.Regexp templateRe *regexp.Regexp @@ -38,7 +44,7 @@ type replacer struct { // New returns a Replacer func New() Replacer { r := &replacer{ - templates: make(map[string]func() string), + templates: make(map[string]template), re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`), templateRe: regexp.MustCompile(`{([a-z]+)}`), } @@ -46,12 +52,18 @@ func New() Replacer { return r } -func (r *replacer) RegisterTemplate(placeholder, template string) { - r.templates[placeholder] = func() string { return template } +func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) { + r.templates[placeholder] = template{ + fn: 
func() string { return tmpl }, + defaults: defaults, + } } -func (r *replacer) RegisterTemplateFunc(placeholder string, template func() string) { - r.templates[placeholder] = template +func (r *replacer) RegisterTemplateFunc(placeholder string, tmplFn func() string, defaults map[string]string) { + r.templates[placeholder] = template{ + fn: tmplFn, + defaults: defaults, + } } func (r *replacer) Replace(str, placeholder, value string) string { @@ -63,16 +75,20 @@ func (r *replacer) Replace(str, placeholder, value string) string { // We need a copy from the value v := value + var tmpl template = template{ + fn: func() string { return v }, + } // Check for a registered template if len(v) == 0 { - tmplFunc, ok := r.templates[placeholder] + t, ok := r.templates[placeholder] if ok { - v = tmplFunc() + tmpl = t } } - v = r.compileTemplate(v, matches[3]) + v = tmpl.fn() + v = r.compileTemplate(v, matches[3], tmpl.defaults) if len(matches[2]) != 0 { // If there's a character to escape, we also have to escape the @@ -97,13 +113,18 @@ func (r *replacer) Replace(str, placeholder, value string) string { // placeholder name and will be replaced with the value. The resulting string is "Hello World!". // If a placeholder name is not present in the params string, it will not be replaced. The key // and values can be escaped as in net/url.QueryEscape. 
-func (r *replacer) compileTemplate(str, params string) string { - if len(params) == 0 { +func (r *replacer) compileTemplate(str, params string, defaults map[string]string) string { + if len(params) == 0 && len(defaults) == 0 { return str } p := make(map[string]string) + // Copy the defaults + for key, value := range defaults { + p[key] = value + } + // taken from net/url.ParseQuery for params != "" { var key string diff --git a/restream/replace/replace_test.go b/restream/replace/replace_test.go index 7474775d..f1ebcceb 100644 --- a/restream/replace/replace_test.go +++ b/restream/replace/replace_test.go @@ -34,7 +34,7 @@ func TestReplace(t *testing.T) { func TestReplaceTemplate(t *testing.T) { r := New() - r.RegisterTemplate("foobar", "Hello {who}! {what}?") + r.RegisterTemplate("foobar", "Hello {who}! {what}?", nil) replaced := r.Replace("{foobar,who=World}", "foobar", "") require.Equal(t, "Hello World! {what}?", replaced) @@ -46,6 +46,20 @@ func TestReplaceTemplate(t *testing.T) { require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) } +func TestReplaceTemplateDefaults(t *testing.T) { + r := New() + r.RegisterTemplate("foobar", "Hello {who}! {what}?", map[string]string{ + "who": "someone", + "what": "something", + }) + + replaced := r.Replace("{foobar}", "foobar", "") + require.Equal(t, "Hello someone! something?", replaced) + + replaced = r.Replace("{foobar,who=World}", "foobar", "") + require.Equal(t, "Hello World! 
something?", replaced) +} + func TestReplaceCompileTemplate(t *testing.T) { samples := [][3]string{ {"Hello {who}!", "who=World", "Hello World!"}, @@ -58,7 +72,27 @@ func TestReplaceCompileTemplate(t *testing.T) { r := New().(*replacer) for _, e := range samples { - replaced := r.compileTemplate(e[0], e[1]) + replaced := r.compileTemplate(e[0], e[1], nil) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceCompileTemplateDefaults(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "", "Hello someone!"}, + {"Hello {who}!", "who=World", "Hello World!"}, + {"Hello {who}! {what}?", "who=World", "Hello World! something?"}, + {"Hello {who}! {what}?", "who=World,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=World,what=", "Hello World! ?"}, + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], map[string]string{ + "who": "someone", + "what": "something", + }) require.Equal(t, e[2], replaced, e[0]) } } From 8a1dc59a81610426049c3c7260955951e029238e Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 27 Dec 2022 13:46:02 +0100 Subject: [PATCH 05/39] Set a default of 20ms for internal SRT latency --- app/api/api.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/app/api/api.go b/app/api/api.go index fb36559b..5cd52e35 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -445,8 +445,8 @@ func (a *api) start() error { a.replacer = replace.New() { - a.replacer.RegisterTemplate("diskfs", a.diskfs.Base()) - a.replacer.RegisterTemplate("memfs", a.memfs.Base()) + a.replacer.RegisterTemplate("diskfs", a.diskfs.Base(), nil) + a.replacer.RegisterTemplate("memfs", a.memfs.Base(), nil) host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address) if len(host) == 0 { @@ -463,21 +463,23 @@ func (a *api) start() error { template += "?token=" + cfg.RTMP.Token } - a.replacer.RegisterTemplate("rtmp", template) + a.replacer.RegisterTemplate("rtmp", template, nil) host, 
port, _ = gonet.SplitHostPort(cfg.SRT.Address) if len(host) == 0 { host = "localhost" } - template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid={name},mode:{mode}" + template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&latency={latency}&streamid={name},mode:{mode}" if len(cfg.SRT.Token) != 0 { template += ",token:" + cfg.SRT.Token } if len(cfg.SRT.Passphrase) != 0 { template += "&passphrase=" + cfg.SRT.Passphrase } - a.replacer.RegisterTemplate("srt", template) + a.replacer.RegisterTemplate("srt", template, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) } store := store.NewJSONStore(store.JSONConfig{ From 65a617c2af31fffd613045ec9d34122d542ffc37 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Thu, 29 Dec 2022 10:43:15 +0100 Subject: [PATCH 06/39] Fix modifying DTS in RTMP packets (datarhei/restreamer#487, datarhei/restreamer#367) --- rtmp/rtmp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rtmp/rtmp.go b/rtmp/rtmp.go index fafb466b..3c219709 100644 --- a/rtmp/rtmp.go +++ b/rtmp/rtmp.go @@ -381,7 +381,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) { } // Adjust the timestamp such that the stream starts from 0 - filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: true}) + filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: false}) demuxer := &pktque.FilterDemuxer{ Filter: filters, From 0cd8be130c813e4a4d7da3140b14fe30af574874 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Sat, 31 Dec 2022 17:46:46 +0100 Subject: [PATCH 07/39] Remove letsdebug module This module has a dependency of a modules that requires cgo, that's a no-go. 
--- app/api/api.go | 27 +- go.mod | 5 - go.sum | 32 - .../github.com/eggsampler/acme/v3/.gitignore | 3 - .../github.com/eggsampler/acme/v3/.travis.yml | 26 - vendor/github.com/eggsampler/acme/v3/LICENSE | 21 - vendor/github.com/eggsampler/acme/v3/Makefile | 66 - .../github.com/eggsampler/acme/v3/README.md | 43 - .../github.com/eggsampler/acme/v3/THIRD-PARTY | 35 - .../github.com/eggsampler/acme/v3/account.go | 128 - vendor/github.com/eggsampler/acme/v3/acme.go | 294 - .../eggsampler/acme/v3/authorization.go | 43 - .../github.com/eggsampler/acme/v3/autocert.go | 430 - .../eggsampler/acme/v3/certificate.go | 106 - .../eggsampler/acme/v3/challenge.go | 102 - vendor/github.com/eggsampler/acme/v3/jws.go | 187 - vendor/github.com/eggsampler/acme/v3/nonce.go | 45 - .../github.com/eggsampler/acme/v3/options.go | 70 - vendor/github.com/eggsampler/acme/v3/order.go | 136 - .../github.com/eggsampler/acme/v3/problem.go | 65 - vendor/github.com/eggsampler/acme/v3/types.go | 163 - .../github.com/letsdebug/letsdebug/.gitignore | 6 - .../letsdebug/letsdebug/.travis.yml | 23 - .../github.com/letsdebug/letsdebug/Makefile | 32 - .../github.com/letsdebug/letsdebug/README.md | 170 - .../github.com/letsdebug/letsdebug/checker.go | 107 - .../github.com/letsdebug/letsdebug/context.go | 81 - .../github.com/letsdebug/letsdebug/dns01.go | 156 - .../letsdebug/letsdebug/dns_util.go | 127 - .../github.com/letsdebug/letsdebug/generic.go | 861 -- .../github.com/letsdebug/letsdebug/http01.go | 268 - .../letsdebug/letsdebug/http_util.go | 310 - .../letsdebug/letsdebug/letsdebug.go | 85 - .../github.com/letsdebug/letsdebug/problem.go | 75 - vendor/github.com/lib/pq/.gitignore | 4 - vendor/github.com/lib/pq/.travis.sh | 73 - vendor/github.com/lib/pq/.travis.yml | 44 - vendor/github.com/lib/pq/LICENSE.md | 8 - vendor/github.com/lib/pq/README.md | 30 - vendor/github.com/lib/pq/TESTS.md | 33 - vendor/github.com/lib/pq/array.go | 756 -- vendor/github.com/lib/pq/buf.go | 91 - 
vendor/github.com/lib/pq/conn.go | 1996 ---- vendor/github.com/lib/pq/conn_go18.go | 149 - vendor/github.com/lib/pq/connector.go | 115 - vendor/github.com/lib/pq/copy.go | 307 - vendor/github.com/lib/pq/doc.go | 268 - vendor/github.com/lib/pq/encode.go | 622 -- vendor/github.com/lib/pq/error.go | 515 - vendor/github.com/lib/pq/krb.go | 27 - vendor/github.com/lib/pq/notice.go | 71 - vendor/github.com/lib/pq/notify.go | 858 -- vendor/github.com/lib/pq/oid/doc.go | 6 - vendor/github.com/lib/pq/oid/types.go | 343 - vendor/github.com/lib/pq/rows.go | 93 - vendor/github.com/lib/pq/scram/scram.go | 264 - vendor/github.com/lib/pq/ssl.go | 175 - vendor/github.com/lib/pq/ssl_permissions.go | 20 - vendor/github.com/lib/pq/ssl_windows.go | 9 - vendor/github.com/lib/pq/url.go | 76 - vendor/github.com/lib/pq/user_posix.go | 24 - vendor/github.com/lib/pq/user_windows.go | 27 - vendor/github.com/lib/pq/uuid.go | 23 - vendor/github.com/miekg/unbound/.travis.yml | 8 - vendor/github.com/miekg/unbound/README.md | 14 - vendor/github.com/miekg/unbound/dns.go | 87 - vendor/github.com/miekg/unbound/lookup.go | 164 - vendor/github.com/miekg/unbound/unbound.go | 386 - .../weppos/publicsuffix-go/LICENSE.txt | 21 - .../net/publicsuffix/publicsuffix.go | 39 - .../publicsuffix/publicsuffix.go | 544 - .../publicsuffix-go/publicsuffix/rules.go | 8847 ----------------- vendor/modules.txt | 18 - 73 files changed, 13 insertions(+), 21470 deletions(-) delete mode 100644 vendor/github.com/eggsampler/acme/v3/.gitignore delete mode 100644 vendor/github.com/eggsampler/acme/v3/.travis.yml delete mode 100644 vendor/github.com/eggsampler/acme/v3/LICENSE delete mode 100644 vendor/github.com/eggsampler/acme/v3/Makefile delete mode 100644 vendor/github.com/eggsampler/acme/v3/README.md delete mode 100644 vendor/github.com/eggsampler/acme/v3/THIRD-PARTY delete mode 100644 vendor/github.com/eggsampler/acme/v3/account.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/acme.go delete mode 100644 
vendor/github.com/eggsampler/acme/v3/authorization.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/autocert.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/certificate.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/challenge.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/jws.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/nonce.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/options.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/order.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/problem.go delete mode 100644 vendor/github.com/eggsampler/acme/v3/types.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/.gitignore delete mode 100644 vendor/github.com/letsdebug/letsdebug/.travis.yml delete mode 100644 vendor/github.com/letsdebug/letsdebug/Makefile delete mode 100644 vendor/github.com/letsdebug/letsdebug/README.md delete mode 100644 vendor/github.com/letsdebug/letsdebug/checker.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/context.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/dns01.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/dns_util.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/generic.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/http01.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/http_util.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/letsdebug.go delete mode 100644 vendor/github.com/letsdebug/letsdebug/problem.go delete mode 100644 vendor/github.com/lib/pq/.gitignore delete mode 100644 vendor/github.com/lib/pq/.travis.sh delete mode 100644 vendor/github.com/lib/pq/.travis.yml delete mode 100644 vendor/github.com/lib/pq/LICENSE.md delete mode 100644 vendor/github.com/lib/pq/README.md delete mode 100644 vendor/github.com/lib/pq/TESTS.md delete mode 100644 vendor/github.com/lib/pq/array.go delete mode 100644 vendor/github.com/lib/pq/buf.go delete mode 100644 
vendor/github.com/lib/pq/conn.go delete mode 100644 vendor/github.com/lib/pq/conn_go18.go delete mode 100644 vendor/github.com/lib/pq/connector.go delete mode 100644 vendor/github.com/lib/pq/copy.go delete mode 100644 vendor/github.com/lib/pq/doc.go delete mode 100644 vendor/github.com/lib/pq/encode.go delete mode 100644 vendor/github.com/lib/pq/error.go delete mode 100644 vendor/github.com/lib/pq/krb.go delete mode 100644 vendor/github.com/lib/pq/notice.go delete mode 100644 vendor/github.com/lib/pq/notify.go delete mode 100644 vendor/github.com/lib/pq/oid/doc.go delete mode 100644 vendor/github.com/lib/pq/oid/types.go delete mode 100644 vendor/github.com/lib/pq/rows.go delete mode 100644 vendor/github.com/lib/pq/scram/scram.go delete mode 100644 vendor/github.com/lib/pq/ssl.go delete mode 100644 vendor/github.com/lib/pq/ssl_permissions.go delete mode 100644 vendor/github.com/lib/pq/ssl_windows.go delete mode 100644 vendor/github.com/lib/pq/url.go delete mode 100644 vendor/github.com/lib/pq/user_posix.go delete mode 100644 vendor/github.com/lib/pq/user_windows.go delete mode 100644 vendor/github.com/lib/pq/uuid.go delete mode 100644 vendor/github.com/miekg/unbound/.travis.yml delete mode 100644 vendor/github.com/miekg/unbound/README.md delete mode 100644 vendor/github.com/miekg/unbound/dns.go delete mode 100644 vendor/github.com/miekg/unbound/lookup.go delete mode 100644 vendor/github.com/miekg/unbound/unbound.go delete mode 100644 vendor/github.com/weppos/publicsuffix-go/LICENSE.txt delete mode 100644 vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go delete mode 100644 vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go delete mode 100644 vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go diff --git a/app/api/api.go b/app/api/api.go index 5cd52e35..a1062800 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -39,7 +39,6 @@ import ( "github.com/datarhei/core/v16/update" "github.com/caddyserver/certmagic" - 
"github.com/letsdebug/letsdebug" "go.uber.org/zap" ) @@ -719,19 +718,19 @@ func (a *api) start() error { if err != nil { logger.Error().WithField("error", err).Log("Failed to acquire certificate") certerror = true - - problems, err := letsdebug.Check(host, letsdebug.HTTP01) - if err != nil { - logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition") - } - - for _, p := range problems { - logger.Error().WithFields(log.Fields{ - "name": p.Name, - "detail": p.Detail, - }).Log(p.Explanation) - } - + /* + problems, err := letsdebug.Check(host, letsdebug.HTTP01) + if err != nil { + logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition") + } + + for _, p := range problems { + logger.Error().WithFields(log.Fields{ + "name": p.Name, + "detail": p.Detail, + }).Log(p.Explanation) + } + */ break } diff --git a/go.mod b/go.mod index c0984c44..c5e49d28 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.4.0 github.com/labstack/echo/v4 v4.9.1 - github.com/letsdebug/letsdebug v1.6.1 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.16 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 @@ -39,7 +38,6 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/eggsampler/acme/v3 v3.1.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -56,7 +54,6 @@ require ( github.com/klauspost/cpuid/v2 v2.1.2 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect - github.com/lib/pq v1.8.0 // indirect github.com/libdns/libdns v0.2.1 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/mailru/easyjson v0.7.7 // 
indirect @@ -64,7 +61,6 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mholt/acmez v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect - github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect @@ -78,7 +74,6 @@ require ( github.com/urfave/cli/v2 v2.8.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect - github.com/weppos/publicsuffix-go v0.13.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect diff --git a/go.sum b/go.sum index a845dca5..8712f523 100644 --- a/go.sum +++ b/go.sum @@ -40,7 +40,6 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= @@ -90,19 +89,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles 
v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/eggsampler/acme/v3 v3.1.1 h1:hSze1Cw4bHtCUdiQE2R0GKfXjAuLirSFPUX1IBz9wKw= -github.com/eggsampler/acme/v3 v3.1.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -137,7 +128,6 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator 
v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -146,7 +136,6 @@ github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keL github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -217,7 +206,6 @@ github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws= github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.4.0 
h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -230,7 +218,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= @@ -256,11 +243,6 @@ github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8 github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/letsdebug/letsdebug v1.6.1 h1:ef4qwhKAXbyoLB2jGWsIWeI245UjyDYvOgenwr/pblA= -github.com/letsdebug/letsdebug v1.6.1/go.mod h1:Bl1mFMHJqyTb3kzsznBpfTpcQLKaChV7xCsWEIdA2Ew= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis= github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40= github.com/lithammer/shortuuid/v4 
v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= @@ -281,17 +263,13 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80= github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba h1:RHTbLjrNIt6k3R4Aq2Q9KNBwFw8rZcZuoJVASoeB6Es= -github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba/go.mod h1:lGLaihw972wB1AFBO88/Q69nOTzLqG/qR/uSp2YBLgM= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -303,7 +281,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -361,7 +338,6 @@ github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZ github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -402,8 +378,6 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4= github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= -github.com/weppos/publicsuffix-go v0.13.0 h1:0Tu1uzLBd1jPn4k6OnMmOPZH/l/9bj9kUOMMkoRs6Gg= -github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= 
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -499,7 +473,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -514,7 +487,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net 
v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -562,7 +534,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -582,7 +553,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -648,7 +618,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -763,7 +732,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/vendor/github.com/eggsampler/acme/v3/.gitignore b/vendor/github.com/eggsampler/acme/v3/.gitignore deleted file mode 100644 index 236968e6..00000000 --- a/vendor/github.com/eggsampler/acme/v3/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea/ -*.out -coverage* diff --git a/vendor/github.com/eggsampler/acme/v3/.travis.yml b/vendor/github.com/eggsampler/acme/v3/.travis.yml deleted file mode 100644 index 900ffd6d..00000000 --- 
a/vendor/github.com/eggsampler/acme/v3/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -language: go - -go: - - "1.11" - - "1.x" - -env: - - GO111MODULE=on - -sudo: required - -services: - - docker - -before_install: - - GO111MODULE=off go get github.com/mattn/goveralls - -script: - - unset TRAVIS_GO_VERSION - # test the examples first - - make clean examples - # test pebble integration - - make clean pebble - # test boulder integration - - make clean boulder - - goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/eggsampler/acme/v3/LICENSE b/vendor/github.com/eggsampler/acme/v3/LICENSE deleted file mode 100644 index b9a4b365..00000000 --- a/vendor/github.com/eggsampler/acme/v3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Isaac - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/eggsampler/acme/v3/Makefile b/vendor/github.com/eggsampler/acme/v3/Makefile deleted file mode 100644 index aebecc9a..00000000 --- a/vendor/github.com/eggsampler/acme/v3/Makefile +++ /dev/null @@ -1,66 +0,0 @@ - -.PHONY: test examples clean test_full pebble pebble_setup pebble_start pebble_wait pebble_stop boulder boulder_setup boulder_start boulder_stop - - -GOPATH ?= $(HOME)/go -BOULDER_PATH ?= $(GOPATH)/src/github.com/letsencrypt/boulder -PEBBLE_PATH ?= $(GOPATH)/src/github.com/letsencrypt/pebble -TEST_PATH ?= github.com/eggsampler/acme/v3 - - -# tests the code against a running ca instance -test: - -go clean -testcache - go test -v -race -coverprofile=coverage.out -covermode=atomic $(TEST_PATH) - -examples: - go build -o /dev/null examples/certbot/certbot.go - go build -o /dev/null examples/autocert/autocert.go - -clean: - rm -f coverage.out - -test_full: clean examples pebble pebble_stop boulder boulder_stop - - -pebble: pebble_setup pebble_start pebble_wait test pebble_stop - -pebble_setup: - mkdir -p $(PEBBLE_PATH) - git clone --depth 1 https://github.com/letsencrypt/pebble.git $(PEBBLE_PATH) \ - || (cd $(PEBBLE_PATH); git checkout -f master && git reset --hard HEAD && git pull -q) - docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down - -# runs an instance of pebble using docker -pebble_start: - docker-compose -f $(PEBBLE_PATH)/docker-compose.yml up -d - -# waits until pebble responds -pebble_wait: - while ! 
wget --delete-after -q --no-check-certificate "https://localhost:14000/dir" ; do sleep 1 ; done - -# stops the running pebble instance -pebble_stop: - docker-compose -f $(PEBBLE_PATH)/docker-compose.yml down - - -boulder: boulder_setup boulder_start boulder_wait test boulder_stop - -# NB: this edits docker-compose.yml -boulder_setup: - mkdir -p $(BOULDER_PATH) - git clone --depth 1 https://github.com/letsencrypt/boulder.git $(BOULDER_PATH) \ - || (cd $(BOULDER_PATH); git checkout -f master && git reset --hard HEAD && git pull -q) - docker-compose -f $(BOULDER_PATH)/docker-compose.yml down - -# runs an instance of boulder -boulder_start: - docker-compose -f $(BOULDER_PATH)/docker-compose.yml up -d - -# waits until boulder responds -boulder_wait: - while ! wget --delete-after -q --no-check-certificate "http://localhost:4001/directory" ; do sleep 1 ; done - -# stops the running docker instance -boulder_stop: - docker-compose -f $(BOULDER_PATH)/docker-compose.yml down diff --git a/vendor/github.com/eggsampler/acme/v3/README.md b/vendor/github.com/eggsampler/acme/v3/README.md deleted file mode 100644 index 389f3b80..00000000 --- a/vendor/github.com/eggsampler/acme/v3/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# eggsampler/acme - -[![GoDoc](https://godoc.org/github.com/eggsampler/acme?status.svg)](https://godoc.org/github.com/eggsampler/acme) -[![Build Status](https://travis-ci.com/eggsampler/acme.svg?branch=master)](https://travis-ci.com/eggsampler/acme) -[![Coverage Status](https://coveralls.io/repos/github/eggsampler/acme/badge.svg?branch=master)](https://coveralls.io/github/eggsampler/acme?branch=master) - -## About - -`eggsampler/acme` is a Go client library implementation for [RFC8555](https://tools.ietf.org/html/rfc8555) (previously ACME v2), specifically for use with the [Let's Encrypt](https://letsencrypt.org/)™ service. 
- -The library is designed to provide a zero external dependency wrapper over exposed directory endpoints and provide objects in easy to use structures. - -## Requirements - -A Go version of at least 1.11 is required as this repository is designed to be imported as a Go module. - -## Usage - -Simply import the module into a project, - -```go -import "github.com/eggsampler/acme/v3" -``` - -Note the `/v3` major version at the end. Due to the way modules function, this is the major version as represented in the `go.mod` file and latest git repo [semver](https://semver.org/) tag. -All functions are still exported and called using the `acme` package name. - -## Examples - -A simple [certbot](https://certbot.eff.org/)-like example is provided in the examples/certbot directory. -This code demonstrates account registration, new order submission, fulfilling challenges, finalising an order and fetching the issued certificate chain. - -An example of how to use the autocert package is also provided in examples/autocert. - -## Tests - -The tests can be run against an instance of [boulder](https://github.com/letsencrypt/boulder) or [pebble](https://github.com/letsencrypt/pebble). - -Challenge fulfilment is designed to use the new `challtestsrv` server present inside boulder and pebble which responds to dns queries and challenges as required. - -To run tests against an already running instance of boulder or pebble, use the `test` target in the Makefile. - -Some convenience targets for launching pebble/boulder using their respective docker compose files have also been included in the Makefile. 
diff --git a/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY b/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY deleted file mode 100644 index 1c53d754..00000000 --- a/vendor/github.com/eggsampler/acme/v3/THIRD-PARTY +++ /dev/null @@ -1,35 +0,0 @@ -This document contains Third Party Software Notices and/or Additional Terms and Conditions for licensed third party software components included within this product. - -== - -https://github.com/golang/crypto/blob/master/acme/jws.go -https://github.com/golang/crypto/blob/master/acme/jws_test.go -(with modifications) - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/eggsampler/acme/v3/account.go b/vendor/github.com/eggsampler/acme/v3/account.go deleted file mode 100644 index 2dbd30a5..00000000 --- a/vendor/github.com/eggsampler/acme/v3/account.go +++ /dev/null @@ -1,128 +0,0 @@ -package acme - -import ( - "crypto" - "encoding/json" - "errors" - "fmt" - "net/http" - "reflect" -) - -// NewAccount registers a new account with the acme service -func (c Client) NewAccount(privateKey crypto.Signer, onlyReturnExisting, termsOfServiceAgreed bool, contact ...string) (Account, error) { - newAccountReq := struct { - OnlyReturnExisting bool `json:"onlyReturnExisting"` - TermsOfServiceAgreed bool `json:"termsOfServiceAgreed"` - Contact []string `json:"contact,omitempty"` - }{ - OnlyReturnExisting: onlyReturnExisting, - TermsOfServiceAgreed: termsOfServiceAgreed, - Contact: contact, - } - - account := Account{} - resp, err := c.post(c.dir.NewAccount, "", privateKey, newAccountReq, &account, http.StatusOK, http.StatusCreated) - if err != nil { - return account, err - } - - account.URL = resp.Header.Get("Location") - account.PrivateKey = privateKey - - if account.Thumbprint == "" { - account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()) - if err != nil { - return account, fmt.Errorf("acme: error computing account thumbprint: %v", err) - } - } - - return account, nil -} - -// UpdateAccount updates an existing account with the acme service. 
-func (c Client) UpdateAccount(account Account, contact ...string) (Account, error) { - var updateAccountReq interface{} - - if !reflect.DeepEqual(account.Contact, contact) { - // Only provide a non-nil updateAccountReq when there is an update to be made. - updateAccountReq = struct { - Contact []string `json:"contact,omitempty"` - }{ - Contact: contact, - } - } else { - // Otherwise use "" to trigger a POST-as-GET to fetch up-to-date account - // information from the acme service. - updateAccountReq = "" - } - - _, err := c.post(account.URL, account.URL, account.PrivateKey, updateAccountReq, &account, http.StatusOK) - if err != nil { - return account, err - } - - if account.Thumbprint == "" { - account.Thumbprint, err = JWKThumbprint(account.PrivateKey.Public()) - if err != nil { - return account, fmt.Errorf("acme: error computing account thumbprint: %v", err) - } - } - - return account, nil -} - -// AccountKeyChange rolls over an account to a new key. -func (c Client) AccountKeyChange(account Account, newPrivateKey crypto.Signer) (Account, error) { - oldJwkKeyPub, err := jwkEncode(account.PrivateKey.Public()) - if err != nil { - return account, fmt.Errorf("acme: error encoding new private key: %v", err) - } - - keyChangeReq := struct { - Account string `json:"account"` - OldKey json.RawMessage `json:"oldKey"` - }{ - Account: account.URL, - OldKey: []byte(oldJwkKeyPub), - } - - innerJws, err := jwsEncodeJSON(keyChangeReq, newPrivateKey, "", "", c.dir.KeyChange) - if err != nil { - return account, fmt.Errorf("acme: error encoding inner jws: %v", err) - } - - if _, err := c.post(c.dir.KeyChange, account.URL, account.PrivateKey, json.RawMessage(innerJws), nil, http.StatusOK); err != nil { - return account, err - } - - account.PrivateKey = newPrivateKey - - return account, nil -} - -// DeactivateAccount deactivates a given account. 
-func (c Client) DeactivateAccount(account Account) (Account, error) { - deactivateReq := struct { - Status string `json:"status"` - }{ - Status: "deactivated", - } - - _, err := c.post(account.URL, account.URL, account.PrivateKey, deactivateReq, &account, http.StatusOK) - - return account, err -} - -// FetchOrderList fetches a list of orders from the account url provided in the account Orders field -func (c Client) FetchOrderList(account Account) (OrderList, error) { - orderList := OrderList{} - - if account.Orders == "" { - return orderList, errors.New("no order list for account") - } - - _, err := c.post(account.Orders, account.URL, account.PrivateKey, "", &orderList, http.StatusOK) - - return orderList, err -} diff --git a/vendor/github.com/eggsampler/acme/v3/acme.go b/vendor/github.com/eggsampler/acme/v3/acme.go deleted file mode 100644 index d104b908..00000000 --- a/vendor/github.com/eggsampler/acme/v3/acme.go +++ /dev/null @@ -1,294 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" - "time" -) - -const ( - // LetsEncryptProduction holds the production directory url - LetsEncryptProduction = "https://acme-v02.api.letsencrypt.org/directory" - - // LetsEncryptStaging holds the staging directory url - LetsEncryptStaging = "https://acme-staging-v02.api.letsencrypt.org/directory" - - userAgentString = "eggsampler-acme/1.0 Go-http-client/1.1" -) - -// NewClient creates a new acme client given a valid directory url. 
-func NewClient(directoryURL string, options ...OptionFunc) (Client, error) { - // Set a default http timeout of 60 seconds, this can be overridden - // via an OptionFunc eg: acme.NewClient(url, WithHTTPTimeout(10 * time.Second)) - httpClient := &http.Client{ - Timeout: 60 * time.Second, - } - - acmeClient := Client{ - httpClient: httpClient, - nonces: &nonceStack{}, - retryCount: 5, - } - - acmeClient.dir.URL = directoryURL - - for _, opt := range options { - if err := opt(&acmeClient); err != nil { - return acmeClient, fmt.Errorf("acme: error setting option: %v", err) - } - } - - if _, err := acmeClient.get(directoryURL, &acmeClient.dir, http.StatusOK); err != nil { - return acmeClient, err - } - - return acmeClient, nil -} - -// The directory object returned by the client connecting to a directory url. -func (c Client) Directory() Directory { - return c.dir -} - -// Helper function to get the poll interval and poll timeout, defaulting if 0 -func (c Client) getPollingDurations() (time.Duration, time.Duration) { - pollInterval := c.PollInterval - if pollInterval == 0 { - pollInterval = 500 * time.Millisecond - } - pollTimeout := c.PollTimeout - if pollTimeout == 0 { - pollTimeout = 30 * time.Second - } - return pollInterval, pollTimeout -} - -// Helper function to have a central point for performing http requests. -// Stores any returned nonces in the stack. 
-func (c Client) do(req *http.Request, addNonce bool) (*http.Response, error) { - // identifier for this client, as well as the default go user agent - if c.userAgentSuffix != "" { - req.Header.Set("User-Agent", userAgentString+" "+c.userAgentSuffix) - } else { - req.Header.Set("User-Agent", userAgentString) - } - - if c.acceptLanguage != "" { - req.Header.Set("Accept-Language", c.acceptLanguage) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return resp, err - } - - if addNonce { - c.nonces.push(resp.Header.Get("Replay-Nonce")) - } - - return resp, nil -} - -// Helper function to perform an http get request and read the body. -func (c Client) getRaw(url string, expectedStatus ...int) (*http.Response, []byte, error) { - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, nil, fmt.Errorf("acme: error creating request: %v", err) - } - - resp, err := c.do(req, true) - if err != nil { - return resp, nil, fmt.Errorf("acme: error fetching response: %v", err) - } - defer resp.Body.Close() - - if err := checkError(resp, expectedStatus...); err != nil { - return resp, nil, err - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return resp, body, fmt.Errorf("acme: error reading response body: %v", err) - } - - return resp, body, nil -} - -// Helper function for performing a http get on an acme resource. -func (c Client) get(url string, out interface{}, expectedStatus ...int) (*http.Response, error) { - resp, body, err := c.getRaw(url, expectedStatus...) 
- if err != nil { - return resp, err - } - - if len(body) > 0 && out != nil { - if err := json.Unmarshal(body, out); err != nil { - return resp, fmt.Errorf("acme: error parsing response body: %v", err) - } - } - - return resp, nil -} - -func (c Client) nonce() (string, error) { - nonce := c.nonces.pop() - if nonce != "" { - return nonce, nil - } - - if c.dir.NewNonce == "" { - return "", errors.New("acme: no new nonce url") - } - - req, err := http.NewRequest("HEAD", c.dir.NewNonce, nil) - if err != nil { - return "", fmt.Errorf("acme: error creating new nonce request: %v", err) - } - - resp, err := c.do(req, false) - if err != nil { - return "", fmt.Errorf("acme: error fetching new nonce: %v", err) - } - - nonce = resp.Header.Get("Replay-Nonce") - return nonce, nil -} - -// Helper function to perform an http post request and read the body. -// Will attempt to retry if error is badNonce -func (c Client) postRaw(retryCount int, requestURL, kid string, privateKey crypto.Signer, payload interface{}, expectedStatus []int) (*http.Response, []byte, error) { - nonce, err := c.nonce() - if err != nil { - return nil, nil, err - } - - data, err := jwsEncodeJSON(payload, privateKey, keyID(kid), nonce, requestURL) - if err != nil { - return nil, nil, fmt.Errorf("acme: error encoding json payload: %v", err) - } - - req, err := http.NewRequest(http.MethodPost, requestURL, bytes.NewReader(data)) - if err != nil { - return nil, nil, fmt.Errorf("acme: error creating request: %v", err) - } - req.Header.Set("Content-Type", "application/jose+json") - - resp, err := c.do(req, true) - if err != nil { - return resp, nil, fmt.Errorf("acme: error sending request: %v", err) - } - defer resp.Body.Close() - - if err := checkError(resp, expectedStatus...); err != nil { - prob, ok := err.(Problem) - if !ok { - // don't retry for an error we don't know about - return resp, nil, err - } - if retryCount >= c.retryCount { - // don't attempt to retry if too many retries - return resp, nil, err - } - 
if strings.HasSuffix(prob.Type, ":badNonce") { - // only retry if error is badNonce - return c.postRaw(retryCount+1, requestURL, kid, privateKey, payload, expectedStatus) - } - return resp, nil, err - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return resp, body, fmt.Errorf("acme: error reading response body: %v", err) - } - - return resp, body, nil -} - -// Helper function for performing a http post to an acme resource. -func (c Client) post(requestURL, keyID string, privateKey crypto.Signer, payload interface{}, out interface{}, expectedStatus ...int) (*http.Response, error) { - resp, body, err := c.postRaw(0, requestURL, keyID, privateKey, payload, expectedStatus) - if err != nil { - return resp, err - } - - if _, b := os.LookupEnv("ACME_DEBUG_POST"); b { - fmt.Println() - fmt.Println(string(body)) - fmt.Println() - } - - if len(body) > 0 && out != nil { - if err := json.Unmarshal(body, out); err != nil { - return resp, fmt.Errorf("acme: error parsing response: %v - %s", err, string(body)) - } - } - - return resp, nil -} - -var regLink = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`) - -// Fetches a http Link header from a http response -func fetchLink(resp *http.Response, wantedLink string) string { - if resp == nil { - return "" - } - linkHeader := resp.Header["Link"] - if len(linkHeader) == 0 { - return "" - } - for _, l := range linkHeader { - matches := regLink.FindAllStringSubmatch(l, -1) - for _, m := range matches { - if len(m) != 3 { - continue - } - if m[2] == wantedLink { - return m[1] - } - } - } - return "" -} - -// FetchRaw is a helper function to assist with POST-AS-GET requests -func (c Client) Fetch(account Account, requestURL string, result interface{}, expectedStatus ...int) error { - if len(expectedStatus) == 0 { - expectedStatus = []int{http.StatusOK} - } - _, err := c.post(requestURL, account.URL, account.PrivateKey, "", result, expectedStatus...) 
- - return err -} - -// Fetches all http Link header from a http response -func fetchLinks(resp *http.Response, wantedLink string) []string { - if resp == nil { - return nil - } - linkHeader := resp.Header["Link"] - if len(linkHeader) == 0 { - return nil - } - var links []string - for _, l := range linkHeader { - matches := regLink.FindAllStringSubmatch(l, -1) - for _, m := range matches { - if len(m) != 3 { - continue - } - if m[2] == wantedLink { - links = append(links, m[1]) - } - } - } - return links -} diff --git a/vendor/github.com/eggsampler/acme/v3/authorization.go b/vendor/github.com/eggsampler/acme/v3/authorization.go deleted file mode 100644 index 09d5906d..00000000 --- a/vendor/github.com/eggsampler/acme/v3/authorization.go +++ /dev/null @@ -1,43 +0,0 @@ -package acme - -import "net/http" - -// FetchAuthorization fetches an authorization from an authorization url provided in an order. -func (c Client) FetchAuthorization(account Account, authURL string) (Authorization, error) { - authResp := Authorization{} - _, err := c.post(authURL, account.URL, account.PrivateKey, "", &authResp, http.StatusOK) - if err != nil { - return authResp, err - } - - for i := 0; i < len(authResp.Challenges); i++ { - if authResp.Challenges[i].KeyAuthorization == "" { - authResp.Challenges[i].KeyAuthorization = authResp.Challenges[i].Token + "." + account.Thumbprint - } - } - - authResp.ChallengeMap = map[string]Challenge{} - authResp.ChallengeTypes = []string{} - for _, c := range authResp.Challenges { - authResp.ChallengeMap[c.Type] = c - authResp.ChallengeTypes = append(authResp.ChallengeTypes, c.Type) - } - - authResp.URL = authURL - - return authResp, nil -} - -// DeactivateAuthorization deactivate a provided authorization url from an order. 
-func (c Client) DeactivateAuthorization(account Account, authURL string) (Authorization, error) { - deactivateReq := struct { - Status string `json:"status"` - }{ - Status: "deactivated", - } - deactivateResp := Authorization{} - - _, err := c.post(authURL, account.URL, account.PrivateKey, deactivateReq, &deactivateResp, http.StatusOK) - - return deactivateResp, err -} diff --git a/vendor/github.com/eggsampler/acme/v3/autocert.go b/vendor/github.com/eggsampler/acme/v3/autocert.go deleted file mode 100644 index eea65ec3..00000000 --- a/vendor/github.com/eggsampler/acme/v3/autocert.go +++ /dev/null @@ -1,430 +0,0 @@ -package acme - -// Similar to golang.org/x/crypto/acme/autocert - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "path" - "strings" - "sync" -) - -// HostCheck function prototype to implement for checking hosts against before issuing certificates -type HostCheck func(host string) error - -// WhitelistHosts implements a simple whitelist HostCheck -func WhitelistHosts(hosts ...string) HostCheck { - m := map[string]bool{} - for _, v := range hosts { - m[v] = true - } - - return func(host string) error { - if !m[host] { - return errors.New("autocert: host not whitelisted") - } - return nil - } -} - -// AutoCert is a stateful certificate manager for issuing certificates on connecting hosts -type AutoCert struct { - // Acme directory Url - // If nil, uses `LetsEncryptStaging` - DirectoryURL string - - // Options contains the options used for creating the acme client - Options []OptionFunc - - // A function to check whether a host is allowed or not - // If nil, all hosts allowed - // Use `WhitelistHosts(hosts ...string)` for a simple white list of hostnames - HostCheck HostCheck - - // Cache dir to store account data and certificates - // If nil, does not write cache data to file - CacheDir string - - // When using a 
staging environment, include a root certificate for verification purposes - RootCert string - - // Called before updating challenges - PreUpdateChallengeHook func(Account, Challenge) - - // Mapping of token -> keyauth - // Protected by a mutex, but not rwmutex because tokens are deleted once read - tokensLock sync.RWMutex - tokens map[string][]byte - - // Mapping of cache key -> value - cacheLock sync.Mutex - cache map[string][]byte - - // read lock around getting existing certs - // write lock around issuing new certificate - certLock sync.RWMutex - - client Client -} - -// HTTPHandler Wraps a handler and provides serving of http-01 challenge tokens from /.well-known/acme-challenge/ -// If handler is nil, will redirect all traffic otherwise to https -func (m *AutoCert) HTTPHandler(handler http.Handler) http.Handler { - if handler == nil { - handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusMovedPermanently) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { - handler.ServeHTTP(w, r) - return - } - - if err := m.checkHost(r.Host); err != nil { - http.Error(w, err.Error(), http.StatusForbidden) - return - } - - token := path.Base(r.URL.Path) - m.tokensLock.RLock() - defer m.tokensLock.RUnlock() - keyAuth := m.tokens[token] - if len(keyAuth) == 0 { - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - _, _ = w.Write(keyAuth) - }) -} - -// GetCertificate implements a tls.Config.GetCertificate hook -func (m *AutoCert) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - name := strings.TrimSuffix(hello.ServerName, ".") - - if name == "" { - return nil, errors.New("autocert: missing server name") - } - if !strings.Contains(strings.Trim(name, "."), ".") { - return nil, errors.New("autocert: server name component count 
invalid") - } - if strings.ContainsAny(name, `/\`) { - return nil, errors.New("autocert: server name contains invalid character") - } - - // check the hostname is allowed - if err := m.checkHost(name); err != nil { - return nil, err - } - - // check if there's an existing cert - m.certLock.RLock() - existingCert := m.getExistingCert(name) - m.certLock.RUnlock() - if existingCert != nil { - return existingCert, nil - } - - // if not, attempt to issue a new cert - m.certLock.Lock() - defer m.certLock.Unlock() - return m.issueCert(name) -} - -func (m *AutoCert) getDirectoryURL() string { - if m.DirectoryURL != "" { - return m.DirectoryURL - } - - return LetsEncryptStaging -} - -func (m *AutoCert) getCache(keys ...string) []byte { - key := strings.Join(keys, "-") - - m.cacheLock.Lock() - defer m.cacheLock.Unlock() - - b := m.cache[key] - if len(b) > 0 { - return b - } - - if m.CacheDir == "" { - return nil - } - - b, _ = ioutil.ReadFile(path.Join(m.CacheDir, key)) - if len(b) == 0 { - return nil - } - - if m.cache == nil { - m.cache = map[string][]byte{} - } - m.cache[key] = b - return b -} - -func (m *AutoCert) putCache(data []byte, keys ...string) context.Context { - ctx, cancel := context.WithCancel(context.Background()) - - key := strings.Join(keys, "-") - - m.cacheLock.Lock() - defer m.cacheLock.Unlock() - - if m.cache == nil { - m.cache = map[string][]byte{} - } - m.cache[key] = data - - if m.CacheDir == "" { - cancel() - return ctx - } - - go func() { - _ = ioutil.WriteFile(path.Join(m.CacheDir, key), data, 0700) - cancel() - }() - - return ctx -} - -func (m *AutoCert) checkHost(name string) error { - if m.HostCheck == nil { - return nil - } - return m.HostCheck(name) -} - -func (m *AutoCert) getExistingCert(name string) *tls.Certificate { - // check for a stored cert - certData := m.getCache("cert", name) - if len(certData) == 0 { - // no cert - return nil - } - - privBlock, pubData := pem.Decode(certData) - if len(pubData) == 0 { - // no public key data 
(cert/issuer), ignore - return nil - } - - // decode pub chain - var pubDER [][]byte - var pub []byte - for len(pubData) > 0 { - var b *pem.Block - b, pubData = pem.Decode(pubData) - if b == nil { - break - } - pubDER = append(pubDER, b.Bytes) - pub = append(pub, b.Bytes...) - } - if len(pubData) > 0 { - // leftover data in file - possibly corrupt, ignore - return nil - } - - certs, err := x509.ParseCertificates(pub) - if err != nil { - // bad certificates, ignore - return nil - } - - leaf := certs[0] - - // add any intermediate certs if present - var intermediates *x509.CertPool - if len(certs) > 1 { - intermediates = x509.NewCertPool() - for i := 1; i < len(certs); i++ { - intermediates.AddCert(certs[i]) - } - } - - // add a root certificate if present - var roots *x509.CertPool - if m.RootCert != "" { - roots = x509.NewCertPool() - rootBlock, _ := pem.Decode([]byte(m.RootCert)) - rootCert, err := x509.ParseCertificate(rootBlock.Bytes) - if err != nil { - return nil - } - roots.AddCert(rootCert) - } - - if _, err := leaf.Verify(x509.VerifyOptions{DNSName: name, Intermediates: intermediates, Roots: roots}); err != nil { - // invalid certificates , ignore - return nil - } - - privKey, err := x509.ParseECPrivateKey(privBlock.Bytes) - if err != nil { - // invalid private key, ignore - return nil - } - - return &tls.Certificate{ - Certificate: pubDER, - PrivateKey: privKey, - Leaf: leaf, - } -} - -func (m *AutoCert) issueCert(domainName string) (*tls.Certificate, error) { - // attempt to load an existing account key - var privKey *ecdsa.PrivateKey - if keyData := m.getCache("account"); len(keyData) > 0 { - block, _ := pem.Decode(keyData) - x509Encoded := block.Bytes - privKey, _ = x509.ParseECPrivateKey(x509Encoded) - } - - // otherwise generate a new one - if privKey == nil { - var err error - privKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, fmt.Errorf("autocert: error generating new account key: %v", err) - } - - 
x509Encoded, _ := x509.MarshalECPrivateKey(privKey) - pemEncoded := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: x509Encoded}) - - m.putCache(pemEncoded, "account") - } - - // create a new client if one doesn't exist - if m.client.Directory().URL == "" { - var err error - m.client, err = NewClient(m.getDirectoryURL(), m.Options...) - if err != nil { - return nil, err - } - } - - // create/fetch acme account - account, err := m.client.NewAccount(privKey, false, true) - if err != nil { - return nil, fmt.Errorf("autocert: error creating/fetching account: %v", err) - } - - // start a new order process - order, err := m.client.NewOrderDomains(account, domainName) - if err != nil { - return nil, fmt.Errorf("autocert: error creating new order for domain %s: %v", domainName, err) - } - - // loop through each of the provided authorization Urls - for _, authURL := range order.Authorizations { - auth, err := m.client.FetchAuthorization(account, authURL) - if err != nil { - return nil, fmt.Errorf("autocert: error fetching authorization Url %q: %v", authURL, err) - } - - if auth.Status == "valid" { - continue - } - - chal, ok := auth.ChallengeMap[ChallengeTypeHTTP01] - if !ok { - return nil, fmt.Errorf("autocert: unable to find http-01 challenge for auth %s, Url: %s", auth.Identifier.Value, authURL) - } - - m.tokensLock.Lock() - if m.tokens == nil { - m.tokens = map[string][]byte{} - } - m.tokens[chal.Token] = []byte(chal.KeyAuthorization) - m.tokensLock.Unlock() - - if m.PreUpdateChallengeHook != nil { - m.PreUpdateChallengeHook(account, chal) - } - - chal, err = m.client.UpdateChallenge(account, chal) - if err != nil { - return nil, fmt.Errorf("autocert: error updating authorization %s challenge (Url: %s) : %v", auth.Identifier.Value, authURL, err) - } - - m.tokensLock.Lock() - delete(m.tokens, chal.Token) - m.tokensLock.Unlock() - } - - // generate private key for cert - certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - 
return nil, fmt.Errorf("autocert: error generating certificate key for %s: %v", domainName, err) - } - certKeyEnc, err := x509.MarshalECPrivateKey(certKey) - if err != nil { - return nil, fmt.Errorf("autocert: error encoding certificate key for %s: %v", domainName, err) - } - certKeyPem := pem.EncodeToMemory(&pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: certKeyEnc, - }) - - // create the new csr template - tpl := &x509.CertificateRequest{ - SignatureAlgorithm: x509.ECDSAWithSHA256, - PublicKeyAlgorithm: x509.ECDSA, - PublicKey: certKey.Public(), - Subject: pkix.Name{CommonName: domainName}, - DNSNames: []string{domainName}, - } - csrDer, err := x509.CreateCertificateRequest(rand.Reader, tpl, certKey) - if err != nil { - return nil, fmt.Errorf("autocert: error creating certificate request for %s: %v", domainName, err) - } - csr, err := x509.ParseCertificateRequest(csrDer) - if err != nil { - return nil, fmt.Errorf("autocert: error parsing certificate request for %s: %v", domainName, err) - } - - // finalize the order with the acme server given a csr - order, err = m.client.FinalizeOrder(account, order, csr) - if err != nil { - return nil, fmt.Errorf("autocert: error finalizing order for %s: %v", domainName, err) - } - - // fetch the certificate chain from the finalized order provided by the acme server - certs, err := m.client.FetchCertificates(account, order.Certificate) - if err != nil { - return nil, fmt.Errorf("autocert: error fetching order certificates for %s: %v", domainName, err) - } - - certPem := certKeyPem - // var certDer [][]byte - for _, c := range certs { - b := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: c.Raw, - }) - certPem = append(certPem, b...) 
- // certDer = append(certDer, c.Raw) - } - m.putCache(certPem, "cert", domainName) - - return m.getExistingCert(domainName), nil -} diff --git a/vendor/github.com/eggsampler/acme/v3/certificate.go b/vendor/github.com/eggsampler/acme/v3/certificate.go deleted file mode 100644 index 0791f67f..00000000 --- a/vendor/github.com/eggsampler/acme/v3/certificate.go +++ /dev/null @@ -1,106 +0,0 @@ -package acme - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "fmt" - "net/http" -) - -func (c Client) decodeCertificateChain(body []byte, resp *http.Response, account Account) ([]*x509.Certificate, error) { - var certs []*x509.Certificate - for { - var p *pem.Block - p, body = pem.Decode(body) - if p == nil { - break - } - cert, err := x509.ParseCertificate(p.Bytes) - if err != nil { - return certs, fmt.Errorf("acme: error parsing certificate: %v", err) - } - certs = append(certs, cert) - } - - up := fetchLink(resp, "up") - if up != "" { - upCerts, err := c.FetchCertificates(account, up) - if err != nil { - return certs, fmt.Errorf("acme: error fetching up cert: %v", err) - } - if len(upCerts) != 0 { - certs = append(certs, upCerts...) - } - } - - return certs, nil -} - -// FetchCertificates downloads a certificate chain from a url given in an order certificate. -func (c Client) FetchCertificates(account Account, certificateURL string) ([]*x509.Certificate, error) { - resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) - if err != nil { - return nil, err - } - - return c.decodeCertificateChain(body, resp, account) -} - -// FetchAllCertificates downloads a certificate chain from a url given in an order certificate, as well as any alternate certificates if provided. -// Returns a mapping of certificate urls to the certificate chain. 
-func (c Client) FetchAllCertificates(account Account, certificateURL string) (map[string][]*x509.Certificate, error) { - resp, body, err := c.postRaw(0, certificateURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) - if err != nil { - return nil, err - } - - certChain, err := c.decodeCertificateChain(body, resp, account) - if err != nil { - return nil, err - } - - certs := map[string][]*x509.Certificate{ - certificateURL: certChain, - } - - alternates := fetchLinks(resp, "alternate") - - for _, altURL := range alternates { - altResp, altBody, err := c.postRaw(0, altURL, account.URL, account.PrivateKey, "", []int{http.StatusOK}) - if err != nil { - return certs, fmt.Errorf("acme: error fetching alt cert chain at %q - %v", altURL, err) - } - altCertChain, err := c.decodeCertificateChain(altBody, altResp, account) - if err != nil { - return certs, fmt.Errorf("acme: error decoding alt cert chain at %q - %v", altURL, err) - } - certs[altURL] = altCertChain - } - - return certs, nil - -} - -// RevokeCertificate revokes a given certificate given the certificate key or account key, and a reason. 
-func (c Client) RevokeCertificate(account Account, cert *x509.Certificate, key crypto.Signer, reason int) error { - revokeReq := struct { - Certificate string `json:"certificate"` - Reason int `json:"reason"` - }{ - Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw), - Reason: reason, - } - - kid := "" - if key == account.PrivateKey { - kid = account.URL - } - - if _, err := c.post(c.dir.RevokeCert, kid, key, revokeReq, nil, http.StatusOK); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/eggsampler/acme/v3/challenge.go b/vendor/github.com/eggsampler/acme/v3/challenge.go deleted file mode 100644 index 6d57bfb9..00000000 --- a/vendor/github.com/eggsampler/acme/v3/challenge.go +++ /dev/null @@ -1,102 +0,0 @@ -package acme - -import ( - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "net/http" - "time" -) - -// EncodeDNS01KeyAuthorization encodes a key authorization and provides a value to be put in the TXT record for the _acme-challenge DNS entry. -func EncodeDNS01KeyAuthorization(keyAuth string) string { - h := sha256.Sum256([]byte(keyAuth)) - return base64.RawURLEncoding.EncodeToString(h[:]) -} - -// Helper function to determine whether a challenge is "finished" by it's status. -func checkUpdatedChallengeStatus(challenge Challenge) (bool, error) { - switch challenge.Status { - case "pending": - // Challenge objects are created in the "pending" state. - // TODO: https://github.com/letsencrypt/boulder/issues/3346 - // return true, errors.New("acme: unexpected 'pending' challenge state") - return false, nil - - case "processing": - // They transition to the "processing" state when the client responds to the - // challenge and the server begins attempting to validate that the client has completed the challenge. 
- return false, nil - - case "valid": - // If validation is successful, the challenge moves to the "valid" state - return true, nil - - case "invalid": - // if there is an error, the challenge moves to the "invalid" state. - if challenge.Error.Type != "" { - return true, challenge.Error - } - return true, errors.New("acme: challenge is invalid, no error provided") - - default: - return true, fmt.Errorf("acme: unknown challenge status: %s", challenge.Status) - } -} - -// UpdateChallenge responds to a challenge to indicate to the server to complete the challenge. -func (c Client) UpdateChallenge(account Account, challenge Challenge) (Challenge, error) { - resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, struct{}{}, &challenge, http.StatusOK) - if err != nil { - return challenge, err - } - - if loc := resp.Header.Get("Location"); loc != "" { - challenge.URL = loc - } - challenge.AuthorizationURL = fetchLink(resp, "up") - - if finished, err := checkUpdatedChallengeStatus(challenge); finished { - return challenge, err - } - - pollInterval, pollTimeout := c.getPollingDurations() - end := time.Now().Add(pollTimeout) - for { - if time.Now().After(end) { - return challenge, errors.New("acme: challenge update timeout") - } - time.Sleep(pollInterval) - - resp, err := c.post(challenge.URL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK) - if err != nil { - // i don't think it's worth exiting the loop on this error - // it could just be connectivity issue that's resolved before the timeout duration - continue - } - - if loc := resp.Header.Get("Location"); loc != "" { - challenge.URL = loc - } - challenge.AuthorizationURL = fetchLink(resp, "up") - - if finished, err := checkUpdatedChallengeStatus(challenge); finished { - return challenge, err - } - } -} - -// FetchChallenge fetches an existing challenge from the given url. 
-func (c Client) FetchChallenge(account Account, challengeURL string) (Challenge, error) { - challenge := Challenge{} - resp, err := c.post(challengeURL, account.URL, account.PrivateKey, "", &challenge, http.StatusOK) - if err != nil { - return challenge, err - } - - challenge.URL = resp.Header.Get("Location") - challenge.AuthorizationURL = fetchLink(resp, "up") - - return challenge, nil -} diff --git a/vendor/github.com/eggsampler/acme/v3/jws.go b/vendor/github.com/eggsampler/acme/v3/jws.go deleted file mode 100644 index 9461d969..00000000 --- a/vendor/github.com/eggsampler/acme/v3/jws.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the THIRD-PARTY file. - -package acme - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - _ "crypto/sha512" // need for EC keys - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "math/big" -) - -var errUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") - -// keyID is the account identity provided by a CA during registration. -type keyID string - -// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID. -// See jwsEncodeJSON for details. -const noKeyID = keyID("") - -// noPayload indicates jwsEncodeJSON will encode zero-length octet string -// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make -// authenticated GET requests via POSTing with an empty payload. -// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details. -const noPayload = "" - -// jwsEncodeJSON signs claimset using provided key and a nonce. -// The result is serialized in JSON format containing either kid or jwk -// fields based on the provided keyID value. -// -// If kid is non-empty, its quoted value is inserted in the protected head -// as "kid" field value. 
Otherwise, JWK is computed using jwkEncode and inserted -// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive. -// -// See https://tools.ietf.org/html/rfc7515#section-7. -func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) { - alg, sha := jwsHasher(key.Public()) - if alg == "" || !sha.Available() { - return nil, errUnsupportedKey - } - var phead string - switch kid { - case noKeyID: - jwk, err := jwkEncode(key.Public()) - if err != nil { - return nil, err - } - phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url) - default: - phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url) - } - phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) - var payload string - if claimset != noPayload { - cs, err := json.Marshal(claimset) - if err != nil { - return nil, err - } - payload = base64.RawURLEncoding.EncodeToString(cs) - } - hash := sha.New() - _, _ = hash.Write([]byte(phead + "." + payload)) - sig, err := jwsSign(key, sha, hash.Sum(nil)) - if err != nil { - return nil, err - } - - enc := struct { - Protected string `json:"protected"` - Payload string `json:"payload"` - Sig string `json:"signature"` - }{ - Protected: phead, - Payload: payload, - Sig: base64.RawURLEncoding.EncodeToString(sig), - } - return json.Marshal(&enc) -} - -// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. -// The result is also suitable for creating a JWK thumbprint. -// https://tools.ietf.org/html/rfc7517 -func jwkEncode(pub crypto.PublicKey) (string, error) { - switch pub := pub.(type) { - case *rsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.3.1 - n := pub.N - e := big.NewInt(int64(pub.E)) - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
- return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, - base64.RawURLEncoding.EncodeToString(e.Bytes()), - base64.RawURLEncoding.EncodeToString(n.Bytes()), - ), nil - case *ecdsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.2.1 - p := pub.Curve.Params() - n := p.BitSize / 8 - if p.BitSize%8 != 0 { - n++ - } - x := pub.X.Bytes() - if n > len(x) { - x = append(make([]byte, n-len(x)), x...) - } - y := pub.Y.Bytes() - if n > len(y) { - y = append(make([]byte, n-len(y)), y...) - } - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. - return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, - p.Name, - base64.RawURLEncoding.EncodeToString(x), - base64.RawURLEncoding.EncodeToString(y), - ), nil - } - return "", errUnsupportedKey -} - -// jwsSign signs the digest using the given key. -// The hash is unused for ECDSA keys. -// -// Note: non-stdlib crypto.Signer implementations are expected to return -// the signature in the format as specified in RFC7518. -// See https://tools.ietf.org/html/rfc7518 for more details. -func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { - if key, ok := key.(*ecdsa.PrivateKey); ok { - // The key.Sign method of ecdsa returns ASN1-encoded signature. - // So, we use the package Sign function instead - // to get R and S values directly and format the result accordingly. - r, s, err := ecdsa.Sign(rand.Reader, key, digest) - if err != nil { - return nil, err - } - rb, sb := r.Bytes(), s.Bytes() - size := key.Params().BitSize / 8 - if size%8 > 0 { - size++ - } - sig := make([]byte, size*2) - copy(sig[size-len(rb):], rb) - copy(sig[size*2-len(sb):], sb) - return sig, nil - } - return key.Sign(rand.Reader, digest, hash) -} - -// jwsHasher indicates suitable JWS algorithm name and a hash function -// to use for signing a digest with the provided key. -// It returns ("", 0) if the key is not supported. 
-func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { - switch pub := pub.(type) { - case *rsa.PublicKey: - return "RS256", crypto.SHA256 - case *ecdsa.PublicKey: - switch pub.Params().Name { - case "P-256": - return "ES256", crypto.SHA256 - case "P-384": - return "ES384", crypto.SHA384 - case "P-521": - return "ES512", crypto.SHA512 - } - } - return "", 0 -} - -// JWKThumbprint creates a JWK thumbprint out of pub -// as specified in https://tools.ietf.org/html/rfc7638. -func JWKThumbprint(pub crypto.PublicKey) (string, error) { - jwk, err := jwkEncode(pub) - if err != nil { - return "", err - } - b := sha256.Sum256([]byte(jwk)) - return base64.RawURLEncoding.EncodeToString(b[:]), nil -} diff --git a/vendor/github.com/eggsampler/acme/v3/nonce.go b/vendor/github.com/eggsampler/acme/v3/nonce.go deleted file mode 100644 index 2ef9aca7..00000000 --- a/vendor/github.com/eggsampler/acme/v3/nonce.go +++ /dev/null @@ -1,45 +0,0 @@ -package acme - -import ( - "sync" -) - -// Simple thread-safe stack impl -type nonceStack struct { - lock sync.Mutex - stack []string -} - -// Pushes a nonce to the stack. -// Doesn't push empty nonces, or if there's more than 100 nonces on the stack -func (ns *nonceStack) push(v string) { - if v == "" { - return - } - - ns.lock.Lock() - defer ns.lock.Unlock() - - if len(ns.stack) > 100 { - return - } - - ns.stack = append(ns.stack, v) -} - -// Pops a nonce from the stack. 
-// Returns empty string if there are no nonces -func (ns *nonceStack) pop() string { - ns.lock.Lock() - defer ns.lock.Unlock() - - n := len(ns.stack) - if n == 0 { - return "" - } - - v := ns.stack[n-1] - ns.stack = ns.stack[:n-1] - - return v -} diff --git a/vendor/github.com/eggsampler/acme/v3/options.go b/vendor/github.com/eggsampler/acme/v3/options.go deleted file mode 100644 index ff19867f..00000000 --- a/vendor/github.com/eggsampler/acme/v3/options.go +++ /dev/null @@ -1,70 +0,0 @@ -package acme - -import ( - "crypto/tls" - "errors" - "net/http" - "time" -) - -// OptionFunc function prototype for passing options to NewClient -type OptionFunc func(client *Client) error - -// WithHTTPTimeout sets a timeout on the http client used by the Client -func WithHTTPTimeout(duration time.Duration) OptionFunc { - return func(client *Client) error { - client.httpClient.Timeout = duration - return nil - } -} - -// WithInsecureSkipVerify sets InsecureSkipVerify on the http client transport tls client config used by the Client -func WithInsecureSkipVerify() OptionFunc { - return func(client *Client) error { - client.httpClient.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - return nil - } -} - -// WithUserAgentSuffix appends a user agent suffix for http requests to acme resources -func WithUserAgentSuffix(userAgentSuffix string) OptionFunc { - return func(client *Client) error { - client.userAgentSuffix = userAgentSuffix - return nil - } -} - -// WithAcceptLanguage sets an Accept-Language header on http requests -func WithAcceptLanguage(acceptLanguage string) OptionFunc { - return func(client *Client) error { - client.acceptLanguage = acceptLanguage - return nil - } -} - -// WithRetryCount sets the number of times the acme client retries when receiving an api error (eg, nonce failures, etc). 
-// Default: 5 -func WithRetryCount(retryCount int) OptionFunc { - return func(client *Client) error { - if retryCount < 1 { - return errors.New("retryCount must be > 0") - } - client.retryCount = retryCount - return nil - } -} - -// WithHTTPClient Allows setting a custom http client for acme connections -func WithHTTPClient(httpClient *http.Client) OptionFunc { - return func(client *Client) error { - if httpClient == nil { - return errors.New("client must not be nil") - } - client.httpClient = httpClient - return nil - } -} diff --git a/vendor/github.com/eggsampler/acme/v3/order.go b/vendor/github.com/eggsampler/acme/v3/order.go deleted file mode 100644 index 6604a137..00000000 --- a/vendor/github.com/eggsampler/acme/v3/order.go +++ /dev/null @@ -1,136 +0,0 @@ -package acme - -import ( - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "net/http" - "time" -) - -// NewOrder initiates a new order for a new certificate. -func (c Client) NewOrder(account Account, identifiers []Identifier) (Order, error) { - newOrderReq := struct { - Identifiers []Identifier `json:"identifiers"` - }{ - Identifiers: identifiers, - } - newOrderResp := Order{} - resp, err := c.post(c.dir.NewOrder, account.URL, account.PrivateKey, newOrderReq, &newOrderResp, http.StatusCreated) - if err != nil { - return newOrderResp, err - } - - newOrderResp.URL = resp.Header.Get("Location") - - return newOrderResp, nil -} - -// NewOrderDomains is a wrapper for NewOrder(AcmeAccount, []AcmeIdentifiers) -// Creates a dns identifier for each provided domain -func (c Client) NewOrderDomains(account Account, domains ...string) (Order, error) { - if len(domains) == 0 { - return Order{}, errors.New("acme: no domains provided") - } - - var ids []Identifier - for _, d := range domains { - ids = append(ids, Identifier{Type: "dns", Value: d}) - } - - return c.NewOrder(account, ids) -} - -// FetchOrder fetches an existing order given an order url. 
-func (c Client) FetchOrder(account Account, orderURL string) (Order, error) { - orderResp := Order{ - URL: orderURL, // boulder response doesn't seem to contain location header for this request - } - _, err := c.post(orderURL, account.URL, account.PrivateKey, "", &orderResp, http.StatusOK) - - return orderResp, err -} - -// Helper function to determine whether an order is "finished" by it's status. -func checkFinalizedOrderStatus(order Order) (bool, error) { - switch order.Status { - case "invalid": - // "invalid": The certificate will not be issued. Consider this - // order process abandoned. - if order.Error.Type != "" { - return true, order.Error - } - return true, errors.New("acme: finalized order is invalid, no error provided") - - case "pending": - // "pending": The server does not believe that the client has - // fulfilled the requirements. Check the "authorizations" array for - // entries that are still pending. - return true, errors.New("acme: authorizations not fulfilled") - - case "ready": - // "ready": The server agrees that the requirements have been - // fulfilled, and is awaiting finalization. Submit a finalization - // request. - return true, errors.New("acme: unexpected 'ready' state") - - case "processing": - // "processing": The certificate is being issued. Send a GET request - // after the time given in the "Retry-After" header field of the - // response, if any. - return false, nil - - case "valid": - // "valid": The server has issued the certificate and provisioned its - // URL to the "certificate" field of the order. Download the - // certificate. - return true, nil - - default: - return true, fmt.Errorf("acme: unknown order status: %s", order.Status) - } -} - -// FinalizeOrder indicates to the acme server that the client considers an order complete and "finalizes" it. -// If the server believes the authorizations have been filled successfully, a certificate should then be available. 
-// This function assumes that the order status is "ready". -func (c Client) FinalizeOrder(account Account, order Order, csr *x509.CertificateRequest) (Order, error) { - finaliseReq := struct { - Csr string `json:"csr"` - }{ - Csr: base64.RawURLEncoding.EncodeToString(csr.Raw), - } - - resp, err := c.post(order.Finalize, account.URL, account.PrivateKey, finaliseReq, &order, http.StatusOK) - if err != nil { - return order, err - } - - order.URL = resp.Header.Get("Location") - - if finished, err := checkFinalizedOrderStatus(order); finished { - return order, err - } - - pollInterval, pollTimeout := c.getPollingDurations() - end := time.Now().Add(pollTimeout) - for { - if time.Now().After(end) { - return order, errors.New("acme: finalized order timeout") - } - time.Sleep(pollInterval) - - if _, err := c.post(order.URL, account.URL, account.PrivateKey, "", &order, http.StatusOK); err != nil { - // i dont think it's worth exiting the loop on this error - // it could just be connectivity issue thats resolved before the timeout duration - continue - } - - order.URL = resp.Header.Get("Location") - - if finished, err := checkFinalizedOrderStatus(order); finished { - return order, err - } - } -} diff --git a/vendor/github.com/eggsampler/acme/v3/problem.go b/vendor/github.com/eggsampler/acme/v3/problem.go deleted file mode 100644 index 4c3ae0d5..00000000 --- a/vendor/github.com/eggsampler/acme/v3/problem.go +++ /dev/null @@ -1,65 +0,0 @@ -package acme - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" -) - -// Problem document as defined in, -// https://tools.ietf.org/html/rfc7807 - -// Problem represents an error returned by an acme server. 
-type Problem struct { - Type string `json:"type"` - Detail string `json:"detail,omitempty"` - Status int `json:"status,omitempty"` - Instance string `json:"instance,omitempty"` - SubProblems []SubProblem `json:"subproblems,omitempty"` -} - -type SubProblem struct { - Type string `json:"type"` - Detail string `json:"detail"` - Identifier Identifier `json:"identifier"` -} - -// Returns a human readable error string. -func (err Problem) Error() string { - s := fmt.Sprintf("acme: error code %d %q: %s", err.Status, err.Type, err.Detail) - if len(err.SubProblems) > 0 { - for _, v := range err.SubProblems { - s += fmt.Sprintf(", problem %q: %s", v.Type, v.Detail) - } - } - if err.Instance != "" { - s += ", url: " + err.Instance - } - return s -} - -// Helper function to determine if a response contains an expected status code, or otherwise an error object. -func checkError(resp *http.Response, expectedStatuses ...int) error { - for _, statusCode := range expectedStatuses { - if resp.StatusCode == statusCode { - return nil - } - } - - if resp.StatusCode < 400 || resp.StatusCode >= 600 { - return fmt.Errorf("acme: expected status codes: %d, got: %d %s", expectedStatuses, resp.StatusCode, resp.Status) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("acme: error reading error body: %v", err) - } - - acmeError := Problem{} - if err := json.Unmarshal(body, &acmeError); err != nil { - return fmt.Errorf("acme: parsing error body: %v - %s", err, string(body)) - } - - return acmeError -} diff --git a/vendor/github.com/eggsampler/acme/v3/types.go b/vendor/github.com/eggsampler/acme/v3/types.go deleted file mode 100644 index d15a59fe..00000000 --- a/vendor/github.com/eggsampler/acme/v3/types.go +++ /dev/null @@ -1,163 +0,0 @@ -package acme - -import ( - "crypto" - "net/http" - "time" -) - -// Different possible challenge types provided by an ACME server. 
-// See https://tools.ietf.org/html/rfc8555#section-9.7.8 -const ( - ChallengeTypeDNS01 = "dns-01" - ChallengeTypeHTTP01 = "http-01" - ChallengeTypeTLSALPN01 = "tls-alpn-01" - - // ChallengeTypeTLSSNI01 is deprecated and should not be used. - // See: https://community.letsencrypt.org/t/important-what-you-need-to-know-about-tls-sni-validation-issues/50811 - ChallengeTypeTLSSNI01 = "tls-sni-01" -) - -// Constants used for certificate revocation, used for RevokeCertificate -// See https://tools.ietf.org/html/rfc5280#section-5.3.1 -const ( - ReasonUnspecified = iota // 0 - ReasonKeyCompromise // 1 - ReasonCaCompromise // 2 - ReasonAffiliationChanged // 3 - ReasonSuperseded // 4 - ReasonCessationOfOperation // 5 - ReasonCertificateHold // 6 - _ // 7 - Unused - ReasonRemoveFromCRL // 8 - ReasonPrivilegeWithdrawn // 9 - ReasonAaCompromise // 10 -) - -// Directory object as returned from the client's directory url upon creation of client. -// See https://tools.ietf.org/html/rfc8555#section-7.1.1 -type Directory struct { - NewNonce string `json:"newNonce"` // url to new nonce endpoint - NewAccount string `json:"newAccount"` // url to new account endpoint - NewOrder string `json:"newOrder"` // url to new order endpoint - NewAuthz string `json:"newAuthz"` // url to new authz endpoint - RevokeCert string `json:"revokeCert"` // url to revoke cert endpoint - KeyChange string `json:"keyChange"` // url to key change endpoint - - // meta object containing directory metadata - Meta struct { - TermsOfService string `json:"termsOfService"` - Website string `json:"website"` - CaaIdentities []string `json:"caaIdentities"` - ExternalAccountRequired bool `json:"externalAccountRequired"` - } `json:"meta"` - - // Directory url provided when creating a new acme client. - URL string `json:"-"` -} - -// Client structure to interact with an ACME server. -// This is typically how most, if not all, of the communication between the client and server occurs. 
-type Client struct { - httpClient *http.Client - nonces *nonceStack - dir Directory - userAgentSuffix string - acceptLanguage string - retryCount int - - // The amount of total time the Client will wait at most for a challenge to be updated or a certificate to be issued. - // Default 30 seconds if duration is not set or if set to 0. - PollTimeout time.Duration - - // The time between checking if a challenge has been updated or a certificate has been issued. - // Default 0.5 seconds if duration is not set or if set to 0. - PollInterval time.Duration -} - -// Account structure representing fields in an account object. -// See https://tools.ietf.org/html/rfc8555#section-7.1.2 -// See also https://tools.ietf.org/html/rfc8555#section-9.7.1 -type Account struct { - Status string `json:"status"` - Contact []string `json:"contact"` - Orders string `json:"orders"` - - // Provided by the Location http header when creating a new account or fetching an existing account. - URL string `json:"-"` - - // The private key used to create or fetch the account. - // Not fetched from server. - PrivateKey crypto.Signer `json:"-"` - - // Thumbprint is the SHA-256 digest JWK_Thumbprint of the account key. - // See https://tools.ietf.org/html/rfc8555#section-8.1 - Thumbprint string `json:"-"` -} - -// Identifier object used in order and authorization objects -// See https://tools.ietf.org/html/rfc8555#section-7.1.4 -type Identifier struct { - Type string `json:"type"` - Value string `json:"value"` -} - -// Order object returned when fetching or creating a new order. 
-// See https://tools.ietf.org/html/rfc8555#section-7.1.3 -type Order struct { - Status string `json:"status"` - Expires time.Time `json:"expires"` - Identifiers []Identifier `json:"identifiers"` - NotBefore time.Time `json:"notBefore"` - NotAfter time.Time `json:"notAfter"` - Error Problem `json:"error"` - Authorizations []string `json:"authorizations"` - Finalize string `json:"finalize"` - Certificate string `json:"certificate"` - - // URL for the order object. - // Provided by the rel="Location" Link http header - URL string `json:"-"` -} - -// Authorization object returned when fetching an authorization in an order. -// See https://tools.ietf.org/html/rfc8555#section-7.1.4 -type Authorization struct { - Identifier Identifier `json:"identifier"` - Status string `json:"status"` - Expires time.Time `json:"expires"` - Challenges []Challenge `json:"challenges"` - Wildcard bool `json:"wildcard"` - - // For convenience access to the provided challenges - ChallengeMap map[string]Challenge `json:"-"` - ChallengeTypes []string `json:"-"` - - URL string `json:"-"` -} - -// Challenge object fetched in an authorization or directly from the challenge url. -// See https://tools.ietf.org/html/rfc8555#section-7.1.5 -type Challenge struct { - Type string `json:"type"` - URL string `json:"url"` - Status string `json:"status"` - Validated string `json:"validated"` - Error Problem `json:"error"` - - // Based on the challenge used - Token string `json:"token"` - KeyAuthorization string `json:"keyAuthorization"` - - // Authorization url provided by the rel="up" Link http header - AuthorizationURL string `json:"-"` -} - -// OrderList of challenge objects. -type OrderList struct { - Orders []string `json:"orders"` - - // Order list pagination, url to next orders. 
- // Provided by the rel="next" Link http header - Next string `json:"-"` -} diff --git a/vendor/github.com/letsdebug/letsdebug/.gitignore b/vendor/github.com/letsdebug/letsdebug/.gitignore deleted file mode 100644 index 7d54ea20..00000000 --- a/vendor/github.com/letsdebug/letsdebug/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -vendor/ -.idea/ -acme-account.json -web/*_gen.go -letsdebug-server -letsdebug-cli diff --git a/vendor/github.com/letsdebug/letsdebug/.travis.yml b/vendor/github.com/letsdebug/letsdebug/.travis.yml deleted file mode 100644 index 27038e52..00000000 --- a/vendor/github.com/letsdebug/letsdebug/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -go: - - '1.15.x' -before_install: - - sudo apt-get update - - sudo apt-get -y install libunbound-dev make - - sudo mkdir -p $GOPATH/bin - - export PATH=$PATH:$GOPATH/bin -install: - - make deps -script: - - make letsdebug-server letsdebug-cli -deploy: - provider: releases - api_key: - secure: f65vxdzq7SoIooXNAPJaHEiCcnG1Q2R7muomm/5qWyRXgaXyBu6Yo0oOGQNcYLbs22PbRCVS8xnN2cSfFV5jeilRKuhpTYq0tforjJbtjL1DEs9ODyLZnXIXH+uacIPwM/ioxFbFVTnSCkZx90+9I+WHY0taqc2AW49RvQKPTzOmWYZ4ATsQxsv5jBLXZIuhhl3cEJayeogT2yToYump3AZN+8o67kP94a/vSbPMTRcKOeLQa+gjxSoHVBfjmpYvGdFTp1iE8bWsJpfo/i2snF6eMdAig4Vy9Ajk/SVEmSzEBWk31JceDrT9n7VNmlDN9Us2PhdjQLR5KD3OCLx6QN/P72iBN1zq9bTRiHaF4TEUq6IyP3cnDQStfTYzE+IIZtl7DQQKY+Dp5mTO3QSq17Kp7Dvw9mNyGsyE7Oo4VmxHuH8XXbuCoyN2ywJ6l2rv/wuBPylIC5iuguJyVK9WnMxt8vOaBWIAPmm8HbviU3FHnHic6s4DPDpLfwpvsqbxSvEYcj+mRYKhMSD3pF2E/a9wFhph+Wj6sbPhiWkI84D4kmwH42h7WmYqaJfTMGyiZiiFdcF4J/M4c66csWbBLza1GIeNGmxPpKLjilIIBDo6gfpKqQYZllt+ZfeLdwIydE8m5NBVw1d6I0ctF9GNWUG8yfHPKHxNwY05kziW5qA= - file: - - letsdebug-server - - letsdebug-cli - skip_cleanup: true - on: - repo: letsdebug/letsdebug - tags: true diff --git a/vendor/github.com/letsdebug/letsdebug/Makefile b/vendor/github.com/letsdebug/letsdebug/Makefile deleted file mode 100644 index 3fcbb33c..00000000 --- a/vendor/github.com/letsdebug/letsdebug/Makefile +++ /dev/null @@ 
-1,32 +0,0 @@ -.PHONY: clean all deps server-dev server-dev-db-up deploy - -clean: - rm -f letsdebug-server - -deps: - go get -u github.com/go-bindata/go-bindata/... - -generate: - go generate ./... - -test: - go test -v ./... - -server-dev: generate - LETSDEBUG_WEB_DEBUG=1 \ - LETSDEBUG_WEB_DB_DSN="user=letsdebug dbname=letsdebug password=password sslmode=disable" \ - LETSDEBUG_DEBUG=1 go \ - run -race cmd/server/server.go - -server-dev-db-up: - docker run -d --name letsdebug-db -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_USER=letsdebug postgres:10.3-alpine - -letsdebug-server: generate - go build -o letsdebug-server cmd/server/server.go - -letsdebug-cli: - go build -o letsdebug-cli cmd/cli/cli.go - -deploy: clean letsdebug-server - rsync -vhz --progress letsdebug-server root@letsdebug.net:/usr/local/bin/ && \ - ssh root@letsdebug.net "systemctl restart letsdebug" diff --git a/vendor/github.com/letsdebug/letsdebug/README.md b/vendor/github.com/letsdebug/letsdebug/README.md deleted file mode 100644 index 99abc4be..00000000 --- a/vendor/github.com/letsdebug/letsdebug/README.md +++ /dev/null @@ -1,170 +0,0 @@ -# Let's Debug - -[![Build Status](https://travis-ci.org/letsdebug/letsdebug.svg?branch=master)](https://travis-ci.org/letsdebug/letsdebug) -[![godoc](https://godoc.org/github.com/letsdebug/letsdebug?status.svg)](https://godoc.org/github.com/letsdebug/letsdebug) - -Let's Debug is a diagnostic website, API, CLI and Go package for quickly and accurately finding and reporting issues for any domain that may prevent issuance of a Let's Encrypt SSL certificate for any ACME validation method. - -It is motivated by [this community thread](https://community.letsencrypt.org/t/creating-a-webservice-for-analysis-of-common-problems/45836). - -## Status -Currently [deployed to letsdebug.net and regularly in use](https://letsdebug.net). 
- -## Problems Detected - -| Name | Description | Examples --------|-------------|--------| -| InvalidMethod, ValidationMethodDisabled, ValidationMethodNotSuitable | Checks the ACME validation method is valid and usable for the provided domain name. | [Example](https://letsdebug.net/*.letsencrypt.org/1) | -| InvalidDomain | Checks the domain is a valid domain name on a public TLD. | [Example](https://letsdebug.net/ooga.booga/2) | -| StatusNotOperational| Checks that the Let's Encrypt service is not experiencing an outage, according to status.io | - -| DNSLookupFailed, TXTRecordError | Checks that the Unbound resolver (via libunbound) is able to resolve a variety records relevant to Let's Encrypt. Discovers problems such as DNSSEC issues, 0x20 mixed case randomization, timeouts etc, in the spirit of jsha's unboundtest.com | [Example](https://letsdebug.net/dnssec-failed.org/3) | -CAAIssuanceNotAllowed | Checks that no CAA records are preventing the issuance of Let's Encrypt certificates. | [Example](https://letsdebug.net/id-rsa.pub/4) | -CAACriticalUnknown | Checks that no CAA critical flags unknown to Let's Encrypt are used | - | -RateLimit | Checks that the domain name is not currently affected by any of the domain-based rate limits imposed by Let's Encrypt, using the public certwatch Postgres interface from Comodo's crt.sh. | [Example](https://letsdebug.net/targettec.ddns.net/13) | -NoRecords, ReservedAddress | Checks that sufficient valid A/AAAA records are present to perform HTTP-01 validation | [Example](https://letsdebug.net/localtest.me/6) | -BadRedirect | Checks that no bad HTTP redirects are present. Discovers redirects that aren't accessible, unacceptable ports, unacceptable schemes, accidental missing trailing slash on redirect. | [Example](https://letsdebug.net/foo.monkas.xyz/7) | -WebserverMisconfiguration | Checks whether the server is serving the wrong protocol on the wrong port as the result of an HTTP-01 validation request. 
| - | -ANotWorking, AAAANotWorking | Checks whether listed IP addresses are not functioning properly for HTTP-01 validation, including timeouts and other classes of network and HTTP errors. | [Example](https://letsdebug.net/network-fail.foo.monkas.xyz/8) | -MultipleIPAddressDiscrepancy | For domains with multiple A/AAAA records, checks whether there are major discrepancies between the server responses to reveal when the addresses may be pointing to different servers accidentally. | [Example](https://letsdebug.net/v4v6fail.monkas.xyz/51916) -CloudflareCDN | Checks whether the domain is being served via Cloudflare's proxy service (and therefore SSL termination is occurring at Cloudflare) | - | -CloudflareSSLNotProvisioned | Checks whether the domain has its SSL terminated by Cloudflare and Cloudflare has not provisioned a certificate yet (leading to a TLS handshake error). | [Example](https://letsdebug.net/cf-no-ssl.fleetssl.com/10) | -IssueFromLetsEncrypt | Attempts to detect issues with a high degree of accuracy via the Let's Encrypt v2 staging service by attempting to perform an authorization for the domain. Discovers issues such as CA-based domain blacklists & other policies, specific networking issues. | [Example](https://letsdebug.net/bankofamerica.com/12) | -| TXTDoubleLabel | Checks for the presence of records that are doubled up (e.g. `_acme-challenge.example.org.example.org`). Usually indicates that the user has been incorrectly creating records in their DNS user interface. | [Example](https://letsdebug.net/double.monkas.xyz/2477) | -PortForwarding | Checks whether the domain is serving a modem-router administrative interface instead of an intended webserver, which is indicative of a port-forwarding misconfiguration. | [Example](https://letsdebug.net/cdkauffmannnextcloud.duckdns.org/11450) | -| SanctionedDomain | Checks whether the Registered Domain is present on the [USG OFAC SDN List](https://sanctionssearch.ofac.treas.gov/). Updated daily. 
| [Example](https://letsdebug.net/unomasuno.com.mx/48081) | -| BlockedByNginxTestCookie | Checks whether the HTTP-01 validation requests are being intercepted by [testcookie-nginx-module](https://github.com/kyprizel/testcookie-nginx-module). | [Example](https://letsdebug.net/13513427185.ifastnet.org/51860) | -| HttpOnHttpsPort | Checks whether the server reported receiving an HTTP request on an HTTPS-only port | [Example](https://letsdebug.net/clep-energy.org/107591) | - -## Web API Usage - -There is a JSON-based API available as part of the web frontend. - -### Submitting a test - -```bash -$ curl --data '{"method":"http-01","domain":"letsdebug.net"}' -H 'content-type: application/json' https://letsdebug.net -``` -```javascript -{"Domain":"letsdebug.net","ID":14} -``` - -### Submitting a test with custom options - -```bash -curl --data '{"method":"http-01","domain":"letsdebug.net","options":{"http_request_path":"custom-path","http_expect_response":"abc123"}}' -H 'content-type: application/json' https://letsdebug.net -``` - -Available options are as follows: - -| Option | Description | --------|-------------| -`http_request_path` | What path within `/.well-known/acme-challenge/` to use instead of `letsdebug-test` (default) for the HTTP check. Max length 255. | -`http_expect_response` | What exact response to expect from each server during the HTTP check. By default, no particular response is expected. If present and the response does not match, the test will fail with an Error severity. It is highly recommended to always use a completely random value. Max length 255. 
| - -### Viewing tests - -```bash -$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net/14 -``` -```javascript -{"id":14,"domain":"letsdebug.net","method":"http-01","status":"Complete","created_at":"2018-04-30T01:58:34.765829Z","started_at":"2018-04-30T01:58:34.769815Z","completed_at":"2018-04-30T01:58:41.39023Z","result":{}} -``` - -or to view all recent tests - -```bash -$ curl -H 'accept: application/json' https://letsdebug.net/letsdebug.net -``` - -### Performing a query against the Certwatch database - -```bash -$ curl "https://letsdebug.net/certwatch-query?q=" -``` -```javascript -{ - "query": "select c.id as crtsh_id, x509_subjectName(c.CERTIFICATE), x509_notAfter(c.CERTIFICATE) from certificate c where x509_notAfter(c.CERTIFICATE) = '2018-06-01 16:25:44' AND x509_issuerName(c.CERTIFICATE) LIKE 'C=US, O=Let''s Encrypt%';", - "results": [ - { - "crtsh_id": 346300797, - "x509_notafter": "2018-06-01T16:25:44Z", - "x509_subjectname": "CN=hivdatingzimbabwe.com" - }, - /* ... */ - ] -} -``` - -## CLI Usage - -You can download binaries for tagged releases for Linux for both the CLi and the server [from the releases page](https://github.com/letsdebug/letsdebug/releases). - - - letsdebug-cli -domain example.org -method http-01 -debug - -## Library Usage - -```go - -import "github.com/letsdebug/letsdebug" - -problems, _ := letsdebug.Check("example.org", letsdebug.HTTP01) -``` - -## Installation - -### Dependencies - -This package relies on a fairly recent version of libunbound. - -* On Debian-based distributions: - - `apt install libunbound2 libunbound-dev` - -* On EL-based distributions, you may need to build from source because the packages are ancient on e.g. CentOS, but you can try: - - `yum install unbound-libs unbound-devel` - -* On OSX, [Homebrew](https://brew.sh/) contains the latest version of unbound: - - `brew install unbound` - -You will also need Go's [dep](https://github.com/golang/dep) dependency manager. 
- -### Releases -You can save time by [downloading tagged releases for 64-bit Linux](https://github.com/letsdebug/letsdebug/releases). Keep in mind you will still need to have libunbound present on your system. - -### Building - - go get -u github.com/letsdebug/letsdebug/... - cd $GOPATH/src/github.com/letsdebug/letsdebug - make deps - make letsdebug-cli letsdebug-server - - -## Contributing -Any contributions containing JavaScript will be discarded, but other feedback, bug reports, suggestions and enhancements are welcome - please open an issue first. - -## LICENSE - - MIT License - - Copyright (c) 2018 Let's Debug - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/letsdebug/letsdebug/checker.go b/vendor/github.com/letsdebug/letsdebug/checker.go deleted file mode 100644 index 21bc88bb..00000000 --- a/vendor/github.com/letsdebug/letsdebug/checker.go +++ /dev/null @@ -1,107 +0,0 @@ -package letsdebug - -import ( - "crypto/sha256" - "errors" - "fmt" - "reflect" - "time" -) - -// ValidationMethod represents an ACME validation method -type ValidationMethod string - -const ( - HTTP01 ValidationMethod = "http-01" // HTTP01 represents the ACME http-01 validation method. - DNS01 ValidationMethod = "dns-01" // DNS01 represents the ACME dns-01 validation method. - TLSALPN01 ValidationMethod = "tls-alpn-01" // TLSALPN01 represents the ACME tls-alpn-01 validation method. -) - -var ( - validMethods = map[ValidationMethod]bool{HTTP01: true, DNS01: true, TLSALPN01: true} - errNotApplicable = errors.New("Checker not applicable for this domain and method") - checkers []checker -) - -func init() { - // Since the OFAC SDN checker polls, we need to initialize it - ofac := &ofacSanctionChecker{} - ofac.setup() - - // We want to launch the slowest checkers as early as possible, - // unless they have a dependency on an earlier checker - checkers = []checker{ - asyncCheckerBlock{ - validMethodChecker{}, - validDomainChecker{}, - wildcardDNS01OnlyChecker{}, - statusioChecker{}, - ofac, - }, - - asyncCheckerBlock{ - caaChecker{}, // depends on valid*Checker - &rateLimitChecker{}, // depends on valid*Checker - dnsAChecker{}, // depends on valid*Checker - txtRecordChecker{}, // depends on valid*Checker - txtDoubledLabelChecker{}, // depends on valid*Checker - }, - - asyncCheckerBlock{ - httpAccessibilityChecker{}, // depends on dnsAChecker - cloudflareChecker{}, // depends on dnsAChecker to some extent - &acmeStagingChecker{}, // Gets the final word - }, - } -} - -type checker interface { - Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) -} - -// 
asyncCheckerBlock represents a checker which is composed of other checkers that can be run simultaneously. -type asyncCheckerBlock []checker - -type asyncResult struct { - Problems []Problem - Error error -} - -func (c asyncCheckerBlock) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - resultCh := make(chan asyncResult, len(c)) - - id := fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%d", time.Now().UnixNano()))))[:4] - debug("[%s] Launching async\n", id) - - for _, task := range c { - go func(task checker, ctx *scanContext, domain string, method ValidationMethod) { - defer func() { - if r := recover(); r != nil { - resultCh <- asyncResult{nil, fmt.Errorf("Check %T paniced: %v", task, r)} - } - }() - t := reflect.TypeOf(task) - debug("[%s] async: + %v\n", id, t) - start := time.Now() - probs, err := task.Check(ctx, domain, method) - debug("[%s] async: - %v in %v\n", id, t, time.Since(start)) - resultCh <- asyncResult{probs, err} - }(task, ctx, domain, method) - } - - var probs []Problem - - for i := 0; i < len(c); i++ { - result := <-resultCh - if result.Error != nil && result.Error != errNotApplicable { - debug("[%s] Exiting async via error\n", id) - return nil, result.Error - } - if len(result.Problems) > 0 { - probs = append(probs, result.Problems...) 
- } - } - - debug("[%s] Exiting async gracefully\n", id) - return probs, nil -} diff --git a/vendor/github.com/letsdebug/letsdebug/context.go b/vendor/github.com/letsdebug/letsdebug/context.go deleted file mode 100644 index 48e87257..00000000 --- a/vendor/github.com/letsdebug/letsdebug/context.go +++ /dev/null @@ -1,81 +0,0 @@ -package letsdebug - -import ( - "fmt" - "math/rand" - "net" - "sync" - - "github.com/miekg/dns" -) - -type lookupResult struct { - RRs []dns.RR - Error error -} - -type scanContext struct { - rrs map[string]map[uint16]lookupResult - rrsMutex sync.Mutex - - httpRequestPath string - httpExpectResponse string -} - -func newScanContext() *scanContext { - return &scanContext{ - rrs: map[string]map[uint16]lookupResult{}, - httpRequestPath: "letsdebug-test", - } -} - -func (sc *scanContext) Lookup(name string, rrType uint16) ([]dns.RR, error) { - sc.rrsMutex.Lock() - rrMap, ok := sc.rrs[name] - if !ok { - rrMap = map[uint16]lookupResult{} - sc.rrs[name] = rrMap - } - result, ok := rrMap[rrType] - sc.rrsMutex.Unlock() - - if ok { - return result.RRs, result.Error - } - - resolved, err := lookup(name, rrType) - - sc.rrsMutex.Lock() - rrMap[rrType] = lookupResult{ - RRs: resolved, - Error: err, - } - sc.rrsMutex.Unlock() - - return resolved, err -} - -// Only slightly random - it will use AAAA over A if possible. 
-func (sc *scanContext) LookupRandomHTTPRecord(name string) (net.IP, error) { - v6RRs, err := sc.Lookup(name, dns.TypeAAAA) - if err != nil { - return net.IP{}, err - } - if len(v6RRs) > 0 { - if selected, ok := v6RRs[rand.Intn(len(v6RRs))].(*dns.AAAA); ok { - return selected.AAAA, nil - } - } - - v4RRs, err := sc.Lookup(name, dns.TypeA) - if err != nil { - return net.IP{}, err - } - if len(v4RRs) > 0 { - if selected, ok := v4RRs[rand.Intn(len(v4RRs))].(*dns.A); ok { - return selected.A, nil - } - } - - return net.IP{}, fmt.Errorf("No AAAA or A records were found for %s", name) -} diff --git a/vendor/github.com/letsdebug/letsdebug/dns01.go b/vendor/github.com/letsdebug/letsdebug/dns01.go deleted file mode 100644 index f945969c..00000000 --- a/vendor/github.com/letsdebug/letsdebug/dns01.go +++ /dev/null @@ -1,156 +0,0 @@ -package letsdebug - -import ( - "crypto/rand" - "fmt" - "sort" - "strings" - "sync" - - "github.com/miekg/dns" - "github.com/weppos/publicsuffix-go/publicsuffix" -) - -// wildcardDNS01OnlyChecker ensures that a wildcard domain is only validated via dns-01. 
-type wildcardDNS01OnlyChecker struct{} - -func (c wildcardDNS01OnlyChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if !strings.HasPrefix(domain, "*.") { - return nil, errNotApplicable - } - - if method == DNS01 { - return nil, errNotApplicable - } - - return []Problem{wildcardHTTP01(domain, method)}, nil -} - -func wildcardHTTP01(domain string, method ValidationMethod) Problem { - return Problem{ - Name: "MethodNotSuitable", - Explanation: fmt.Sprintf("A wildcard domain like %s can only be issued using a dns-01 validation method.", domain), - Detail: fmt.Sprintf("Invalid method: %s", method), - Severity: SeverityFatal, - } -} - -// txtRecordChecker ensures there is no resolution errors with the _acme-challenge txt record -type txtRecordChecker struct{} - -func (c txtRecordChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if method != DNS01 { - return nil, errNotApplicable - } - - domain = strings.TrimPrefix(domain, "*.") - - if _, err := ctx.Lookup("_acme-challenge."+domain, dns.TypeTXT); err != nil { - // report this problem as a fatal problem as that is the purpose of this checker - return []Problem{txtRecordError(domain, err)}, nil - } - - return nil, nil -} - -func txtRecordError(domain string, err error) Problem { - return Problem{ - Name: "TXTRecordError", - Explanation: fmt.Sprintf(`An error occurred while attempting to lookup the TXT record on _acme-challenge.%s . 
`+ - `Any resolver errors that the Let's Encrypt CA encounters on this record will cause certificate issuance to fail.`, domain), - Detail: err.Error(), - Severity: SeverityFatal, - } -} - -// txtDoubledLabelChecker ensures that a record for _acme-challenge.example.org.example.org -// wasn't accidentally created -type txtDoubledLabelChecker struct{} - -func (c txtDoubledLabelChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if method != DNS01 { - return nil, errNotApplicable - } - - registeredDomain, _ := publicsuffix.Domain(domain) - - variants := []string{ - fmt.Sprintf("_acme-challenge.%s.%s", domain, domain), // _acme-challenge.www.example.org.www.example.org - fmt.Sprintf("_acme-challenge.%s.%s", domain, registeredDomain), // _acme-challenge.www.example.org.example.org - } - - var found []string - distinctCombined := map[string]struct{}{} - var randomCombined string - - var foundMu sync.Mutex - - var wg sync.WaitGroup - wg.Add(len(variants) + 1) - - doQuery := func(q string) ([]string, string) { - found := []string{} - combined := []string{} - rrs, _ := ctx.Lookup(q, dns.TypeTXT) - for _, rr := range rrs { - txt, ok := rr.(*dns.TXT) - if !ok { - continue - } - found = append(found, txt.String()) - combined = append(combined, txt.Txt...) - } - sort.Strings(combined) - return found, strings.Join(combined, "\n") - } - - // Check the double label variants - for _, variant := range variants { - go func(q string) { - defer wg.Done() - - values, combined := doQuery(q) - if len(values) == 0 { - return - } - - foundMu.Lock() - defer foundMu.Unlock() - - found = append(found, values...) 
- distinctCombined[combined] = struct{}{} - }(variant) - } - - // Check the response for a random subdomain, to detect the presence of a wildcard TXT record - go func() { - defer wg.Done() - - nonce := make([]byte, 4) - _, _ = rand.Read(nonce) - _, randomCombined = doQuery(fmt.Sprintf("_acme-challenge.%s.%s", fmt.Sprintf("rand-%x", nonce), domain)) - }() - - wg.Wait() - - // If a randomized subdomain has the exact same non-empty TXT response as any of the "double labels", then - // we are probably dealing with a wildcard TXT record in the zone, and it is probably not a meaningful - // misconfiguration. In this case, say nothing. - if _, ok := distinctCombined[randomCombined]; ok && randomCombined != "" { - return nil, nil - } - - if len(found) > 0 { - return []Problem{{ - Name: "TXTDoubleLabel", - Explanation: "Some DNS records were found that indicate TXT records may have been incorrectly manually entered into " + - `DNS editor interfaces. The correct way to enter these records is to either remove the domain from the label (so ` + - `enter "_acme-challenge.www.example.org" as "_acme-challenge.www") or include a period (.) 
at the ` + - `end of the label (enter "_acme-challenge.example.org.").`, - Detail: fmt.Sprintf("The following probably-erroneous TXT records were found:\n%s", strings.Join(found, "\n")), - Severity: SeverityWarning, - }}, nil - } - - return nil, nil -} diff --git a/vendor/github.com/letsdebug/letsdebug/dns_util.go b/vendor/github.com/letsdebug/letsdebug/dns_util.go deleted file mode 100644 index 662ced9f..00000000 --- a/vendor/github.com/letsdebug/letsdebug/dns_util.go +++ /dev/null @@ -1,127 +0,0 @@ -package letsdebug - -import ( - "fmt" - "net" - "strings" - - "github.com/miekg/dns" - "github.com/miekg/unbound" -) - -var ( - reservedNets []*net.IPNet -) - -func lookup(name string, rrType uint16) ([]dns.RR, error) { - ub := unbound.New() - defer ub.Destroy() - - if err := setUnboundConfig(ub); err != nil { - return nil, fmt.Errorf("Failed to configure Unbound resolver: %v", err) - } - - result, err := ub.Resolve(name, rrType, dns.ClassINET) - if err != nil { - return nil, err - } - - if result.Bogus { - return nil, fmt.Errorf("DNS response for %s had fatal DNSSEC issues: %v", name, result.WhyBogus) - } - - if result.Rcode == dns.RcodeServerFailure || result.Rcode == dns.RcodeRefused { - return nil, fmt.Errorf("DNS response for %s/%s did not have an acceptable response code: %s", - name, dns.TypeToString[rrType], dns.RcodeToString[result.Rcode]) - } - - return result.Rr, nil -} - -func normalizeFqdn(name string) string { - name = strings.TrimSpace(name) - name = strings.TrimSuffix(name, ".") - return strings.ToLower(name) -} - -func isAddressReserved(ip net.IP) bool { - for _, reserved := range reservedNets { - if reserved.Contains(ip) { - return true - } - } - return false -} - -func init() { - reservedNets = []*net.IPNet{} - reservedCIDRs := []string{ - "0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", - "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", - "192.0.0.0/24", "192.0.2.0/24", "192.88.99.0/24", - "192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24", - 
"203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4", - "255.255.255.255/32", "::/128", "::1/128", /*"::ffff:0:0/96",*/ - "64:ff9b::/96", "100::/64", "2001::/32", "2001:10::/28", - "2001:20::/28", "2001:db8::/32", "2002::/16", "fc00::/7", - "fe80::/10", "ff00::/8", - } - for _, cidr := range reservedCIDRs { - _, n, err := net.ParseCIDR(cidr) - if err != nil { - panic(err) - } - reservedNets = append(reservedNets, n) - } -} - -func setUnboundConfig(ub *unbound.Unbound) error { - // options need the : in the option key according to docs - opts := []struct { - Opt string - Val string - }{ - {"verbosity:", "0"}, - {"use-syslog:", "no"}, - {"do-ip4:", "yes"}, - {"do-ip6:", "yes"}, - {"do-udp:", "yes"}, - {"do-tcp:", "yes"}, - {"tcp-upstream:", "no"}, - {"harden-glue:", "yes"}, - {"harden-dnssec-stripped:", "yes"}, - {"cache-min-ttl:", "0"}, - {"cache-max-ttl:", "0"}, - {"cache-max-negative-ttl:", "0"}, - {"neg-cache-size:", "0"}, - {"prefetch:", "no"}, - {"unwanted-reply-threshold:", "10000"}, - {"do-not-query-localhost:", "yes"}, - {"val-clean-additional:", "yes"}, - {"harden-algo-downgrade:", "yes"}, - {"edns-buffer-size:", "512"}, - {"val-sig-skew-min:", "0"}, - {"val-sig-skew-max:", "0"}, - {"target-fetch-policy:", "0 0 0 0 0"}, - } - - for _, opt := range opts { - // Can't ignore these because we cant silently have policies being ignored - if err := ub.SetOption(opt.Opt, opt.Val); err != nil { - return fmt.Errorf("Failed to configure unbound with option %s %v", opt.Opt, err) - } - } - - // use-caps-for-id was bugged (no colon) < 1.7.1, try both ways in order to be compatible - // https://www.nlnetlabs.nl/bugs-script/show_bug.cgi?id=4092 - if err := ub.SetOption("use-caps-for-id:", "yes"); err != nil { - if err = ub.SetOption("use-caps-for-id", "yes"); err != nil { - return fmt.Errorf("Failed to configure unbound with use-caps-for-id: %v", err) - } - } - - return ub.AddTa(`. 
172800 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU= - . 172800 IN DNSKEY 256 3 8 AwEAAdp440E6Mz7c+Vl4sPd0lTv2Qnc85dTW64j0RDD7sS/zwxWDJ3QRES2VKDO0OXLMqVJSs2YCCSDKuZXpDPuf++YfAu0j7lzYYdWTGwyNZhEaXtMQJIKYB96pW6cRkiG2Dn8S2vvo/PxW9PKQsyLbtd8PcwWglHgReBVp7kEv/Dd+3b3YMukt4jnWgDUddAySg558Zld+c9eGWkgWoOiuhg4rQRkFstMX1pRyOSHcZuH38o1WcsT4y3eT0U/SR6TOSLIB/8Ftirux/h297oS7tCcwSPt0wwry5OFNTlfMo8v7WGurogfk8hPipf7TTKHIi20LWen5RCsvYsQBkYGpF78= - . 172800 IN DNSKEY 257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0= - . 172800 IN RRSIG DNSKEY 8 0 172800 20181101000000 20181011000000 20326 . 
M/LTswhCjuJUTvX1CFqC+TiJ4Fez7AROa5mM+1AI2MJ+zLHhr3JaMxyydFLWrBHR0056Hz7hNqQ9i63hGeiR6uMfanF0jIRb9XqgGP8nY37T8ESpS1UiM9rJn4b40RFqDSEvuFdd4hGwK3EX0snOCLdUT8JezxtreXI0RilmqDC2g44TAKyFw+Is9Qwl+k6+fbMQ/atA8adANbYgyuHfiwQCCUtXRaTCpRgQtsAz9izO0VYIGeHIoJta0demAIrLCOHNVH2ogHTqMEQ18VqUNzTd0aGURACBdS7PeP2KogPD7N8Q970O84TFmO4ahPIvqO+milCn5OQTbbgsjHqY6Q==`) -} diff --git a/vendor/github.com/letsdebug/letsdebug/generic.go b/vendor/github.com/letsdebug/letsdebug/generic.go deleted file mode 100644 index a84fe8a3..00000000 --- a/vendor/github.com/letsdebug/letsdebug/generic.go +++ /dev/null @@ -1,861 +0,0 @@ -package letsdebug - -import ( - "context" - "crypto/x509" - "database/sql" - "encoding/pem" - "encoding/xml" - "io/ioutil" - "net" - "os" - "sort" - "strings" - "sync" - - "github.com/eggsampler/acme/v3" - - "fmt" - - "net/http" - "net/url" - - "time" - - "encoding/json" - - // Driver for crtwatch/ratelimitChecker - _ "github.com/lib/pq" - "github.com/miekg/dns" - "github.com/weppos/publicsuffix-go/net/publicsuffix" - psl "github.com/weppos/publicsuffix-go/publicsuffix" -) - -// validMethodChecker ensures that the provided authorization method is valid and supported. -type validMethodChecker struct{} - -func (c validMethodChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if validMethods[method] { - return nil, errNotApplicable - } - - return []Problem{notValidMethod(method)}, nil -} - -func notValidMethod(method ValidationMethod) Problem { - var supportedMethods []string - for k := range validMethods { - supportedMethods = append(supportedMethods, string(k)) - } - return Problem{ - Name: "InvalidMethod", - Explanation: fmt.Sprintf(`"%s" is not a supported validation method.`, method), - Detail: fmt.Sprintf("Supported methods: %s", strings.Join(supportedMethods, ", ")), - Severity: SeverityFatal, - } -} - -// validDomainChecker ensures that the FQDN is well-formed and is part of a public suffix. 
-type validDomainChecker struct{} - -func (c validDomainChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - var probs []Problem - - domain = strings.TrimPrefix(domain, "*.") - - for _, ch := range []byte(domain) { - if !(('a' <= ch && ch <= 'z') || - ('A' <= ch && ch <= 'A') || - ('0' <= ch && ch <= '9') || - ch == '.' || ch == '-') { - probs = append(probs, invalidDomain(domain, fmt.Sprintf("Invalid character present: %c", ch))) - return probs, nil - } - } - - if len(domain) > 230 { - probs = append(probs, invalidDomain(domain, "Domain too long")) - return probs, nil - } - - if ip := net.ParseIP(domain); ip != nil { - probs = append(probs, invalidDomain(domain, "Domain is an IP address")) - return probs, nil - } - - rule := psl.DefaultList.Find(domain, &psl.FindOptions{IgnorePrivate: true, DefaultRule: nil}) - if rule == nil { - probs = append(probs, invalidDomain(domain, "Domain doesn't end in a public TLD")) - return probs, nil - } - - if r := rule.Decompose(domain)[1]; r == "" { - probs = append(probs, invalidDomain(domain, "Domain is a TLD")) - return probs, nil - } else { - probs = append(probs, debugProblem("PublicSuffix", "The IANA public suffix is the TLD of the Registered Domain", - fmt.Sprintf("The TLD for %s is: %s", domain, r))) - } - - return probs, nil -} - -// caaChecker ensures that any caa record on the domain, or up the domain tree, allow issuance for letsencrypt.org -type caaChecker struct{} - -func (c caaChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - var probs []Problem - - wildcard := false - if strings.HasPrefix(domain, "*.") { - wildcard = true - domain = domain[2:] - } - - rrs, err := ctx.Lookup(domain, dns.TypeCAA) - if err != nil { - probs = append(probs, dnsLookupFailed(domain, "CAA", err)) - return probs, nil - } - - // check any found caa records - if len(rrs) > 0 { - var issue []*dns.CAA - var issuewild []*dns.CAA - var criticalUnknown 
[]*dns.CAA - - for _, rr := range rrs { - caaRr, ok := rr.(*dns.CAA) - if !ok { - continue - } - - switch caaRr.Tag { - case "issue": - issue = append(issue, caaRr) - case "issuewild": - issuewild = append(issuewild, caaRr) - default: - if caaRr.Flag == 1 { - criticalUnknown = append(criticalUnknown, caaRr) - } - } - } - - probs = append(probs, debugProblem("CAA", - "CAA records control authorization for certificate authorities to issue certificates for a domain", - collateRecords(append(issue, issuewild...)))) - - if len(criticalUnknown) > 0 { - probs = append(probs, caaCriticalUnknown(domain, wildcard, criticalUnknown)) - return probs, nil - } - - if len(issue) == 0 && !wildcard { - return probs, nil - } - - records := issue - if wildcard && len(issuewild) > 0 { - records = issuewild - } - - for _, r := range records { - if extractIssuerDomain(r.Value) == "letsencrypt.org" { - return probs, nil - } - } - - probs = append(probs, caaIssuanceNotAllowed(domain, wildcard, records)) - return probs, nil - } - - // recurse up to the public suffix domain until a caa record is found - // a.b.c.com -> b.c.com -> c.com until - if ps, _ := publicsuffix.PublicSuffix(domain); domain != ps && ps != "" { - splitDomain := strings.SplitN(domain, ".", 2) - - parentProbs, err := c.Check(ctx, splitDomain[1], method) - if err != nil { - return nil, fmt.Errorf("error checking caa record on domain: %s, %v", splitDomain[1], err) - } - - probs = append(probs, parentProbs...) 
- } - - return probs, nil -} - -func extractIssuerDomain(value string) string { - // record can be: - // issuedomain.tld; someparams - return strings.Trim(strings.SplitN(value, ";", 2)[0], " \t") -} - -func collateRecords(records []*dns.CAA) string { - var s []string - for _, r := range records { - s = append(s, r.String()) - } - return strings.Join(s, "\n") -} - -func caaCriticalUnknown(domain string, wildcard bool, records []*dns.CAA) Problem { - return Problem{ - Name: "CAACriticalUnknown", - Explanation: fmt.Sprintf(`CAA record(s) exist on %s (wildcard=%t) that are marked as critical but are unknown to Let's Encrypt. `+ - `These record(s) as shown in the detail must be removed, or marked as non-critical, before a certificate can be issued by the Let's Encrypt CA.`, domain, wildcard), - Detail: collateRecords(records), - Severity: SeverityFatal, - } -} - -func caaIssuanceNotAllowed(domain string, wildcard bool, records []*dns.CAA) Problem { - return Problem{ - Name: "CAAIssuanceNotAllowed", - Explanation: fmt.Sprintf(`No CAA record on %s (wildcard=%t) contains the issuance domain "letsencrypt.org". `+ - `You must either add an additional record to include "letsencrypt.org" or remove every existing CAA record. `+ - `A list of the CAA records are provided in the details.`, domain, wildcard), - Detail: collateRecords(records), - Severity: SeverityFatal, - } -} - -func invalidDomain(domain, reason string) Problem { - return Problem{ - Name: "InvalidDomain", - Explanation: fmt.Sprintf(`"%s" is not a valid domain name that Let's Encrypt would be able to issue a certificate for.`, domain), - Detail: reason, - Severity: SeverityFatal, - } -} - -// cloudflareChecker determines if the domain is using cloudflare, and whether a certificate has been provisioned by cloudflare yet. 
-type cloudflareChecker struct{} - -func (c cloudflareChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - var probs []Problem - - domain = strings.TrimPrefix(domain, "*.") - - cl := http.Client{ - Timeout: httpTimeout * time.Second, - Transport: makeSingleShotHTTPTransport(), - } - resp, err := cl.Get("https://" + domain) - if err == nil { // no tls error, cert must be issued - // check if it's cloudflare - if hasCloudflareHeader(resp.Header) { - probs = append(probs, cloudflareCDN(domain)) - } - - return probs, nil - } - - // disable redirects - cl.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - } - - // attempt to connect over http with redirects disabled to check cloudflare header - resp, err = cl.Get("http://" + domain) - if err != nil { - return probs, nil - } - - if hasCloudflareHeader(resp.Header) { - probs = append(probs, cloudflareCDN(domain)) - probs = append(probs, cloudflareSslNotProvisioned(domain)) - } - - return probs, nil -} - -func hasCloudflareHeader(h http.Header) bool { - return strings.Contains(strings.ToLower(h.Get("server")), "cloudflare") -} - -func cloudflareCDN(domain string) Problem { - return Problem{ - Name: "CloudflareCDN", - Explanation: fmt.Sprintf(`The domain %s is being served through Cloudflare CDN. Any Let's Encrypt certificate installed on the `+ - `origin server will only encrypt traffic between the server and Cloudflare. 
It is strongly recommended that the SSL option 'Full SSL (strict)' `+ - `be enabled.`, domain), - Detail: "https://support.cloudflare.com/hc/en-us/articles/200170416-What-do-the-SSL-options-mean-", - Severity: SeverityWarning, - } -} - -func cloudflareSslNotProvisioned(domain string) Problem { - return Problem{ - Name: "CloudflareSSLNotProvisioned", - Explanation: fmt.Sprintf(`The domain %s is being served through Cloudflare CDN and a certificate has not yet been provisioned yet by Cloudflare.`, domain), - Detail: "https://support.cloudflare.com/hc/en-us/articles/203045244-How-long-does-it-take-for-Cloudflare-s-SSL-to-activate-", - Severity: SeverityWarning, - } -} - -// statusioChecker ensures there is no reported operational problem with the Let's Encrypt service via the status.io public api. -type statusioChecker struct{} - -// statusioSignificantStatuses denotes which statuses warrant raising a warning. -// 100 (operational) and 200 (undocumented but assume "Planned Maintenance") should not be included. 
-// https://kb.status.io/developers/status-codes/ -var statusioSignificantStatuses = map[int]bool{ - 300: true, // Degraded Performance - 400: true, // Partial Service Disruption - 500: true, // Service Disruption - 600: true, // Security Event -} - -func (c statusioChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - var probs []Problem - - resp, err := http.Get("https://api.status.io/1.0/status/55957a99e800baa4470002da") - if err != nil { - // some connectivity errors with status.io is probably not worth reporting - return probs, nil - } - defer resp.Body.Close() - - apiResp := struct { - Result struct { - StatusOverall struct { - Updated time.Time `json:"updated"` - Status string `json:"status"` - StatusCode int `json:"status_code"` - } `json:"status_overall"` - } `json:"result"` - }{} - - if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil { - return probs, fmt.Errorf("error decoding status.io api response: %v", err) - } - - if statusioSignificantStatuses[apiResp.Result.StatusOverall.StatusCode] { - probs = append(probs, statusioNotOperational(apiResp.Result.StatusOverall.Status, apiResp.Result.StatusOverall.Updated)) - } - - probs = append(probs, debugProblem("StatusIO", "The current status.io status for Let's Encrypt", - fmt.Sprintf("%v", apiResp.Result.StatusOverall.Status))) - - return probs, nil -} - -func statusioNotOperational(status string, updated time.Time) Problem { - return Problem{ - Name: "StatusNotOperational", - Explanation: fmt.Sprintf(`The current status as reported by the Let's Encrypt status page is %s as at %v. `+ - `Depending on the reported problem, this may affect certificate issuance. 
For more information, please visit the status page.`, status, updated), - Detail: "https://letsencrypt.status.io/", - Severity: SeverityWarning, - } -} - -type crtList map[string]*x509.Certificate - -// FindCommonPSLCertificates finds any certificates which contain any DNSName -// that shares the Registered Domain `registeredDomain`. -func (l crtList) FindWithCommonRegisteredDomain(registeredDomain string) sortedCertificates { - var out sortedCertificates - - for _, cert := range l { - for _, name := range cert.DNSNames { - if nameRegDomain, _ := publicsuffix.EffectiveTLDPlusOne(name); nameRegDomain == registeredDomain { - out = append(out, cert) - break - } - } - } - - sort.Sort(out) - - return out -} - -func (l crtList) GetOldestCertificate() *x509.Certificate { - var oldest *x509.Certificate - for _, crt := range l { - if oldest == nil || crt.NotBefore.Before(oldest.NotBefore) { - oldest = crt - } - } - return oldest -} - -// CountDuplicates counts how many duplicate certificates there are -// that also contain the name `domain` -func (l crtList) CountDuplicates(domain string) map[string]int { - counts := map[string]int{} - - for _, cert := range l { - found := false - for _, name := range cert.DNSNames { - if name == domain { - found = true - break - } - } - if !found { - continue - } - names := make([]string, len(cert.DNSNames)) - copy(names, cert.DNSNames) - sort.Strings(names) - k := strings.Join(names, ",") - counts[k]++ - } - - return counts -} - -// rateLimitChecker ensures that the domain is not currently affected -// by domain-based rate limits using crtwatch's database -type rateLimitChecker struct { -} - -type sortedCertificates []*x509.Certificate - -func (certs sortedCertificates) Len() int { return len(certs) } -func (certs sortedCertificates) Swap(i, j int) { certs[i], certs[j] = certs[j], certs[i] } -func (certs sortedCertificates) Less(i, j int) bool { - return certs[j].NotBefore.Before(certs[i].NotBefore) -} - -const rateLimitCheckerQuery = ` 
-WITH ci AS - (SELECT min(sub.CERTIFICATE_ID) ID, - min(sub.ISSUER_CA_ID) ISSUER_CA_ID, - sub.CERTIFICATE DER - FROM - (SELECT * - FROM certificate_and_identities cai - WHERE plainto_tsquery('%s') @@ identities(cai.CERTIFICATE) - AND cai.NAME_VALUE ILIKE ('%%%s%%') - AND x509_notBefore(cai.CERTIFICATE) >= '%s' - AND cai.issuer_ca_id IN (16418, 183267, 183283) - LIMIT 1000) sub - GROUP BY sub.CERTIFICATE) -SELECT ci.DER der -FROM ci -LEFT JOIN LATERAL - (SELECT min(ctle.ENTRY_TIMESTAMP) ENTRY_TIMESTAMP - FROM ct_log_entry ctle - WHERE ctle.CERTIFICATE_ID = ci.ID ) le ON TRUE, - ca -WHERE ci.ISSUER_CA_ID = ca.ID -ORDER BY le.ENTRY_TIMESTAMP DESC;` - -// Pointer receiver because we're keeping state across runs -func (c *rateLimitChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if os.Getenv("LETSDEBUG_DISABLE_CERTWATCH") != "" { - return nil, errNotApplicable - } - - domain = strings.TrimPrefix(domain, "*.") - - db, err := sql.Open("postgres", "user=guest dbname=certwatch host=crt.sh sslmode=disable connect_timeout=5") - if err != nil { - return []Problem{ - internalProblem(fmt.Sprintf("Failed to connect to certwatch database to check rate limits: %v", err), SeverityDebug), - }, nil - } - defer db.Close() - - // Since we are checking rate limits, we need to query the Registered Domain - // for the domain in question - registeredDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain) - - timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Avoiding using a prepared statement here because it's being weird with crt.sh - q := fmt.Sprintf(rateLimitCheckerQuery, - registeredDomain, registeredDomain, time.Now().Add(-168*time.Hour).Format(time.RFC3339)) - rows, err := db.QueryContext(timeoutCtx, q) - if err != nil && err != sql.ErrNoRows { - return []Problem{ - internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug), - }, nil - 
} - - probs := []Problem{} - - // Read in the DER-encoded certificates - certs := crtList{} - var certBytes []byte - for rows.Next() { - if err := rows.Scan(&certBytes); err != nil { - probs = append(probs, internalProblem(fmt.Sprintf("Failed to query certwatch database while checking rate limits: %v", err), SeverityDebug)) - break - } - crt, err := x509.ParseCertificate(certBytes) - if err != nil { - probs = append(probs, internalProblem(fmt.Sprintf("Failed to parse certificate while checking rate limits: %v", err), SeverityDebug)) - continue - } - certs[crt.SerialNumber.String()] = crt - } - if err := rows.Err(); err != nil { - return []Problem{ - internalProblem(fmt.Sprintf("Failed to query certwatch database to check rate limits: %v", err), SeverityDebug), - }, nil - } - - var debug string - - // Limit: Certificates per Registered Domain - // TODO: implement Renewal Exemption - certsTowardsRateLimit := certs.FindWithCommonRegisteredDomain(registeredDomain) - if len(certs) > 0 && len(certsTowardsRateLimit) >= 50 { - dropOff := certs.GetOldestCertificate().NotBefore.Add(7 * 24 * time.Hour) - dropOffDiff := time.Until(dropOff).Truncate(time.Minute) - - probs = append(probs, rateLimited(domain, fmt.Sprintf("The 'Certificates per Registered Domain' limit ("+ - "50 certificates per week that share the same Registered Domain: %s) has been exceeded. "+ - "There is no way to work around this rate limit. 
"+ - "The next non-renewal certificate for this Registered Domain should be issuable after %v (%v from now).", - registeredDomain, dropOff, dropOffDiff))) - } - - for _, cert := range certsTowardsRateLimit { - debug = fmt.Sprintf("%s\nSerial: %s\nNotBefore: %v\nNames: %v\n", debug, cert.SerialNumber.String(), cert.NotBefore, cert.DNSNames) - } - - // Limit: Duplicate Certificate limit of 5 certificates per week - for names, dupes := range certs.CountDuplicates(domain) { - if dupes < 5 { - continue - } - probs = append(probs, rateLimited(domain, - fmt.Sprintf(`The Duplicate Certificate limit (5 certificates with the exact same set of domains per week) has been `+ - `exceeded and is affecting the domain "%s". The exact set of domains affected is: "%v". It may be possible to avoid this `+ - `rate limit by issuing a certificate with an additional or different domain name.`, domain, names))) - } - - if debug != "" { - probs = append(probs, debugProblem("RateLimit", - fmt.Sprintf("%d Certificates contributing to rate limits for this domain", len(certsTowardsRateLimit)), debug)) - } - - return probs, nil -} - -func rateLimited(domain, detail string) Problem { - registeredDomain, _ := publicsuffix.EffectiveTLDPlusOne(domain) - return Problem{ - Name: "RateLimit", - Explanation: fmt.Sprintf(`%s is currently affected by Let's Encrypt-based rate limits (https://letsencrypt.org/docs/rate-limits/). `+ - `You may review certificates that have already been issued by visiting https://crt.sh/?q=%%%s . `+ - `Please note that it is not possible to ask for a rate limit to be manually cleared.`, domain, registeredDomain), - Detail: detail, - Severity: SeverityError, - } -} - -// acmeStagingChecker tries to create an authorization on -// Let's Encrypt's staging server and parse the error urn -// to see if there's anything interesting reported. 
-type acmeStagingChecker struct { - client acme.Client - account acme.Account - clientMu sync.Mutex -} - -func (c *acmeStagingChecker) buildAcmeClient() error { - cl, err := acme.NewClient("https://acme-staging-v02.api.letsencrypt.org/directory") - if err != nil { - return err - } - - // Give the ACME CA more time to complete challenges - cl.PollTimeout = 100 * time.Second - - regrPath := os.Getenv("LETSDEBUG_ACMESTAGING_ACCOUNTFILE") - if regrPath == "" { - regrPath = "acme-account.json" - } - buf, err := ioutil.ReadFile(regrPath) - if err != nil { - return err - } - - var out struct { - PEM string `json:"pem"` - URL string `json:"url"` - } - if err := json.Unmarshal(buf, &out); err != nil { - return err - } - - block, _ := pem.Decode([]byte(out.PEM)) - pk, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return err - } - - c.account = acme.Account{PrivateKey: pk, URL: out.URL} - c.client = cl - - return nil -} - -func (c *acmeStagingChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if os.Getenv("LETSDEBUG_DISABLE_ACMESTAGING") != "" { - return nil, errNotApplicable - } - - c.clientMu.Lock() - if c.account.PrivateKey == nil { - if err := c.buildAcmeClient(); err != nil { - c.clientMu.Unlock() - return []Problem{ - internalProblem(fmt.Sprintf("Couldn't setup Let's Encrypt staging checker, skipping: %v", err), SeverityWarning), - }, nil - } - } - c.clientMu.Unlock() - - probs := []Problem{} - - order, err := c.client.NewOrder(c.account, []acme.Identifier{{Type: "dns", Value: domain}}) - if err != nil { - if p := translateAcmeError(domain, err); p.Name != "" { - probs = append(probs, p) - } - probs = append(probs, debugProblem("LetsEncryptStaging", "Order creation error", err.Error())) - return probs, nil - } - - var wg sync.WaitGroup - wg.Add(len(order.Authorizations)) - var probsMu sync.Mutex - - unhandledError := func(err error) { - probsMu.Lock() - defer probsMu.Unlock() - - probs = append(probs, 
internalProblem("An unknown problem occurred while performing a test "+ - "authorization against the Let's Encrypt staging service: "+err.Error(), SeverityWarning)) - } - - authzFailures := []string{} - - for _, authzURL := range order.Authorizations { - go func(authzURL string) { - defer wg.Done() - - authz, err := c.client.FetchAuthorization(c.account, authzURL) - if err != nil { - unhandledError(err) - return - } - - chal, ok := authz.ChallengeMap[string(method)] - if !ok { - unhandledError(fmt.Errorf("Missing challenge method (want %v): %v", method, authz.ChallengeMap)) - return - } - - if _, err := c.client.UpdateChallenge(c.account, chal); err != nil { - probsMu.Lock() - if p := translateAcmeError(domain, err); p.Name != "" { - probs = append(probs, p) - } - authzFailures = append(authzFailures, err.Error()) - probsMu.Unlock() - } - }(authzURL) - } - - wg.Wait() - - if len(authzFailures) > 0 { - probs = append(probs, debugProblem("LetsEncryptStaging", - fmt.Sprintf("Challenge update failures for %s in order %s", domain, order.URL), - strings.Join(authzFailures, "\n"))) - } else { - probs = append(probs, debugProblem("LetsEncryptStaging", "Order for "+domain, order.URL)) - } - - return probs, nil -} - -func translateAcmeError(domain string, err error) Problem { - if acmeErr, ok := err.(acme.Problem); ok { - urn := strings.TrimPrefix(acmeErr.Type, "urn:ietf:params:acme:error:") - switch urn { - case "rejectedIdentifier", "unknownHost", "rateLimited", "caa", "dns", "connection": - // Boulder can send error:dns when _acme-challenge is NXDOMAIN, which is - // equivalent to unauthorized - if strings.Contains(acmeErr.Detail, "NXDOMAIN looking up TXT") { - return Problem{} - } - return letsencryptProblem(domain, acmeErr.Detail, SeverityError) - // When something bad is happening on staging - case "serverInternal": - return letsencryptProblem(domain, - fmt.Sprintf(`There may be internal issues on the staging service: %v`, acmeErr.Detail), SeverityWarning) - // 
Unauthorized is what we expect, except for these exceptions that we should handle: - // - When VA OR RA is checking Google Safe Browsing (groan) - case "unauthorized": - if strings.Contains(acmeErr.Detail, "considered an unsafe domain") { - return letsencryptProblem(domain, acmeErr.Detail, SeverityError) - } - return Problem{} - default: - return Problem{} - } - } - return internalProblem(fmt.Sprintf("An unknown issue occurred when performing a test authorization "+ - "against the Let's Encrypt staging service: %v", err), SeverityWarning) -} - -func letsencryptProblem(domain, detail string, severity SeverityLevel) Problem { - return Problem{ - Name: "IssueFromLetsEncrypt", - Explanation: fmt.Sprintf(`A test authorization for %s to the Let's Encrypt staging service has revealed `+ - `issues that may prevent any certificate for this domain being issued.`, domain), - Detail: detail, - Severity: severity, - } -} - -// ofacSanctionChecker checks whether a Registered Domain is present on the the XML sanctions list -// (https://www.treasury.gov/ofac/downloads/sdn.xml). -// It is disabled by default, and must be enabled with the environment variable LETSDEBUG_ENABLE_OFAC=1 -type ofacSanctionChecker struct { - muRefresh sync.RWMutex - domains map[string]struct{} -} - -func (c *ofacSanctionChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" { - return nil, errNotApplicable - } - c.muRefresh.RLock() - defer c.muRefresh.RUnlock() - - rd, _ := publicsuffix.EffectiveTLDPlusOne(domain) - for sanctionedRD := range c.domains { - if rd != sanctionedRD { - continue - } - - return []Problem{{ - Name: "SanctionedDomain", - Explanation: fmt.Sprintf("The Registered Domain %s was found on the United States' OFAC "+ - "Specially Designated Nationals and Blocked Persons (SDN) List. Let's Encrypt are unable to issue certificates "+ - "for sanctioned entities. 
Search on https://sanctionssearch.ofac.treas.gov/ for futher details.", sanctionedRD), - Severity: SeverityError, - }}, nil - } - - return nil, nil -} - -func (c *ofacSanctionChecker) setup() { - if os.Getenv("LETSDEBUG_ENABLE_OFAC") != "1" { - return - } - c.domains = map[string]struct{}{} - go func() { - for { - if err := c.poll(); err != nil { - fmt.Printf("OFAC SDN poller failed: %v\n", err) - } - time.Sleep(24 * time.Hour) - } - }() -} - -func (c *ofacSanctionChecker) poll() error { - req, _ := http.NewRequest(http.MethodGet, "https://www.treasury.gov/ofac/downloads/sdn.xml", nil) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - req = req.WithContext(ctx) - req.Header.Set("User-Agent", "Let's Debug (https://letsdebug.net)") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - dec := xml.NewDecoder(resp.Body) - - registeredDomains := map[string]struct{}{} - isID := false - for { - tok, _ := dec.Token() - if tok == nil { - break - } - - switch el := tok.(type) { - case xml.StartElement: - if el.Name.Local == "id" { - isID = true - break - } - if el.Name.Local == "idType" { - next, _ := dec.Token() - if next == nil { - break - } - raw, ok := next.(xml.CharData) - if !ok { - break - } - if string(raw) != "Website" { - isID = false - break - } - break - } - if el.Name.Local == "idNumber" && isID { - next, _ := dec.Token() - if next == nil { - break - } - raw, ok := next.(xml.CharData) - if !ok { - break - } - if rd := c.extractRegisteredDomain(string(raw)); rd != "" { - registeredDomains[rd] = struct{}{} - } - } - case xml.EndElement: - if el.Name.Local == "id" { - isID = false - break - } - } - } - - c.muRefresh.Lock() - defer c.muRefresh.Unlock() - - c.domains = registeredDomains - - return nil -} - -func (c *ofacSanctionChecker) extractRegisteredDomain(d string) string { - d = strings.ToLower(strings.TrimSpace(d)) - if len(d) == 0 { - return "" - } - // If 
there's a protocol or path, then we need to parse the URL and extract the host - if strings.Contains(d, "/") { - u, err := url.Parse(d) - if err != nil { - return "" - } - d = u.Host - } - d, _ = publicsuffix.EffectiveTLDPlusOne(d) - return d -} diff --git a/vendor/github.com/letsdebug/letsdebug/http01.go b/vendor/github.com/letsdebug/letsdebug/http01.go deleted file mode 100644 index 01c65c03..00000000 --- a/vendor/github.com/letsdebug/letsdebug/http01.go +++ /dev/null @@ -1,268 +0,0 @@ -package letsdebug - -import ( - "bytes" - "fmt" - "net" - "strings" - "sync" - - "github.com/miekg/dns" -) - -var ( - likelyModemRouters = []string{"micro_httpd", "cisco-IOS", "LANCOM", "Mini web server 1.0 ZTE corp 2005."} - isLikelyNginxTestcookiePayloads = [][]byte{ - []byte(`src="/aes.js"`), - []byte(`src="/aes.min.js"`), - []byte(`var a=toNumbers`)} - isHTTP497Payloads = [][]byte{ - // httpd: https://github.com/apache/httpd/blob/e820d1ea4d3f1f5152574dbaa13979887a5c14b7/modules/ssl/ssl_engine_kernel.c#L322 - []byte("You're speaking plain HTTP to an SSL-enabled server port"), - // nginx: https://github.com/nginx/nginx/blob/15544440425008d5ad39a295b826665ad56fdc90/src/http/ngx_http_special_response.c#L274 - []byte("400 The plain HTTP request was sent to HTTPS port"), - } -) - -// dnsAChecker checks if there are any issues in Unbound looking up the A and -// AAAA records for a domain (such as DNSSEC issues or dead nameservers) -type dnsAChecker struct{} - -func (c dnsAChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if method != HTTP01 { - return nil, errNotApplicable - } - - var probs []Problem - var aRRs, aaaaRRs []dns.RR - var aErr, aaaaErr error - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - aaaaRRs, aaaaErr = ctx.Lookup(domain, dns.TypeAAAA) - }() - - go func() { - defer wg.Done() - aRRs, aErr = ctx.Lookup(domain, dns.TypeA) - }() - - wg.Wait() - - if aErr != nil { - probs = append(probs, 
dnsLookupFailed(domain, "A", aErr)) - } - if aaaaErr != nil { - probs = append(probs, dnsLookupFailed(domain, "AAAA", aaaaErr)) - } - - for _, rr := range aRRs { - if aRR, ok := rr.(*dns.A); ok && isAddressReserved(aRR.A) { - probs = append(probs, reservedAddress(domain, aRR.A.String())) - } - } - for _, rr := range aaaaRRs { - if aaaaRR, ok := rr.(*dns.AAAA); ok && isAddressReserved(aaaaRR.AAAA) { - probs = append(probs, reservedAddress(domain, aaaaRR.AAAA.String())) - } - } - - var sb []string - for _, rr := range append(aRRs, aaaaRRs...) { - sb = append(sb, rr.String()) - } - - if len(sb) > 0 { - probs = append(probs, debugProblem("HTTPRecords", "A and AAAA records found for this domain", strings.Join(sb, "\n"))) - } - - if len(sb) == 0 { - probs = append(probs, noRecords(domain, "No A or AAAA records found.")) - } - - return probs, nil -} - -// httpAccessibilityChecker checks whether an HTTP ACME validation request -// would lead to any issues such as: -// - Bad redirects -// - IPs not listening on port 80 -type httpAccessibilityChecker struct{} - -func (c httpAccessibilityChecker) Check(ctx *scanContext, domain string, method ValidationMethod) ([]Problem, error) { - if method != HTTP01 { - return nil, errNotApplicable - } - - var probs []Problem - - var ips []net.IP - - rrs, _ := ctx.Lookup(domain, dns.TypeAAAA) - for _, rr := range rrs { - aaaa, ok := rr.(*dns.AAAA) - if !ok { - continue - } - ips = append(ips, aaaa.AAAA) - } - rrs, _ = ctx.Lookup(domain, dns.TypeA) - for _, rr := range rrs { - a, ok := rr.(*dns.A) - if !ok { - continue - } - ips = append(ips, a.A) - } - - if len(ips) == 0 { - return probs, nil - } - - // Track whether responses differ between any of the A/AAAA addresses - // for the domain - allCheckResults := []httpCheckResult{} - - var debug []string - - for _, ip := range ips { - res, prob := checkHTTP(ctx, domain, ip) - allCheckResults = append(allCheckResults, res) - if !prob.IsZero() { - probs = append(probs, prob) - } - debug = 
append(debug, fmt.Sprintf("Request to: %s/%s, Result: %s, Issue: %s\nTrace:\n%s\n", - domain, ip.String(), res.String(), prob.Name, strings.Join(res.DialStack, "\n"))) - } - - // Filter out the servers that didn't respond at all - var nonZeroResults []httpCheckResult - for _, v := range allCheckResults { - if v.IsZero() { - continue - } - nonZeroResults = append(nonZeroResults, v) - } - if len(nonZeroResults) > 1 { - firstResult := nonZeroResults[0] - for _, otherResult := range nonZeroResults[1:] { - if firstResult.StatusCode != otherResult.StatusCode || - firstResult.ServerHeader != otherResult.ServerHeader || - firstResult.NumRedirects != otherResult.NumRedirects || - firstResult.InitialStatusCode != otherResult.InitialStatusCode { - probs = append(probs, multipleIPAddressDiscrepancy(domain, firstResult, otherResult)) - } - } - } - - probs = append(probs, debugProblem("HTTPCheck", "Requests made to the domain", strings.Join(debug, "\n"))) - - if res := isLikelyModemRouter(allCheckResults); !res.IsZero() { - probs = append(probs, Problem{ - Name: "PortForwarding", - Explanation: "A request to your domain revealed that the web server that responded may be " + - "the administrative interface of a modem or router. This can indicate an issue with the port forwarding " + - "setup on that modem or router. You may need to reconfigure the device to properly forward traffic to your " + - "intended webserver.", - Detail: fmt.Sprintf(`The web server that responded identified itself as "%s", `+ - "which is a known webserver commonly used by modems/routers.", res.ServerHeader), - Severity: SeverityWarning, - }) - } - - if res := isLikelyNginxTestcookie(allCheckResults); !res.IsZero() { - probs = append(probs, Problem{ - Name: "BlockedByNginxTestCookie", - Explanation: "The validation request to this domain was blocked by a deployment of the nginx " + - "testcookie module (https://github.com/kyprizel/testcookie-nginx-module). 
This module is designed to " + - "block robots, and causes the Let's Encrypt validation process to fail. The server administrator can " + - "solve this issue by disabling the module (`testcookie off;`) for requests under the path of `/.well-known" + - "/acme-challenge/`.", - Detail: fmt.Sprintf("The server at %s produced this result.", res.IP.String()), - Severity: SeverityError, - }) - } - - if res := isHTTP497(allCheckResults); !res.IsZero() { - probs = append(probs, Problem{ - Name: "HttpOnHttpsPort", - Explanation: "A validation request to this domain resulted in an HTTP request being made to a port that expects " + - "to receive HTTPS requests. This could be the result of an incorrect redirect (such as to http://example.com:443/) " + - "or it could be the result of a webserver misconfiguration, such as trying to enable SSL on a port 80 virtualhost.", - Detail: strings.Join(res.DialStack, "\n"), - Severity: SeverityError, - }) - } - - return probs, nil -} - -func noRecords(name, rrSummary string) Problem { - return Problem{ - Name: "NoRecords", - Explanation: fmt.Sprintf(`No valid A or AAAA records could be ultimately resolved for %s. `+ - `This means that Let's Encrypt would not be able to to connect to your domain to perform HTTP validation, since `+ - `it would not know where to connect to.`, name), - Detail: rrSummary, - Severity: SeverityFatal, - } -} - -func reservedAddress(name, address string) Problem { - return Problem{ - Name: "ReservedAddress", - Explanation: fmt.Sprintf(`A private, inaccessible, IANA/IETF-reserved IP address was found for %s. Let's Encrypt will always fail HTTP validation `+ - `for any domain that is pointing to an address that is not routable on the internet. 
You should either remove this address `+ - `and replace it with a public one or use the DNS validation method instead.`, name), - Detail: address, - Severity: SeverityFatal, - } -} - -func multipleIPAddressDiscrepancy(domain string, result1, result2 httpCheckResult) Problem { - return Problem{ - Name: "MultipleIPAddressDiscrepancy", - Explanation: fmt.Sprintf(`%s has multiple IP addresses in its DNS records. While they appear to be accessible on the network, `+ - `we have detected that they produce differing results when sent an ACME HTTP validation request. This may indicate that `+ - `some of the IP addresses may unintentionally point to different servers, which would cause validation to fail.`, - domain), - Detail: fmt.Sprintf("%s vs %s", result1.String(), result2.String()), - Severity: SeverityWarning, - } -} - -func isLikelyModemRouter(results []httpCheckResult) httpCheckResult { - for _, res := range results { - for _, toMatch := range likelyModemRouters { - if res.ServerHeader == toMatch { - return res - } - } - } - return httpCheckResult{} -} - -func isLikelyNginxTestcookie(results []httpCheckResult) httpCheckResult { - for _, res := range results { - for _, needle := range isLikelyNginxTestcookiePayloads { - if bytes.Contains(res.Content, needle) { - return res - } - } - } - return httpCheckResult{} -} - -func isHTTP497(results []httpCheckResult) httpCheckResult { - for _, res := range results { - for _, needle := range isHTTP497Payloads { - if bytes.Contains(res.Content, needle) { - return res - } - } - } - return httpCheckResult{} -} diff --git a/vendor/github.com/letsdebug/letsdebug/http_util.go b/vendor/github.com/letsdebug/letsdebug/http_util.go deleted file mode 100644 index 75345c4f..00000000 --- a/vendor/github.com/letsdebug/letsdebug/http_util.go +++ /dev/null @@ -1,310 +0,0 @@ -package letsdebug - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -const ( - 
httpTimeout = 10 -) - -type redirectError string - -func (e redirectError) Error() string { - return string(e) -} - -type httpCheckResult struct { - StatusCode int - ServerHeader string - IP net.IP - InitialStatusCode int - NumRedirects int - FirstDial time.Time - DialStack []string - Content []byte -} - -func (r *httpCheckResult) Trace(s string) { - if r.FirstDial.IsZero() { - r.FirstDial = time.Now() - } - r.DialStack = append(r.DialStack, - fmt.Sprintf("@%dms: %s", time.Since(r.FirstDial).Nanoseconds()/1e6, s)) -} - -func (r httpCheckResult) IsZero() bool { - return r.StatusCode == 0 -} - -func (r httpCheckResult) String() string { - addrType := "IPv6" - if r.IP.To4() != nil { - addrType = "IPv4" - } - - lines := []string{ - "Address=" + r.IP.String(), - "Address Type=" + addrType, - "Server=" + r.ServerHeader, - "HTTP Status=" + strconv.Itoa(r.InitialStatusCode), - } - if r.NumRedirects > 0 { - lines = append(lines, "Number of Redirects="+strconv.Itoa(r.NumRedirects)) - lines = append(lines, "Final HTTP Status="+strconv.Itoa(r.StatusCode)) - } - - return fmt.Sprintf("[%s]", strings.Join(lines, ",")) -} - -type checkHTTPTransport struct { - transport http.RoundTripper - result *httpCheckResult -} - -func (t checkHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { - resp, err := t.transport.RoundTrip(req) - - if t.result != nil && err != nil { - t.result.Trace(fmt.Sprintf("Experienced error: %v", err)) - } - - if t.result != nil && resp != nil { - if t.result.InitialStatusCode == 0 { - t.result.InitialStatusCode = resp.StatusCode - } - - t.result.Trace(fmt.Sprintf("Server response: HTTP %s", resp.Status)) - } - - return resp, err -} - -func makeSingleShotHTTPTransport() *http.Transport { - return &http.Transport{ - // Boulder VA's HTTP transport settings - // https://github.com/letsencrypt/boulder/blob/387e94407c58fe0ff65207a89304776ee7417410/va/http.go#L143-L160 - DisableKeepAlives: true, - IdleConnTimeout: time.Second, - TLSHandshakeTimeout: 
10 * time.Second, - MaxIdleConns: 1, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } -} - -func checkHTTP(scanCtx *scanContext, domain string, address net.IP) (httpCheckResult, Problem) { - dialer := net.Dialer{ - Timeout: httpTimeout * time.Second, - } - - checkRes := &httpCheckResult{ - IP: address, - DialStack: []string{}, - } - - var redirErr redirectError - - baseHTTPTransport := makeSingleShotHTTPTransport() - baseHTTPTransport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - host, port, _ := net.SplitHostPort(addr) - host = normalizeFqdn(host) - - dialFunc := func(ip net.IP, port string) (net.Conn, error) { - checkRes.Trace(fmt.Sprintf("Dialing %s", ip.String())) - if ip.To4() == nil { - return dialer.DialContext(ctx, "tcp", "["+ip.String()+"]:"+port) - } - return dialer.DialContext(ctx, "tcp", ip.String()+":"+port) - } - - // Only override the address for this specific domain. - // We don't want to mangle redirects. - if host == domain { - return dialFunc(address, port) - } - - // For other hosts, we need to use Unbound to resolve the name - otherAddr, err := scanCtx.LookupRandomHTTPRecord(host) - if err != nil { - return nil, err - } - - return dialFunc(otherAddr, port) - } - - cl := http.Client{ - Transport: checkHTTPTransport{ - result: checkRes, - transport: baseHTTPTransport, - }, - // boulder: va.go fetchHTTP - CheckRedirect: func(req *http.Request, via []*http.Request) error { - checkRes.NumRedirects++ - - if len(via) >= 10 { - redirErr = redirectError(fmt.Sprintf("Too many (%d) redirects, last redirect was to: %s", len(via), req.URL.String())) - return redirErr - } - - checkRes.Trace(fmt.Sprintf("Received redirect to %s", req.URL.String())) - - host := req.URL.Host - if _, p, err := net.SplitHostPort(host); err == nil { - if port, _ := strconv.Atoi(p); port != 80 && port != 443 { - redirErr = redirectError(fmt.Sprintf("Bad port number provided when fetching %s: %s", req.URL.String(), p)) - 
return redirErr - } - } - - scheme := strings.ToLower(req.URL.Scheme) - if scheme != "http" && scheme != "https" { - redirErr = redirectError(fmt.Sprintf("Bad scheme provided when fetching %s: %s", req.URL.String(), scheme)) - return redirErr - } - - // Also check for domain.tld.well-known/acme-challenge - if strings.HasSuffix(req.URL.Hostname(), ".well-known") { - redirErr = redirectError(fmt.Sprintf("It appears that a redirect was generated by your web server that is missing a trailing "+ - "slash after your domain name: %v. Check your web server configuration and .htaccess for Redirect/RedirectMatch/RewriteRule.", - req.URL.String())) - return redirErr - } - - return nil - }, - } - - reqURL := "http://" + domain + "/.well-known/acme-challenge/" + scanCtx.httpRequestPath - checkRes.Trace(fmt.Sprintf("Making a request to %s (using initial IP %s)", reqURL, address)) - - req, err := http.NewRequest("GET", reqURL, nil) - if err != nil { - return *checkRes, internalProblem(fmt.Sprintf("Failed to construct validation request: %v", err), SeverityError) - } - - req.Header.Set("Accept", "*/*") - req.Header.Set("User-Agent", "Mozilla/5.0 (compatible; Let's Debug emulating Let's Encrypt validation server; +https://letsdebug.net)") - - ctx, cancel := context.WithTimeout(context.Background(), httpTimeout*time.Second) - defer cancel() - - req = req.WithContext(ctx) - - resp, err := cl.Do(req) - if resp != nil { - checkRes.StatusCode = resp.StatusCode - checkRes.ServerHeader = resp.Header.Get("Server") - } - if err != nil { - if redirErr != "" { - err = redirErr - } - return *checkRes, translateHTTPError(domain, address, err, checkRes.DialStack) - } - - defer resp.Body.Close() - - maxLen := 1024 - if l := len(scanCtx.httpExpectResponse) + 2; l > maxLen { - maxLen = l - } - r := io.LimitReader(resp.Body, int64(maxLen)) - - buf, err := ioutil.ReadAll(r) - checkRes.Content = buf - - // If we expect a certain response, check for it - if scanCtx.httpExpectResponse != "" { - if err 
!= nil { - return *checkRes, translateHTTPError(domain, address, - fmt.Errorf(`This test expected the server to respond with "%s" but instead we experienced an error reading the response: %v`, - scanCtx.httpExpectResponse, err), - checkRes.DialStack) - } else if respStr := string(buf); respStr != scanCtx.httpExpectResponse { - return *checkRes, translateHTTPError(domain, address, - fmt.Errorf(`This test expected the server to respond with "%s" but instead we got a response beginning with "%s"`, - scanCtx.httpExpectResponse, respStr), - checkRes.DialStack) - } - } - - return *checkRes, Problem{} -} - -func translateHTTPError(domain string, address net.IP, e error, dialStack []string) Problem { - if redirErr, ok := e.(redirectError); ok { - return badRedirect(domain, redirErr, dialStack) - } - - if strings.HasSuffix(e.Error(), "http: server gave HTTP response to HTTPS client") { - return httpServerMisconfiguration(domain, "Web server is serving the wrong protocol on the wrong port: "+e.Error()+ - ". 
This may be due to a previous HTTP redirect rather than a webserver misconfiguration.\n\nTrace:\n"+strings.Join(dialStack, "\n")) - } - - // Make a nicer error message if it was a context timeout - if urlErr, ok := e.(*url.Error); ok && urlErr.Timeout() { - e = fmt.Errorf("A timeout was experienced while communicating with %s/%s: %v", - domain, address.String(), urlErr) - } - - if address.To4() == nil { - return aaaaNotWorking(domain, address.String(), e, dialStack) - } else { - return aNotWorking(domain, address.String(), e, dialStack) - } -} - -func httpServerMisconfiguration(domain, detail string) Problem { - return Problem{ - Name: "WebserverMisconfiguration", - Explanation: fmt.Sprintf(`%s's webserver may be misconfigured.`, domain), - Detail: detail, - Severity: SeverityError, - } -} - -func aaaaNotWorking(domain, ipv6Address string, err error, dialStack []string) Problem { - return Problem{ - Name: "AAAANotWorking", - Explanation: fmt.Sprintf(`%s has an AAAA (IPv6) record (%s) but a test request to this address over port 80 did not succeed. `+ - `Your web server must have at least one working IPv4 or IPv6 address. `+ - `You should either ensure that validation requests to this domain succeed over IPv6, or remove its AAAA record.`, - domain, ipv6Address), - Detail: fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")), - Severity: SeverityError, - } -} - -func aNotWorking(domain, addr string, err error, dialStack []string) Problem { - return Problem{ - Name: "ANotWorking", - Explanation: fmt.Sprintf(`%s has an A (IPv4) record (%s) but a request to this address over port 80 did not succeed. 
`+ - `Your web server must have at least one working IPv4 or IPv6 address.`, - domain, addr), - Detail: fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")), - Severity: SeverityError, - } -} - -func badRedirect(domain string, err error, dialStack []string) Problem { - return Problem{ - Name: "BadRedirect", - Explanation: fmt.Sprintf(`Sending an ACME HTTP validation request to %s results in an unacceptable redirect. `+ - `This is most likely a misconfiguration of your web server or your web application.`, - domain), - Detail: fmt.Sprintf("%s\n\nTrace:\n%s", err.Error(), strings.Join(dialStack, "\n")), - Severity: SeverityError, - } -} diff --git a/vendor/github.com/letsdebug/letsdebug/letsdebug.go b/vendor/github.com/letsdebug/letsdebug/letsdebug.go deleted file mode 100644 index e63dd480..00000000 --- a/vendor/github.com/letsdebug/letsdebug/letsdebug.go +++ /dev/null @@ -1,85 +0,0 @@ -// Package letsdebug provides an library, web API and CLI to provide diagnostic -// information for why a particular (FQDN, ACME Validation Method) pair *may* fail -// when attempting to issue an SSL Certificate from Let's Encrypt (https://letsencrypt.org). -// -// The usage cannot be generalized to other ACME providers, as the policies checked by this package -// are specific to Let's Encrypt, rather than being mandated by the ACME protocol. -// -// This package relies on libunbound. -package letsdebug - -import ( - "fmt" - "os" - "reflect" - "time" -) - -// Options provide additional configuration to the various checkers -type Options struct { - // HTTPRequestPath alters the /.well-known/acme-challenge/letsdebug-test to - // /acme-challenge/acme-challenge/{{ HTTPRequestPath }} - HTTPRequestPath string - // HTTPExpectResponse causes the HTTP checker to require the remote server to - // respond with specific content. If the content does not match, then the test - // will fail with severity Error. 
- HTTPExpectResponse string -} - -// Check calls CheckWithOptions with default options -func Check(domain string, method ValidationMethod) (probs []Problem, retErr error) { - return CheckWithOptions(domain, method, Options{}) -} - -// CheckWithOptions will run each checker against the domain and validation method provided. -// It is expected that this method may take a long time to execute, and may not be cancelled. -func CheckWithOptions(domain string, method ValidationMethod, opts Options) (probs []Problem, retErr error) { - defer func() { - if r := recover(); r != nil { - retErr = fmt.Errorf("panic: %v", r) - } - }() - - ctx := newScanContext() - if opts.HTTPRequestPath != "" { - ctx.httpRequestPath = opts.HTTPRequestPath - } - if opts.HTTPExpectResponse != "" { - ctx.httpExpectResponse = opts.HTTPExpectResponse - } - - domain = normalizeFqdn(domain) - - for _, checker := range checkers { - t := reflect.TypeOf(checker) - debug("[*] + %v\n", t) - start := time.Now() - checkerProbs, err := checker.Check(ctx, domain, method) - debug("[*] - %v in %v\n", t, time.Since(start)) - if err == nil { - if len(checkerProbs) > 0 { - probs = append(probs, checkerProbs...) - } - // dont continue checking when a fatal error occurs - if hasFatalProblem(probs) { - break - } - } else if err != errNotApplicable { - return nil, err - } - } - return probs, nil -} - -var isDebug *bool - -func debug(format string, args ...interface{}) { - if isDebug == nil { - d := os.Getenv("LETSDEBUG_DEBUG") != "" - isDebug = &d - } - if !(*isDebug) { - return - } - fmt.Fprintf(os.Stderr, format, args...) 
-} diff --git a/vendor/github.com/letsdebug/letsdebug/problem.go b/vendor/github.com/letsdebug/letsdebug/problem.go deleted file mode 100644 index 1a2689f0..00000000 --- a/vendor/github.com/letsdebug/letsdebug/problem.go +++ /dev/null @@ -1,75 +0,0 @@ -package letsdebug - -import ( - "fmt" - "strings" -) - -// SeverityLevel represents the priority of a reported problem -type SeverityLevel string - -// Problem represents an issue found by one of the checkers in this package. -// Explanation is a human-readable explanation of the issue. -// Detail is usually the underlying machine error. -type Problem struct { - Name string `json:"name"` - Explanation string `json:"explanation"` - Detail string `json:"detail"` - Severity SeverityLevel `json:"severity"` -} - -const ( - SeverityFatal SeverityLevel = "Fatal" // Represents a fatal error which will stop any further checks - SeverityError SeverityLevel = "Error" - SeverityWarning SeverityLevel = "Warning" - SeverityDebug SeverityLevel = "Debug" // Not to be shown by default -) - -func (p Problem) String() string { - return fmt.Sprintf("[%s] %s: %s", p.Name, p.Explanation, p.Detail) -} - -func (p Problem) IsZero() bool { - return p.Name == "" -} - -func (p Problem) DetailLines() []string { - return strings.Split(p.Detail, "\n") -} - -func hasFatalProblem(probs []Problem) bool { - for _, p := range probs { - if p.Severity == SeverityFatal { - return true - } - } - - return false -} - -func internalProblem(message string, level SeverityLevel) Problem { - return Problem{ - Name: "InternalProblem", - Explanation: "An internal error occurred while checking the domain", - Detail: message, - Severity: level, - } -} - -func dnsLookupFailed(name, rrType string, err error) Problem { - return Problem{ - Name: "DNSLookupFailed", - Explanation: fmt.Sprintf(`A fatal issue occurred during the DNS lookup process for %s/%s.`, name, rrType), - Detail: err.Error(), - Severity: SeverityFatal, - } -} - -func debugProblem(name, message, detail 
string) Problem { - return Problem{ - Name: name, - Explanation: message, - Detail: detail, - Severity: SeverityDebug, - } -} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore deleted file mode 100644 index 0f1d00e1..00000000 --- a/vendor/github.com/lib/pq/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.db -*.test -*~ -*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh deleted file mode 100644 index ebf44703..00000000 --- a/vendor/github.com/lib/pq/.travis.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -set -eu - -client_configure() { - sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key -} - -pgdg_repository() { - local sourcelist='sources.list.d/postgresql.list' - - curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - - echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" - sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update -} - -postgresql_configure() { - sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config - local all all trust - hostnossl all pqgossltest 127.0.0.1/32 reject - hostnossl all pqgosslcert 127.0.0.1/32 reject - hostssl all pqgossltest 127.0.0.1/32 trust - hostssl all pqgosslcert 127.0.0.1/32 cert - host all all 127.0.0.1/32 trust - hostnossl all pqgossltest ::1/128 reject - hostnossl all pqgosslcert ::1/128 reject - hostssl all pqgossltest ::1/128 trust - hostssl all pqgosslcert ::1/128 cert - host all all ::1/128 trust - config - - xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates - certs/root.crt - certs/server.crt - certs/server.key - certificates - - sort -VCu <<-versions || - $PGVERSION - 9.2 - versions - sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config - ssl_ca_file = 'root.crt' - ssl_cert_file = 
'server.crt' - ssl_key_file = 'server.key' - config - - echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null - - sudo service postgresql restart -} - -postgresql_install() { - xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages - postgresql-$PGVERSION - postgresql-server-dev-$PGVERSION - postgresql-contrib-$PGVERSION - packages -} - -postgresql_uninstall() { - sudo service postgresql stop - xargs sudo apt-get -y --purge remove <<-packages - libpq-dev - libpq5 - postgresql - postgresql-client-common - postgresql-common - packages - sudo rm -rf /var/lib/postgresql -} - -$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml deleted file mode 100644 index 3498c53d..00000000 --- a/vendor/github.com/lib/pq/.travis.yml +++ /dev/null @@ -1,44 +0,0 @@ -language: go - -go: - - 1.13.x - - 1.14.x - - master - -sudo: true - -env: - global: - - PGUSER=postgres - - PQGOSSLTESTS=1 - - PQSSLCERTTEST_PATH=$PWD/certs - - PGHOST=127.0.0.1 - matrix: - - PGVERSION=10 - - PGVERSION=9.6 - - PGVERSION=9.5 - - PGVERSION=9.4 - -before_install: - - ./.travis.sh postgresql_uninstall - - ./.travis.sh pgdg_repository - - ./.travis.sh postgresql_install - - ./.travis.sh postgresql_configure - - ./.travis.sh client_configure - - go get golang.org/x/tools/cmd/goimports - - go get golang.org/x/lint/golint - - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3 - -before_script: - - createdb pqgotest - - createuser -DRS pqgossltest - - createuser -DRS pqgosslcert - -script: - - > - goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' - - go vet ./... - - staticcheck -go 1.13 ./... - - golint ./... - - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... - - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... 
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md deleted file mode 100644 index 5773904a..00000000 --- a/vendor/github.com/lib/pq/LICENSE.md +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2011-2013, 'pq' Contributors -Portions Copyright (C) 2011 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md deleted file mode 100644 index c972a86a..00000000 --- a/vendor/github.com/lib/pq/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# pq - A pure Go postgres driver for Go's database/sql package - -[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) - -## Install - - go get github.com/lib/pq - -## Features - -* SSL -* Handles bad connections for `database/sql` -* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) -* Scan binary blobs correctly (i.e. 
`bytea`) -* Package for `hstore` support -* COPY FROM support -* pq.ParseURL for converting urls to connection strings for sql.Open. -* Many libpq compatible environment variables -* Unix socket support -* Notifications: `LISTEN`/`NOTIFY` -* pgpass support -* GSS (Kerberos) auth - -## Tests - -`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. - -## Status - -This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md deleted file mode 100644 index f0502111..00000000 --- a/vendor/github.com/lib/pq/TESTS.md +++ /dev/null @@ -1,33 +0,0 @@ -# Tests - -## Running Tests - -`go test` is used for testing. A running PostgreSQL -server is required, with the ability to log in. The -database to connect to test with is "pqgotest," on -"localhost" but these can be overridden using [environment -variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). - -Example: - - PGHOST=/run/postgresql go test - -## Benchmarks - -A benchmark suite can be run as part of the tests: - - go test -bench . 
- -## Example setup (Docker) - -Run a postgres container: - -``` -docker run --expose 5432:5432 postgres -``` - -Run tests: - -``` -PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test -``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go deleted file mode 100644 index e4933e22..00000000 --- a/vendor/github.com/lib/pq/array.go +++ /dev/null @@ -1,756 +0,0 @@ -package pq - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding/hex" - "fmt" - "reflect" - "strconv" - "strings" -) - -var typeByteSlice = reflect.TypeOf([]byte{}) -var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() - -// Array returns the optimal driver.Valuer and sql.Scanner for an array or -// slice of any dimension. -// -// For example: -// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) -// -// var x []sql.NullInt64 -// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) -// -// Scanning multi-dimensional arrays is not supported. Arrays where the lower -// bound is not one (such as `[0:0]={1}') are not supported. -func Array(a interface{}) interface { - driver.Valuer - sql.Scanner -} { - switch a := a.(type) { - case []bool: - return (*BoolArray)(&a) - case []float64: - return (*Float64Array)(&a) - case []int64: - return (*Int64Array)(&a) - case []string: - return (*StringArray)(&a) - - case *[]bool: - return (*BoolArray)(a) - case *[]float64: - return (*Float64Array)(a) - case *[]int64: - return (*Int64Array)(a) - case *[]string: - return (*StringArray)(a) - } - - return GenericArray{a} -} - -// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner -// to override the array delimiter used by GenericArray. -type ArrayDelimiter interface { - // ArrayDelimiter returns the delimiter character(s) for this element's type. 
- ArrayDelimiter() string -} - -// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. -type BoolArray []bool - -// Scan implements the sql.Scanner interface. -func (a *BoolArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to BoolArray", src) -} - -func (a *BoolArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "BoolArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(BoolArray, len(elems)) - for i, v := range elems { - if len(v) != 1 { - return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) - } - switch v[0] { - case 't': - b[i] = true - case 'f': - b[i] = false - default: - return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a BoolArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be exactly two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1+2*n) - - for i := 0; i < n; i++ { - b[2*i] = ',' - if a[i] { - b[1+2*i] = 't' - } else { - b[1+2*i] = 'f' - } - } - - b[0] = '{' - b[2*n] = '}' - - return string(b), nil - } - - return "{}", nil -} - -// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. -type ByteaArray [][]byte - -// Scan implements the sql.Scanner interface. 
-func (a *ByteaArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) -} - -func (a *ByteaArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(ByteaArray, len(elems)) - for i, v := range elems { - b[i], err = parseBytea(v) - if err != nil { - return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. It uses the "hex" format which -// is only supported on PostgreSQL 9.0 or newer. -func (a ByteaArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, 2*N bytes of quotes, - // 3*N bytes of hex formatting, and N-1 bytes of delimiters. - size := 1 + 6*n - for _, x := range a { - size += hex.EncodedLen(len(x)) - } - - b := make([]byte, size) - - for i, s := 0, b; i < n; i++ { - o := copy(s, `,"\\x`) - o += hex.Encode(s[o:], a[i]) - s[o] = '"' - s = s[o+1:] - } - - b[0] = '{' - b[size-1] = '}' - - return string(b), nil - } - - return "{}", nil -} - -// Float64Array represents a one-dimensional array of the PostgreSQL double -// precision type. -type Float64Array []float64 - -// Scan implements the sql.Scanner interface. 
-func (a *Float64Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Float64Array", src) -} - -func (a *Float64Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Float64Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Float64Array, len(elems)) - for i, v := range elems { - if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Float64Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendFloat(b, a[0], 'f', -1, 64) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendFloat(b, a[i], 'f', -1, 64) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// GenericArray implements the driver.Valuer and sql.Scanner interfaces for -// an array or slice of any dimension. -type GenericArray struct{ A interface{} } - -func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { - var assign func([]byte, reflect.Value) error - var del = "," - - // TODO calculate the assign function for other types - // TODO repeat this section on the element type of arrays or slices (multidimensional) - { - if reflect.PtrTo(rt).Implements(typeSQLScanner) { - // dest is always addressable because it is an element of a slice. 
- assign = func(src []byte, dest reflect.Value) (err error) { - ss := dest.Addr().Interface().(sql.Scanner) - if src == nil { - err = ss.Scan(nil) - } else { - err = ss.Scan(src) - } - return - } - goto FoundType - } - - assign = func([]byte, reflect.Value) error { - return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) - } - } - -FoundType: - - if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { - del = ad.ArrayDelimiter() - } - - return rt, assign, del -} - -// Scan implements the sql.Scanner interface. -func (a GenericArray) Scan(src interface{}) error { - dpv := reflect.ValueOf(a.A) - switch { - case dpv.Kind() != reflect.Ptr: - return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) - case dpv.IsNil(): - return fmt.Errorf("pq: destination %T is nil", a.A) - } - - dv := dpv.Elem() - switch dv.Kind() { - case reflect.Slice: - case reflect.Array: - default: - return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) - } - - switch src := src.(type) { - case []byte: - return a.scanBytes(src, dv) - case string: - return a.scanBytes([]byte(src), dv) - case nil: - if dv.Kind() == reflect.Slice { - dv.Set(reflect.Zero(dv.Type())) - return nil - } - } - - return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) -} - -func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { - dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) - dims, elems, err := parseArray(src, []byte(del)) - if err != nil { - return err - } - - // TODO allow multidimensional - - if len(dims) > 1 { - return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", - strings.Replace(fmt.Sprint(dims), " ", "][", -1)) - } - - // Treat a zero-dimensional array like an array with a single dimension of zero. 
- if len(dims) == 0 { - dims = append(dims, 0) - } - - for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { - switch rt.Kind() { - case reflect.Slice: - case reflect.Array: - if rt.Len() != dims[i] { - return fmt.Errorf("pq: cannot convert ARRAY%s to %s", - strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) - } - default: - // TODO handle multidimensional - } - } - - values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) - for i, e := range elems { - if err := assign(e, values.Index(i)); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - - // TODO handle multidimensional - - switch dv.Kind() { - case reflect.Slice: - dv.Set(values.Slice(0, dims[0])) - case reflect.Array: - for i := 0; i < dims[0]; i++ { - dv.Index(i).Set(values.Index(i)) - } - } - - return nil -} - -// Value implements the driver.Valuer interface. -func (a GenericArray) Value() (driver.Value, error) { - if a.A == nil { - return nil, nil - } - - rv := reflect.ValueOf(a.A) - - switch rv.Kind() { - case reflect.Slice: - if rv.IsNil() { - return nil, nil - } - case reflect.Array: - default: - return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) - } - - if n := rv.Len(); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 0, 1+2*n) - - b, _, err := appendArray(b, rv, n) - return string(b), err - } - - return "{}", nil -} - -// Int64Array represents a one-dimensional array of the PostgreSQL integer types. -type Int64Array []int64 - -// Scan implements the sql.Scanner interface. 
-func (a *Int64Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Int64Array", src) -} - -func (a *Int64Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Int64Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Int64Array, len(elems)) - for i, v := range elems { - if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Int64Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendInt(b, a[0], 10) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendInt(b, a[i], 10) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// StringArray represents a one-dimensional array of the PostgreSQL character types. -type StringArray []string - -// Scan implements the sql.Scanner interface. 
-func (a *StringArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to StringArray", src) -} - -func (a *StringArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "StringArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(StringArray, len(elems)) - for i, v := range elems { - if b[i] = string(v); v == nil { - return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a StringArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, 2*N bytes of quotes, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+3*n) - b[0] = '{' - - b = appendArrayQuotedBytes(b, []byte(a[0])) - for i := 1; i < n; i++ { - b = append(b, ',') - b = appendArrayQuotedBytes(b, []byte(a[i])) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// appendArray appends rv to the buffer, returning the extended buffer and -// the delimiter used between elements. -// -// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. -func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { - var del string - var err error - - b = append(b, '{') - - if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { - return b, del, err - } - - for i := 1; i < n; i++ { - b = append(b, del...) 
- if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { - return b, del, err - } - } - - return append(b, '}'), del, nil -} - -// appendArrayElement appends rv to the buffer, returning the extended buffer -// and the delimiter to use before the next element. -// -// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted -// using driver.DefaultParameterConverter and the resulting []byte or string -// is double-quoted. -// -// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO -func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { - if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { - if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { - if n := rv.Len(); n > 0 { - return appendArray(b, rv, n) - } - - return b, "", nil - } - } - - var del = "," - var err error - var iv interface{} = rv.Interface() - - if ad, ok := iv.(ArrayDelimiter); ok { - del = ad.ArrayDelimiter() - } - - if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { - return b, del, err - } - - switch v := iv.(type) { - case nil: - return append(b, "NULL"...), del, nil - case []byte: - return appendArrayQuotedBytes(b, v), del, nil - case string: - return appendArrayQuotedBytes(b, []byte(v)), del, nil - } - - b, err = appendValue(b, iv) - return b, del, err -} - -func appendArrayQuotedBytes(b, v []byte) []byte { - b = append(b, '"') - for { - i := bytes.IndexAny(v, `"\`) - if i < 0 { - b = append(b, v...) - break - } - if i > 0 { - b = append(b, v[:i]...) - } - b = append(b, '\\', v[i]) - v = v[i+1:] - } - return append(b, '"') -} - -func appendValue(b []byte, v driver.Value) ([]byte, error) { - return append(b, encode(nil, v, 0)...), nil -} - -// parseArray extracts the dimensions and elements of an array represented in -// text format. Only representations emitted by the backend are supported. 
-// Notably, whitespace around brackets and delimiters is significant, and NULL -// is case-sensitive. -// -// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO -func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { - var depth, i int - - if len(src) < 1 || src[0] != '{' { - return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) - } - -Open: - for i < len(src) { - switch src[i] { - case '{': - depth++ - i++ - case '}': - elems = make([][]byte, 0) - goto Close - default: - break Open - } - } - dims = make([]int, i) - -Element: - for i < len(src) { - switch src[i] { - case '{': - if depth == len(dims) { - break Element - } - depth++ - dims[depth-1] = 0 - i++ - case '"': - var elem = []byte{} - var escape bool - for i++; i < len(src); i++ { - if escape { - elem = append(elem, src[i]) - escape = false - } else { - switch src[i] { - default: - elem = append(elem, src[i]) - case '\\': - escape = true - case '"': - elems = append(elems, elem) - i++ - break Element - } - } - } - default: - for start := i; i < len(src); i++ { - if bytes.HasPrefix(src[i:], del) || src[i] == '}' { - elem := src[start:i] - if len(elem) == 0 { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - if bytes.Equal(elem, []byte("NULL")) { - elem = nil - } - elems = append(elems, elem) - break Element - } - } - } - } - - for i < len(src) { - if bytes.HasPrefix(src[i:], del) && depth > 0 { - dims[depth-1]++ - i += len(del) - goto Element - } else if src[i] == '}' && depth > 0 { - dims[depth-1]++ - depth-- - i++ - } else { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - } - -Close: - for i < len(src) { - if src[i] == '}' && depth > 0 { - depth-- - i++ - } else { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - } - if depth > 0 { - err = fmt.Errorf("pq: unable to 
parse array; expected %q at offset %d", '}', i) - } - if err == nil { - for _, d := range dims { - if (len(elems) % d) != 0 { - err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") - } - } - } - return -} - -func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { - dims, elems, err := parseArray(src, del) - if err != nil { - return nil, err - } - if len(dims) > 1 { - return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) - } - return elems, err -} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go deleted file mode 100644 index 4b0a0a8f..00000000 --- a/vendor/github.com/lib/pq/buf.go +++ /dev/null @@ -1,91 +0,0 @@ -package pq - -import ( - "bytes" - "encoding/binary" - - "github.com/lib/pq/oid" -) - -type readBuf []byte - -func (b *readBuf) int32() (n int) { - n = int(int32(binary.BigEndian.Uint32(*b))) - *b = (*b)[4:] - return -} - -func (b *readBuf) oid() (n oid.Oid) { - n = oid.Oid(binary.BigEndian.Uint32(*b)) - *b = (*b)[4:] - return -} - -// N.B: this is actually an unsigned 16-bit integer, unlike int32 -func (b *readBuf) int16() (n int) { - n = int(binary.BigEndian.Uint16(*b)) - *b = (*b)[2:] - return -} - -func (b *readBuf) string() string { - i := bytes.IndexByte(*b, 0) - if i < 0 { - errorf("invalid message format; expected string terminator") - } - s := (*b)[:i] - *b = (*b)[i+1:] - return string(s) -} - -func (b *readBuf) next(n int) (v []byte) { - v = (*b)[:n] - *b = (*b)[n:] - return -} - -func (b *readBuf) byte() byte { - return b.next(1)[0] -} - -type writeBuf struct { - buf []byte - pos int -} - -func (b *writeBuf) int32(n int) { - x := make([]byte, 4) - binary.BigEndian.PutUint32(x, uint32(n)) - b.buf = append(b.buf, x...) -} - -func (b *writeBuf) int16(n int) { - x := make([]byte, 2) - binary.BigEndian.PutUint16(x, uint16(n)) - b.buf = append(b.buf, x...) 
-} - -func (b *writeBuf) string(s string) { - b.buf = append(append(b.buf, s...), '\000') -} - -func (b *writeBuf) byte(c byte) { - b.buf = append(b.buf, c) -} - -func (b *writeBuf) bytes(v []byte) { - b.buf = append(b.buf, v...) -} - -func (b *writeBuf) wrap() []byte { - p := b.buf[b.pos:] - binary.BigEndian.PutUint32(p, uint32(len(p))) - return b.buf -} - -func (b *writeBuf) next(c byte) { - p := b.buf[b.pos:] - binary.BigEndian.PutUint32(p, uint32(len(p))) - b.pos = len(b.buf) + 1 - b.buf = append(b.buf, c, 0, 0, 0, 0) -} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go deleted file mode 100644 index f313c149..00000000 --- a/vendor/github.com/lib/pq/conn.go +++ /dev/null @@ -1,1996 +0,0 @@ -package pq - -import ( - "bufio" - "context" - "crypto/md5" - "crypto/sha256" - "database/sql" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "os" - "os/user" - "path" - "path/filepath" - "strconv" - "strings" - "time" - "unicode" - - "github.com/lib/pq/oid" - "github.com/lib/pq/scram" -) - -// Common error types -var ( - ErrNotSupported = errors.New("pq: Unsupported command") - ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") - ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") - ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") - ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") - - errUnexpectedReady = errors.New("unexpected ReadyForQuery") - errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") - errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") -) - -// Driver is the Postgres database driver. -type Driver struct{} - -// Open opens a new connection to the database. name is a connection string. 
-// Most users should only use it through database/sql package from the standard -// library. -func (d *Driver) Open(name string) (driver.Conn, error) { - return Open(name) -} - -func init() { - sql.Register("postgres", &Driver{}) -} - -type parameterStatus struct { - // server version in the same format as server_version_num, or 0 if - // unavailable - serverVersion int - - // the current location based on the TimeZone value of the session, if - // available - currentLocation *time.Location -} - -type transactionStatus byte - -const ( - txnStatusIdle transactionStatus = 'I' - txnStatusIdleInTransaction transactionStatus = 'T' - txnStatusInFailedTransaction transactionStatus = 'E' -) - -func (s transactionStatus) String() string { - switch s { - case txnStatusIdle: - return "idle" - case txnStatusIdleInTransaction: - return "idle in transaction" - case txnStatusInFailedTransaction: - return "in a failed transaction" - default: - errorf("unknown transactionStatus %d", s) - } - - panic("not reached") -} - -// Dialer is the dialer interface. It can be used to obtain more control over -// how pq creates network connections. -type Dialer interface { - Dial(network, address string) (net.Conn, error) - DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) -} - -// DialerContext is the context-aware dialer interface. 
-type DialerContext interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -type defaultDialer struct { - d net.Dialer -} - -func (d defaultDialer) Dial(network, address string) (net.Conn, error) { - return d.d.Dial(network, address) -} -func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - return d.DialContext(ctx, network, address) -} -func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - return d.d.DialContext(ctx, network, address) -} - -type conn struct { - c net.Conn - buf *bufio.Reader - namei int - scratch [512]byte - txnStatus transactionStatus - txnFinish func() - - // Save connection arguments to use during CancelRequest. - dialer Dialer - opts values - - // Cancellation key data for use with CancelRequest messages. - processID int - secretKey int - - parameterStatus parameterStatus - - saveMessageType byte - saveMessageBuffer []byte - - // If true, this connection is bad and all public-facing functions should - // return ErrBadConn. - bad bool - - // If set, this connection should never use the binary format when - // receiving query results from prepared statements. Only provided for - // debugging. - disablePreparedBinaryResult bool - - // Whether to always send []byte parameters over as binary. Enables single - // round-trip mode for non-prepared Query calls. - binaryParameters bool - - // If true this connection is in the middle of a COPY - inCopy bool - - // If not nil, notices will be synchronously sent here - noticeHandler func(*Error) - - // If not nil, notifications will be synchronously sent here - notificationHandler func(*Notification) - - // GSSAPI context - gss GSS -} - -// Handle driver-side settings in parsed connection string. 
-func (cn *conn) handleDriverSettings(o values) (err error) { - boolSetting := func(key string, val *bool) error { - if value, ok := o[key]; ok { - if value == "yes" { - *val = true - } else if value == "no" { - *val = false - } else { - return fmt.Errorf("unrecognized value %q for %s", value, key) - } - } - return nil - } - - err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) - if err != nil { - return err - } - return boolSetting("binary_parameters", &cn.binaryParameters) -} - -func (cn *conn) handlePgpass(o values) { - // if a password was supplied, do not process .pgpass - if _, ok := o["password"]; ok { - return - } - filename := os.Getenv("PGPASSFILE") - if filename == "" { - // XXX this code doesn't work on Windows where the default filename is - // XXX %APPDATA%\postgresql\pgpass.conf - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - userHome := os.Getenv("HOME") - if userHome == "" { - user, err := user.Current() - if err != nil { - return - } - userHome = user.HomeDir - } - filename = filepath.Join(userHome, ".pgpass") - } - fileinfo, err := os.Stat(filename) - if err != nil { - return - } - mode := fileinfo.Mode() - if mode&(0x77) != 0 { - // XXX should warn about incorrect .pgpass permissions as psql does - return - } - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - scanner := bufio.NewScanner(io.Reader(file)) - hostname := o["host"] - ntw, _ := network(o) - port := o["port"] - db := o["dbname"] - username := o["user"] - // From: https://github.com/tg/pgpass/blob/master/reader.go - getFields := func(s string) []string { - fs := make([]string, 0, 5) - f := make([]rune, 0, len(s)) - - var esc bool - for _, c := range s { - switch { - case esc: - f = append(f, c) - esc = false - case c == '\\': - esc = true - case c == ':': - fs = append(fs, string(f)) - f = f[:0] - default: - f = append(f, c) - } - } - return append(fs, string(f)) - } - for scanner.Scan() 
{ - line := scanner.Text() - if len(line) == 0 || line[0] == '#' { - continue - } - split := getFields(line) - if len(split) != 5 { - continue - } - if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { - o["password"] = split[4] - return - } - } -} - -func (cn *conn) writeBuf(b byte) *writeBuf { - cn.scratch[0] = b - return &writeBuf{ - buf: cn.scratch[:5], - pos: 1, - } -} - -// Open opens a new connection to the database. dsn is a connection string. -// Most users should only use it through database/sql package from the standard -// library. -func Open(dsn string) (_ driver.Conn, err error) { - return DialOpen(defaultDialer{}, dsn) -} - -// DialOpen opens a new connection to the database using a dialer. -func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { - c, err := NewConnector(dsn) - if err != nil { - return nil, err - } - c.dialer = d - return c.open(context.Background()) -} - -func (c *Connector) open(ctx context.Context) (cn *conn, err error) { - // Handle any panics during connection initialization. Note that we - // specifically do *not* want to use errRecover(), as that would turn any - // connection errors into ErrBadConns, hiding the real error message from - // the user. - defer errRecoverNoErrBadConn(&err) - - o := c.opts - - cn = &conn{ - opts: o, - dialer: c.dialer, - } - err = cn.handleDriverSettings(o) - if err != nil { - return nil, err - } - cn.handlePgpass(o) - - cn.c, err = dial(ctx, c.dialer, o) - if err != nil { - return nil, err - } - - err = cn.ssl(o) - if err != nil { - if cn.c != nil { - cn.c.Close() - } - return nil, err - } - - // cn.startup panics on error. Make sure we don't leak cn.c. 
- panicking := true - defer func() { - if panicking { - cn.c.Close() - } - }() - - cn.buf = bufio.NewReader(cn.c) - cn.startup(o) - - // reset the deadline, in case one was set (see dial) - if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { - err = cn.c.SetDeadline(time.Time{}) - } - panicking = false - return cn, err -} - -func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { - network, address := network(o) - - // Zero or not specified means wait indefinitely. - if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { - seconds, err := strconv.ParseInt(timeout, 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) - } - duration := time.Duration(seconds) * time.Second - - // connect_timeout should apply to the entire connection establishment - // procedure, so we both use a timeout for the TCP connection - // establishment and set a deadline for doing the initial handshake. - // The deadline is then reset after startup() is done. - deadline := time.Now().Add(duration) - var conn net.Conn - if dctx, ok := d.(DialerContext); ok { - ctx, cancel := context.WithTimeout(ctx, duration) - defer cancel() - conn, err = dctx.DialContext(ctx, network, address) - } else { - conn, err = d.DialTimeout(network, address, duration) - } - if err != nil { - return nil, err - } - err = conn.SetDeadline(deadline) - return conn, err - } - if dctx, ok := d.(DialerContext); ok { - return dctx.DialContext(ctx, network, address) - } - return d.Dial(network, address) -} - -func network(o values) (string, string) { - host := o["host"] - - if strings.HasPrefix(host, "/") { - sockPath := path.Join(host, ".s.PGSQL."+o["port"]) - return "unix", sockPath - } - - return "tcp", net.JoinHostPort(host, o["port"]) -} - -type values map[string]string - -// scanner implements a tokenizer for libpq-style option strings. 
-type scanner struct { - s []rune - i int -} - -// newScanner returns a new scanner initialized with the option string s. -func newScanner(s string) *scanner { - return &scanner{[]rune(s), 0} -} - -// Next returns the next rune. -// It returns 0, false if the end of the text has been reached. -func (s *scanner) Next() (rune, bool) { - if s.i >= len(s.s) { - return 0, false - } - r := s.s[s.i] - s.i++ - return r, true -} - -// SkipSpaces returns the next non-whitespace rune. -// It returns 0, false if the end of the text has been reached. -func (s *scanner) SkipSpaces() (rune, bool) { - r, ok := s.Next() - for unicode.IsSpace(r) && ok { - r, ok = s.Next() - } - return r, ok -} - -// parseOpts parses the options from name and adds them to the values. -// -// The parsing code is based on conninfo_parse from libpq's fe-connect.c -func parseOpts(name string, o values) error { - s := newScanner(name) - - for { - var ( - keyRunes, valRunes []rune - r rune - ok bool - ) - - if r, ok = s.SkipSpaces(); !ok { - break - } - - // Scan the key - for !unicode.IsSpace(r) && r != '=' { - keyRunes = append(keyRunes, r) - if r, ok = s.Next(); !ok { - break - } - } - - // Skip any whitespace if we're not at the = yet - if r != '=' { - r, ok = s.SkipSpaces() - } - - // The current character should be = - if r != '=' || !ok { - return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) - } - - // Skip any whitespace after the = - if r, ok = s.SkipSpaces(); !ok { - // If we reach the end here, the last value is just an empty string as per libpq. 
- o[string(keyRunes)] = "" - break - } - - if r != '\'' { - for !unicode.IsSpace(r) { - if r == '\\' { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`missing character after backslash`) - } - } - valRunes = append(valRunes, r) - - if r, ok = s.Next(); !ok { - break - } - } - } else { - quote: - for { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`unterminated quoted string literal in connection string`) - } - switch r { - case '\'': - break quote - case '\\': - r, _ = s.Next() - fallthrough - default: - valRunes = append(valRunes, r) - } - } - } - - o[string(keyRunes)] = string(valRunes) - } - - return nil -} - -func (cn *conn) isInTransaction() bool { - return cn.txnStatus == txnStatusIdleInTransaction || - cn.txnStatus == txnStatusInFailedTransaction -} - -func (cn *conn) checkIsInTransaction(intxn bool) { - if cn.isInTransaction() != intxn { - cn.bad = true - errorf("unexpected transaction status %v", cn.txnStatus) - } -} - -func (cn *conn) Begin() (_ driver.Tx, err error) { - return cn.begin("") -} - -func (cn *conn) begin(mode string) (_ driver.Tx, err error) { - if cn.bad { - return nil, driver.ErrBadConn - } - defer cn.errRecover(&err) - - cn.checkIsInTransaction(false) - _, commandTag, err := cn.simpleExec("BEGIN" + mode) - if err != nil { - return nil, err - } - if commandTag != "BEGIN" { - cn.bad = true - return nil, fmt.Errorf("unexpected command tag %s", commandTag) - } - if cn.txnStatus != txnStatusIdleInTransaction { - cn.bad = true - return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) - } - return cn, nil -} - -func (cn *conn) closeTxn() { - if finish := cn.txnFinish; finish != nil { - finish() - } -} - -func (cn *conn) Commit() (err error) { - defer cn.closeTxn() - if cn.bad { - return driver.ErrBadConn - } - defer cn.errRecover(&err) - - cn.checkIsInTransaction(true) - // We don't want the client to think that everything is okay if it tries - // to commit a failed transaction. 
However, no matter what we return, - // database/sql will release this connection back into the free connection - // pool so we have to abort the current transaction here. Note that you - // would get the same behaviour if you issued a COMMIT in a failed - // transaction, so it's also the least surprising thing to do here. - if cn.txnStatus == txnStatusInFailedTransaction { - if err := cn.rollback(); err != nil { - return err - } - return ErrInFailedTransaction - } - - _, commandTag, err := cn.simpleExec("COMMIT") - if err != nil { - if cn.isInTransaction() { - cn.bad = true - } - return err - } - if commandTag != "COMMIT" { - cn.bad = true - return fmt.Errorf("unexpected command tag %s", commandTag) - } - cn.checkIsInTransaction(false) - return nil -} - -func (cn *conn) Rollback() (err error) { - defer cn.closeTxn() - if cn.bad { - return driver.ErrBadConn - } - defer cn.errRecover(&err) - return cn.rollback() -} - -func (cn *conn) rollback() (err error) { - cn.checkIsInTransaction(true) - _, commandTag, err := cn.simpleExec("ROLLBACK") - if err != nil { - if cn.isInTransaction() { - cn.bad = true - } - return err - } - if commandTag != "ROLLBACK" { - return fmt.Errorf("unexpected command tag %s", commandTag) - } - cn.checkIsInTransaction(false) - return nil -} - -func (cn *conn) gname() string { - cn.namei++ - return strconv.FormatInt(int64(cn.namei), 10) -} - -func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'C': - res, commandTag = cn.parseComplete(r.string()) - case 'Z': - cn.processReadyForQuery(r) - if res == nil && err == nil { - err = errUnexpectedReady - } - // done - return - case 'E': - err = parseError(r) - case 'I': - res = emptyRows - case 'T', 'D': - // ignore any results - default: - cn.bad = true - errorf("unknown response for simple query: %q", t) - } - } -} - -func (cn *conn) simpleQuery(q string) (res 
*rows, err error) { - defer cn.errRecover(&err) - - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'C', 'I': - // We allow queries which don't return any results through Query as - // well as Exec. We still have to give database/sql a rows object - // the user can close, though, to avoid connections from being - // leaked. A "rows" with done=true works fine for that purpose. - if err != nil { - cn.bad = true - errorf("unexpected message %q in simple query execution", t) - } - if res == nil { - res = &rows{ - cn: cn, - } - } - // Set the result and tag to the last command complete if there wasn't a - // query already run. Although queries usually return from here and cede - // control to Next, a query with zero results does not. - if t == 'C' && res.colNames == nil { - res.result, res.tag = cn.parseComplete(r.string()) - } - res.done = true - case 'Z': - cn.processReadyForQuery(r) - // done - return - case 'E': - res = nil - err = parseError(r) - case 'D': - if res == nil { - cn.bad = true - errorf("unexpected DataRow in simple query execution") - } - // the query didn't fail; kick off to Next - cn.saveMessage(t, r) - return - case 'T': - // res might be non-nil here if we received a previous - // CommandComplete, but that's fine; just overwrite it - res = &rows{cn: cn} - res.rowsHeader = parsePortalRowDescribe(r) - - // To work around a bug in QueryRow in Go 1.2 and earlier, wait - // until the first DataRow has been received. - default: - cn.bad = true - errorf("unknown response for simple query: %q", t) - } - } -} - -type noRows struct{} - -var emptyRows noRows - -var _ driver.Result = noRows{} - -func (noRows) LastInsertId() (int64, error) { - return 0, errNoLastInsertID -} - -func (noRows) RowsAffected() (int64, error) { - return 0, errNoRowsAffected -} - -// Decides which column formats to use for a prepared statement. The input is -// an array of type oids, one element per result column. 
-func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { - if len(colTyps) == 0 { - return nil, colFmtDataAllText - } - - colFmts = make([]format, len(colTyps)) - if forceText { - return colFmts, colFmtDataAllText - } - - allBinary := true - allText := true - for i, t := range colTyps { - switch t.OID { - // This is the list of types to use binary mode for when receiving them - // through a prepared statement. If a type appears in this list, it - // must also be implemented in binaryDecode in encode.go. - case oid.T_bytea: - fallthrough - case oid.T_int8: - fallthrough - case oid.T_int4: - fallthrough - case oid.T_int2: - fallthrough - case oid.T_uuid: - colFmts[i] = formatBinary - allText = false - - default: - allBinary = false - } - } - - if allBinary { - return colFmts, colFmtDataAllBinary - } else if allText { - return colFmts, colFmtDataAllText - } else { - colFmtData = make([]byte, 2+len(colFmts)*2) - binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) - for i, v := range colFmts { - binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) - } - return colFmts, colFmtData - } -} - -func (cn *conn) prepareTo(q, stmtName string) *stmt { - st := &stmt{cn: cn, name: stmtName} - - b := cn.writeBuf('P') - b.string(st.name) - b.string(q) - b.int16(0) - - b.next('D') - b.byte('S') - b.string(st.name) - - b.next('S') - cn.send(b) - - cn.readParseResponse() - st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() - st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) - cn.readReadyForQuery() - return st -} - -func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { - if cn.bad { - return nil, driver.ErrBadConn - } - defer cn.errRecover(&err) - - if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { - s, err := cn.prepareCopyIn(q) - if err == nil { - cn.inCopy = true - } - return s, err - } - return cn.prepareTo(q, cn.gname()), nil -} - -func (cn 
*conn) Close() (err error) { - // Skip cn.bad return here because we always want to close a connection. - defer cn.errRecover(&err) - - // Ensure that cn.c.Close is always run. Since error handling is done with - // panics and cn.errRecover, the Close must be in a defer. - defer func() { - cerr := cn.c.Close() - if err == nil { - err = cerr - } - }() - - // Don't go through send(); ListenerConn relies on us not scribbling on the - // scratch buffer of this connection. - return cn.sendSimpleMessage('X') -} - -// Implement the "Queryer" interface -func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { - return cn.query(query, args) -} - -func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { - if cn.bad { - return nil, driver.ErrBadConn - } - if cn.inCopy { - return nil, errCopyInProgress - } - defer cn.errRecover(&err) - - // Check to see if we can use the "simpleQuery" interface, which is - // *much* faster than going through prepare/exec - if len(args) == 0 { - return cn.simpleQuery(query) - } - - if cn.binaryParameters { - cn.sendBinaryModeQuery(query, args) - - cn.readParseResponse() - cn.readBindResponse() - rows := &rows{cn: cn} - rows.rowsHeader = cn.readPortalDescribeResponse() - cn.postExecuteWorkaround() - return rows, nil - } - st := cn.prepareTo(query, "") - st.exec(args) - return &rows{ - cn: cn, - rowsHeader: st.rowsHeader, - }, nil -} - -// Implement the optional "Execer" interface for one-shot queries -func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { - if cn.bad { - return nil, driver.ErrBadConn - } - defer cn.errRecover(&err) - - // Check to see if we can use the "simpleExec" interface, which is - // *much* faster than going through prepare/exec - if len(args) == 0 { - // ignore commandTag, our caller doesn't care - r, _, err := cn.simpleExec(query) - return r, err - } - - if cn.binaryParameters { - cn.sendBinaryModeQuery(query, args) - - cn.readParseResponse() 
- cn.readBindResponse() - cn.readPortalDescribeResponse() - cn.postExecuteWorkaround() - res, _, err = cn.readExecuteResponse("Execute") - return res, err - } - // Use the unnamed statement to defer planning until bind - // time, or else value-based selectivity estimates cannot be - // used. - st := cn.prepareTo(query, "") - r, err := st.Exec(args) - if err != nil { - panic(err) - } - return r, err -} - -func (cn *conn) send(m *writeBuf) { - _, err := cn.c.Write(m.wrap()) - if err != nil { - panic(err) - } -} - -func (cn *conn) sendStartupPacket(m *writeBuf) error { - _, err := cn.c.Write((m.wrap())[1:]) - return err -} - -// Send a message of type typ to the server on the other end of cn. The -// message should have no payload. This method does not use the scratch -// buffer. -func (cn *conn) sendSimpleMessage(typ byte) (err error) { - _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) - return err -} - -// saveMessage memorizes a message and its buffer in the conn struct. -// recvMessage will then return these values on the next call to it. This -// method is useful in cases where you have to see what the next message is -// going to be (e.g. to see whether it's an error or not) but you can't handle -// the message yourself. -func (cn *conn) saveMessage(typ byte, buf *readBuf) { - if cn.saveMessageType != 0 { - cn.bad = true - errorf("unexpected saveMessageType %d", cn.saveMessageType) - } - cn.saveMessageType = typ - cn.saveMessageBuffer = *buf -} - -// recvMessage receives any message from the backend, or returns an error if -// a problem occurred while reading the message. 
-func (cn *conn) recvMessage(r *readBuf) (byte, error) { - // workaround for a QueryRow bug, see exec - if cn.saveMessageType != 0 { - t := cn.saveMessageType - *r = cn.saveMessageBuffer - cn.saveMessageType = 0 - cn.saveMessageBuffer = nil - return t, nil - } - - x := cn.scratch[:5] - _, err := io.ReadFull(cn.buf, x) - if err != nil { - return 0, err - } - - // read the type and length of the message that follows - t := x[0] - n := int(binary.BigEndian.Uint32(x[1:])) - 4 - var y []byte - if n <= len(cn.scratch) { - y = cn.scratch[:n] - } else { - y = make([]byte, n) - } - _, err = io.ReadFull(cn.buf, y) - if err != nil { - return 0, err - } - *r = y - return t, nil -} - -// recv receives a message from the backend, but if an error happened while -// reading the message or the received message was an ErrorResponse, it panics. -// NoticeResponses are ignored. This function should generally be used only -// during the startup sequence. -func (cn *conn) recv() (t byte, r *readBuf) { - for { - var err error - r = &readBuf{} - t, err = cn.recvMessage(r) - if err != nil { - panic(err) - } - switch t { - case 'E': - panic(parseError(r)) - case 'N': - if n := cn.noticeHandler; n != nil { - n(parseError(r)) - } - case 'A': - if n := cn.notificationHandler; n != nil { - n(recvNotification(r)) - } - default: - return - } - } -} - -// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by -// the caller to avoid an allocation. -func (cn *conn) recv1Buf(r *readBuf) byte { - for { - t, err := cn.recvMessage(r) - if err != nil { - panic(err) - } - - switch t { - case 'A': - if n := cn.notificationHandler; n != nil { - n(recvNotification(r)) - } - case 'N': - if n := cn.noticeHandler; n != nil { - n(parseError(r)) - } - case 'S': - cn.processParameterStatus(r) - default: - return t - } - } -} - -// recv1 receives a message from the backend, panicking if an error occurs -// while attempting to read it. 
All asynchronous messages are ignored, with -// the exception of ErrorResponse. -func (cn *conn) recv1() (t byte, r *readBuf) { - r = &readBuf{} - t = cn.recv1Buf(r) - return t, r -} - -func (cn *conn) ssl(o values) error { - upgrade, err := ssl(o) - if err != nil { - return err - } - - if upgrade == nil { - // Nothing to do - return nil - } - - w := cn.writeBuf(0) - w.int32(80877103) - if err = cn.sendStartupPacket(w); err != nil { - return err - } - - b := cn.scratch[:1] - _, err = io.ReadFull(cn.c, b) - if err != nil { - return err - } - - if b[0] != 'S' { - return ErrSSLNotSupported - } - - cn.c, err = upgrade(cn.c) - return err -} - -// isDriverSetting returns true iff a setting is purely for configuring the -// driver's options and should not be sent to the server in the connection -// startup packet. -func isDriverSetting(key string) bool { - switch key { - case "host", "port": - return true - case "password": - return true - case "sslmode", "sslcert", "sslkey", "sslrootcert": - return true - case "fallback_application_name": - return true - case "connect_timeout": - return true - case "disable_prepared_binary_result": - return true - case "binary_parameters": - return true - case "krbsrvname": - return true - case "krbspn": - return true - default: - return false - } -} - -func (cn *conn) startup(o values) { - w := cn.writeBuf(0) - w.int32(196608) - // Send the backend the name of the database we want to connect to, and the - // user we want to connect as. Additionally, we send over any run-time - // parameters potentially included in the connection string. If the server - // doesn't recognize any of them, it will reply with an error. - for k, v := range o { - if isDriverSetting(k) { - // skip options which can't be run-time parameters - continue - } - // The protocol requires us to supply the database name as "database" - // instead of "dbname". 
- if k == "dbname" { - k = "database" - } - w.string(k) - w.string(v) - } - w.string("") - if err := cn.sendStartupPacket(w); err != nil { - panic(err) - } - - for { - t, r := cn.recv() - switch t { - case 'K': - cn.processBackendKeyData(r) - case 'S': - cn.processParameterStatus(r) - case 'R': - cn.auth(r, o) - case 'Z': - cn.processReadyForQuery(r) - return - default: - errorf("unknown response for startup: %q", t) - } - } -} - -func (cn *conn) auth(r *readBuf, o values) { - switch code := r.int32(); code { - case 0: - // OK - case 3: - w := cn.writeBuf('p') - w.string(o["password"]) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 0 { - errorf("unexpected authentication response: %q", t) - } - case 5: - s := string(r.next(4)) - w := cn.writeBuf('p') - w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 0 { - errorf("unexpected authentication response: %q", t) - } - case 7: // GSSAPI, startup - if newGss == nil { - errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") - } - cli, err := newGss() - if err != nil { - errorf("kerberos error: %s", err.Error()) - } - - var token []byte - - if spn, ok := o["krbspn"]; ok { - // Use the supplied SPN if provided.. 
- token, err = cli.GetInitTokenFromSpn(spn) - } else { - // Allow the kerberos service name to be overridden - service := "postgres" - if val, ok := o["krbsrvname"]; ok { - service = val - } - - token, err = cli.GetInitToken(o["host"], service) - } - - if err != nil { - errorf("failed to get Kerberos ticket: %q", err) - } - - w := cn.writeBuf('p') - w.bytes(token) - cn.send(w) - - // Store for GSSAPI continue message - cn.gss = cli - - case 8: // GSSAPI continue - - if cn.gss == nil { - errorf("GSSAPI protocol error") - } - - b := []byte(*r) - - done, tokOut, err := cn.gss.Continue(b) - if err == nil && !done { - w := cn.writeBuf('p') - w.bytes(tokOut) - cn.send(w) - } - - // Errors fall through and read the more detailed message - // from the server.. - - case 10: - sc := scram.NewClient(sha256.New, o["user"], o["password"]) - sc.Step(nil) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - scOut := sc.Out() - - w := cn.writeBuf('p') - w.string("SCRAM-SHA-256") - w.int32(len(scOut)) - w.bytes(scOut) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 11 { - errorf("unexpected authentication response: %q", t) - } - - nextStep := r.next(len(*r)) - sc.Step(nextStep) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - - scOut = sc.Out() - w = cn.writeBuf('p') - w.bytes(scOut) - cn.send(w) - - t, r = cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 12 { - errorf("unexpected authentication response: %q", t) - } - - nextStep = r.next(len(*r)) - sc.Step(nextStep) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - - default: - errorf("unknown authentication response: %d", code) - } -} - -type format int - -const formatText format = 0 -const formatBinary format = 1 - -// One result-column format code with the value 1 (i.e. all binary). 
-var colFmtDataAllBinary = []byte{0, 1, 0, 1} - -// No result-column format codes (i.e. all text). -var colFmtDataAllText = []byte{0, 0} - -type stmt struct { - cn *conn - name string - rowsHeader - colFmtData []byte - paramTyps []oid.Oid - closed bool -} - -func (st *stmt) Close() (err error) { - if st.closed { - return nil - } - if st.cn.bad { - return driver.ErrBadConn - } - defer st.cn.errRecover(&err) - - w := st.cn.writeBuf('C') - w.byte('S') - w.string(st.name) - st.cn.send(w) - - st.cn.send(st.cn.writeBuf('S')) - - t, _ := st.cn.recv1() - if t != '3' { - st.cn.bad = true - errorf("unexpected close response: %q", t) - } - st.closed = true - - t, r := st.cn.recv1() - if t != 'Z' { - st.cn.bad = true - errorf("expected ready for query, but got: %q", t) - } - st.cn.processReadyForQuery(r) - - return nil -} - -func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { - if st.cn.bad { - return nil, driver.ErrBadConn - } - defer st.cn.errRecover(&err) - - st.exec(v) - return &rows{ - cn: st.cn, - rowsHeader: st.rowsHeader, - }, nil -} - -func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { - if st.cn.bad { - return nil, driver.ErrBadConn - } - defer st.cn.errRecover(&err) - - st.exec(v) - res, _, err = st.cn.readExecuteResponse("simple query") - return res, err -} - -func (st *stmt) exec(v []driver.Value) { - if len(v) >= 65536 { - errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) - } - if len(v) != len(st.paramTyps) { - errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) - } - - cn := st.cn - w := cn.writeBuf('B') - w.byte(0) // unnamed portal - w.string(st.name) - - if cn.binaryParameters { - cn.sendBinaryParameters(w, v) - } else { - w.int16(0) - w.int16(len(v)) - for i, x := range v { - if x == nil { - w.int32(-1) - } else { - b := encode(&cn.parameterStatus, x, st.paramTyps[i]) - w.int32(len(b)) - w.bytes(b) - } - } - } - w.bytes(st.colFmtData) - - w.next('E') - 
w.byte(0) - w.int32(0) - - w.next('S') - cn.send(w) - - cn.readBindResponse() - cn.postExecuteWorkaround() - -} - -func (st *stmt) NumInput() int { - return len(st.paramTyps) -} - -// parseComplete parses the "command tag" from a CommandComplete message, and -// returns the number of rows affected (if applicable) and a string -// identifying only the command that was executed, e.g. "ALTER TABLE". If the -// command tag could not be parsed, parseComplete panics. -func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { - commandsWithAffectedRows := []string{ - "SELECT ", - // INSERT is handled below - "UPDATE ", - "DELETE ", - "FETCH ", - "MOVE ", - "COPY ", - } - - var affectedRows *string - for _, tag := range commandsWithAffectedRows { - if strings.HasPrefix(commandTag, tag) { - t := commandTag[len(tag):] - affectedRows = &t - commandTag = tag[:len(tag)-1] - break - } - } - // INSERT also includes the oid of the inserted row in its command tag. - // Oids in user tables are deprecated, and the oid is only returned when - // exactly one row is inserted, so it's unlikely to be of value to any - // real-world application and we can ignore it. 
- if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { - parts := strings.Split(commandTag, " ") - if len(parts) != 3 { - cn.bad = true - errorf("unexpected INSERT command tag %s", commandTag) - } - affectedRows = &parts[len(parts)-1] - commandTag = "INSERT" - } - // There should be no affected rows attached to the tag, just return it - if affectedRows == nil { - return driver.RowsAffected(0), commandTag - } - n, err := strconv.ParseInt(*affectedRows, 10, 64) - if err != nil { - cn.bad = true - errorf("could not parse commandTag: %s", err) - } - return driver.RowsAffected(n), commandTag -} - -type rowsHeader struct { - colNames []string - colTyps []fieldDesc - colFmts []format -} - -type rows struct { - cn *conn - finish func() - rowsHeader - done bool - rb readBuf - result driver.Result - tag string - - next *rowsHeader -} - -func (rs *rows) Close() error { - if finish := rs.finish; finish != nil { - defer finish() - } - // no need to look at cn.bad as Next() will - for { - err := rs.Next(nil) - switch err { - case nil: - case io.EOF: - // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row - // description, used with HasNextResultSet). We need to fetch messages until - // we hit a 'Z', which is done by waiting for done to be set. 
- if rs.done { - return nil - } - default: - return err - } - } -} - -func (rs *rows) Columns() []string { - return rs.colNames -} - -func (rs *rows) Result() driver.Result { - if rs.result == nil { - return emptyRows - } - return rs.result -} - -func (rs *rows) Tag() string { - return rs.tag -} - -func (rs *rows) Next(dest []driver.Value) (err error) { - if rs.done { - return io.EOF - } - - conn := rs.cn - if conn.bad { - return driver.ErrBadConn - } - defer conn.errRecover(&err) - - for { - t := conn.recv1Buf(&rs.rb) - switch t { - case 'E': - err = parseError(&rs.rb) - case 'C', 'I': - if t == 'C' { - rs.result, rs.tag = conn.parseComplete(rs.rb.string()) - } - continue - case 'Z': - conn.processReadyForQuery(&rs.rb) - rs.done = true - if err != nil { - return err - } - return io.EOF - case 'D': - n := rs.rb.int16() - if err != nil { - conn.bad = true - errorf("unexpected DataRow after error %s", err) - } - if n < len(dest) { - dest = dest[:n] - } - for i := range dest { - l := rs.rb.int32() - if l == -1 { - dest[i] = nil - continue - } - dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) - } - return - case 'T': - next := parsePortalRowDescribe(&rs.rb) - rs.next = &next - return io.EOF - default: - errorf("unexpected message after execute: %q", t) - } - } -} - -func (rs *rows) HasNextResultSet() bool { - hasNext := rs.next != nil && !rs.done - return hasNext -} - -func (rs *rows) NextResultSet() error { - if rs.next == nil { - return io.EOF - } - rs.rowsHeader = *rs.next - rs.next = nil - return nil -} - -// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be -// used as part of an SQL statement. For example: -// -// tblname := "my_table" -// data := "my_data" -// quoted := pq.QuoteIdentifier(tblname) -// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) -// -// Any double quotes in name will be escaped. The quoted identifier will be -// case sensitive when used in a query. 
If the input string contains a zero -// byte, the result will be truncated immediately before it. -func QuoteIdentifier(name string) string { - end := strings.IndexRune(name, 0) - if end > -1 { - name = name[:end] - } - return `"` + strings.Replace(name, `"`, `""`, -1) + `"` -} - -// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal -// to DDL and other statements that do not accept parameters) to be used as part -// of an SQL statement. For example: -// -// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") -// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) -// -// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be -// replaced by two backslashes (i.e. "\\") and the C-style escape identifier -// that PostgreSQL provides ('E') will be prepended to the string. -func QuoteLiteral(literal string) string { - // This follows the PostgreSQL internal algorithm for handling quoted literals - // from libpq, which can be found in the "PQEscapeStringInternal" function, - // which is found in the libpq/fe-exec.c source file: - // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c - // - // substitute any single-quotes (') with two single-quotes ('') - literal = strings.Replace(literal, `'`, `''`, -1) - // determine if the string has any backslashes (\) in it. - // if it does, replace any backslashes (\) with two backslashes (\\) - // then, we need to wrap the entire string with a PostgreSQL - // C-style escape. 
Per how "PQEscapeStringInternal" handles this case, we - // also add a space before the "E" - if strings.Contains(literal, `\`) { - literal = strings.Replace(literal, `\`, `\\`, -1) - literal = ` E'` + literal + `'` - } else { - // otherwise, we can just wrap the literal with a pair of single quotes - literal = `'` + literal + `'` - } - return literal -} - -func md5s(s string) string { - h := md5.New() - h.Write([]byte(s)) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { - // Do one pass over the parameters to see if we're going to send any of - // them over in binary. If we are, create a paramFormats array at the - // same time. - var paramFormats []int - for i, x := range args { - _, ok := x.([]byte) - if ok { - if paramFormats == nil { - paramFormats = make([]int, len(args)) - } - paramFormats[i] = 1 - } - } - if paramFormats == nil { - b.int16(0) - } else { - b.int16(len(paramFormats)) - for _, x := range paramFormats { - b.int16(x) - } - } - - b.int16(len(args)) - for _, x := range args { - if x == nil { - b.int32(-1) - } else { - datum := binaryEncode(&cn.parameterStatus, x) - b.int32(len(datum)) - b.bytes(datum) - } - } -} - -func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { - if len(args) >= 65536 { - errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) - } - - b := cn.writeBuf('P') - b.byte(0) // unnamed statement - b.string(query) - b.int16(0) - - b.next('B') - b.int16(0) // unnamed portal and statement - cn.sendBinaryParameters(b, args) - b.bytes(colFmtDataAllText) - - b.next('D') - b.byte('P') - b.byte(0) // unnamed portal - - b.next('E') - b.byte(0) - b.int32(0) - - b.next('S') - cn.send(b) -} - -func (cn *conn) processParameterStatus(r *readBuf) { - var err error - - param := r.string() - switch param { - case "server_version": - var major1 int - var major2 int - var minor int - _, err = fmt.Sscanf(r.string(), "%d.%d.%d", 
&major1, &major2, &minor) - if err == nil { - cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor - } - - case "TimeZone": - cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) - if err != nil { - cn.parameterStatus.currentLocation = nil - } - - default: - // ignore - } -} - -func (cn *conn) processReadyForQuery(r *readBuf) { - cn.txnStatus = transactionStatus(r.byte()) -} - -func (cn *conn) readReadyForQuery() { - t, r := cn.recv1() - switch t { - case 'Z': - cn.processReadyForQuery(r) - return - default: - cn.bad = true - errorf("unexpected message %q; expected ReadyForQuery", t) - } -} - -func (cn *conn) processBackendKeyData(r *readBuf) { - cn.processID = r.int32() - cn.secretKey = r.int32() -} - -func (cn *conn) readParseResponse() { - t, r := cn.recv1() - switch t { - case '1': - return - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.bad = true - errorf("unexpected Parse response %q", t) - } -} - -func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { - for { - t, r := cn.recv1() - switch t { - case 't': - nparams := r.int16() - paramTyps = make([]oid.Oid, nparams) - for i := range paramTyps { - paramTyps[i] = r.oid() - } - case 'n': - return paramTyps, nil, nil - case 'T': - colNames, colTyps = parseStatementRowDescribe(r) - return paramTyps, colNames, colTyps - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.bad = true - errorf("unexpected Describe statement response %q", t) - } - } -} - -func (cn *conn) readPortalDescribeResponse() rowsHeader { - t, r := cn.recv1() - switch t { - case 'T': - return parsePortalRowDescribe(r) - case 'n': - return rowsHeader{} - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.bad = true - errorf("unexpected Describe response %q", t) - } - panic("not reached") -} - -func (cn *conn) readBindResponse() { - t, r := 
cn.recv1() - switch t { - case '2': - return - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.bad = true - errorf("unexpected Bind response %q", t) - } -} - -func (cn *conn) postExecuteWorkaround() { - // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores - // any errors from rows.Next, which masks errors that happened during the - // execution of the query. To avoid the problem in common cases, we wait - // here for one more message from the database. If it's not an error the - // query will likely succeed (or perhaps has already, if it's a - // CommandComplete), so we push the message into the conn struct; recv1 - // will return it as the next message for rows.Next or rows.Close. - // However, if it's an error, we wait until ReadyForQuery and then return - // the error to our caller. - for { - t, r := cn.recv1() - switch t { - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - case 'C', 'D', 'I': - // the query didn't fail, but we can't process this message - cn.saveMessage(t, r) - return - default: - cn.bad = true - errorf("unexpected message during extended query execution: %q", t) - } - } -} - -// Only for Exec(), since we ignore the returned data -func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { - for { - t, r := cn.recv1() - switch t { - case 'C': - if err != nil { - cn.bad = true - errorf("unexpected CommandComplete after error %s", err) - } - res, commandTag = cn.parseComplete(r.string()) - case 'Z': - cn.processReadyForQuery(r) - if res == nil && err == nil { - err = errUnexpectedReady - } - return res, commandTag, err - case 'E': - err = parseError(r) - case 'T', 'D', 'I': - if err != nil { - cn.bad = true - errorf("unexpected %q after error %s", t, err) - } - if t == 'I' { - res = emptyRows - } - // ignore any results - default: - cn.bad = true - errorf("unknown %s response: %q", protocolState, t) - } - } -} - 
-func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { - n := r.int16() - colNames = make([]string, n) - colTyps = make([]fieldDesc, n) - for i := range colNames { - colNames[i] = r.string() - r.next(6) - colTyps[i].OID = r.oid() - colTyps[i].Len = r.int16() - colTyps[i].Mod = r.int32() - // format code not known when describing a statement; always 0 - r.next(2) - } - return -} - -func parsePortalRowDescribe(r *readBuf) rowsHeader { - n := r.int16() - colNames := make([]string, n) - colFmts := make([]format, n) - colTyps := make([]fieldDesc, n) - for i := range colNames { - colNames[i] = r.string() - r.next(6) - colTyps[i].OID = r.oid() - colTyps[i].Len = r.int16() - colTyps[i].Mod = r.int32() - colFmts[i] = format(r.int16()) - } - return rowsHeader{ - colNames: colNames, - colFmts: colFmts, - colTyps: colTyps, - } -} - -// parseEnviron tries to mimic some of libpq's environment handling -// -// To ease testing, it does not directly reference os.Environ, but is -// designed to accept its output. -// -// Environment-set connection information is intended to have a higher -// precedence than a library default but lower than any explicitly -// passed information (such as in the URL or connection string). -func parseEnviron(env []string) (out map[string]string) { - out = make(map[string]string) - - for _, v := range env { - parts := strings.SplitN(v, "=", 2) - - accrue := func(keyname string) { - out[keyname] = parts[1] - } - unsupported := func() { - panic(fmt.Sprintf("setting %v not supported", parts[0])) - } - - // The order of these is the same as is seen in the - // PostgreSQL 9.1 manual. Unsupported but well-defined - // keys cause a panic; these should be unset prior to - // execution. Options which pq expects to be set to a - // certain value are allowed, but must be set to that - // value if present (they can, of course, be absent). 
- switch parts[0] { - case "PGHOST": - accrue("host") - case "PGHOSTADDR": - unsupported() - case "PGPORT": - accrue("port") - case "PGDATABASE": - accrue("dbname") - case "PGUSER": - accrue("user") - case "PGPASSWORD": - accrue("password") - case "PGSERVICE", "PGSERVICEFILE", "PGREALM": - unsupported() - case "PGOPTIONS": - accrue("options") - case "PGAPPNAME": - accrue("application_name") - case "PGSSLMODE": - accrue("sslmode") - case "PGSSLCERT": - accrue("sslcert") - case "PGSSLKEY": - accrue("sslkey") - case "PGSSLROOTCERT": - accrue("sslrootcert") - case "PGREQUIRESSL", "PGSSLCRL": - unsupported() - case "PGREQUIREPEER": - unsupported() - case "PGKRBSRVNAME", "PGGSSLIB": - unsupported() - case "PGCONNECT_TIMEOUT": - accrue("connect_timeout") - case "PGCLIENTENCODING": - accrue("client_encoding") - case "PGDATESTYLE": - accrue("datestyle") - case "PGTZ": - accrue("timezone") - case "PGGEQO": - accrue("geqo") - case "PGSYSCONFDIR", "PGLOCALEDIR": - unsupported() - } - } - - return out -} - -// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". 
-func isUTF8(name string) bool { - // Recognize all sorts of silly things as "UTF-8", like Postgres does - s := strings.Map(alnumLowerASCII, name) - return s == "utf8" || s == "unicode" -} - -func alnumLowerASCII(ch rune) rune { - if 'A' <= ch && ch <= 'Z' { - return ch + ('a' - 'A') - } - if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { - return ch - } - return -1 // discard -} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go deleted file mode 100644 index 09e2ea46..00000000 --- a/vendor/github.com/lib/pq/conn_go18.go +++ /dev/null @@ -1,149 +0,0 @@ -package pq - -import ( - "context" - "database/sql" - "database/sql/driver" - "fmt" - "io" - "io/ioutil" - "time" -) - -// Implement the "QueryerContext" interface -func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - finish := cn.watchCancel(ctx) - r, err := cn.query(query, list) - if err != nil { - if finish != nil { - finish() - } - return nil, err - } - r.finish = finish - return r, nil -} - -// Implement the "ExecerContext" interface -func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - - if finish := cn.watchCancel(ctx); finish != nil { - defer finish() - } - - return cn.Exec(query, list) -} - -// Implement the "ConnBeginTx" interface -func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - var mode string - - switch sql.IsolationLevel(opts.Isolation) { - case sql.LevelDefault: - // Don't touch mode: use the server's default - case sql.LevelReadUncommitted: - mode = " ISOLATION LEVEL READ UNCOMMITTED" - case sql.LevelReadCommitted: - mode = " ISOLATION LEVEL READ COMMITTED" - case sql.LevelRepeatableRead: - mode = " 
ISOLATION LEVEL REPEATABLE READ" - case sql.LevelSerializable: - mode = " ISOLATION LEVEL SERIALIZABLE" - default: - return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) - } - - if opts.ReadOnly { - mode += " READ ONLY" - } else { - mode += " READ WRITE" - } - - tx, err := cn.begin(mode) - if err != nil { - return nil, err - } - cn.txnFinish = cn.watchCancel(ctx) - return tx, nil -} - -func (cn *conn) Ping(ctx context.Context) error { - if finish := cn.watchCancel(ctx); finish != nil { - defer finish() - } - rows, err := cn.simpleQuery(";") - if err != nil { - return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger - } - rows.Close() - return nil -} - -func (cn *conn) watchCancel(ctx context.Context) func() { - if done := ctx.Done(); done != nil { - finished := make(chan struct{}) - go func() { - select { - case <-done: - // At this point the function level context is canceled, - // so it must not be used for the additional network - // request to cancel the query. - // Create a new context to pass into the dial. - ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - _ = cn.cancel(ctxCancel) - finished <- struct{}{} - case <-finished: - } - }() - return func() { - select { - case <-finished: - case finished <- struct{}{}: - } - } - } - return nil -} - -func (cn *conn) cancel(ctx context.Context) error { - c, err := dial(ctx, cn.dialer, cn.opts) - if err != nil { - return err - } - defer c.Close() - - { - can := conn{ - c: c, - } - err = can.ssl(cn.opts) - if err != nil { - return err - } - - w := can.writeBuf(0) - w.int32(80877102) // cancel request code - w.int32(cn.processID) - w.int32(cn.secretKey) - - if err := can.sendStartupPacket(w); err != nil { - return err - } - } - - // Read until EOF to ensure that the server received the cancel. 
- { - _, err := io.Copy(ioutil.Discard, c) - return err - } -} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go deleted file mode 100644 index d7d47261..00000000 --- a/vendor/github.com/lib/pq/connector.go +++ /dev/null @@ -1,115 +0,0 @@ -package pq - -import ( - "context" - "database/sql/driver" - "errors" - "fmt" - "os" - "strings" -) - -// Connector represents a fixed configuration for the pq driver with a given -// name. Connector satisfies the database/sql/driver Connector interface and -// can be used to create any number of DB Conn's via the database/sql OpenDB -// function. -// -// See https://golang.org/pkg/database/sql/driver/#Connector. -// See https://golang.org/pkg/database/sql/#OpenDB. -type Connector struct { - opts values - dialer Dialer -} - -// Connect returns a connection to the database using the fixed configuration -// of this Connector. Context is not used. -func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { - return c.open(ctx) -} - -// Driver returns the underlying driver of this Connector. -func (c *Connector) Driver() driver.Driver { - return &Driver{} -} - -// NewConnector returns a connector for the pq driver in a fixed configuration -// with the given dsn. The returned connector can be used to create any number -// of equivalent Conn's. The returned connector is intended to be used with -// database/sql.OpenDB. -// -// See https://golang.org/pkg/database/sql/driver/#Connector. -// See https://golang.org/pkg/database/sql/#OpenDB. 
-func NewConnector(dsn string) (*Connector, error) { - var err error - o := make(values) - - // A number of defaults are applied here, in this order: - // - // * Very low precedence defaults applied in every situation - // * Environment variables - // * Explicitly passed connection information - o["host"] = "localhost" - o["port"] = "5432" - // N.B.: Extra float digits should be set to 3, but that breaks - // Postgres 8.4 and older, where the max is 2. - o["extra_float_digits"] = "2" - for k, v := range parseEnviron(os.Environ()) { - o[k] = v - } - - if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { - dsn, err = ParseURL(dsn) - if err != nil { - return nil, err - } - } - - if err := parseOpts(dsn, o); err != nil { - return nil, err - } - - // Use the "fallback" application name if necessary - if fallback, ok := o["fallback_application_name"]; ok { - if _, ok := o["application_name"]; !ok { - o["application_name"] = fallback - } - } - - // We can't work with any client_encoding other than UTF-8 currently. - // However, we have historically allowed the user to set it to UTF-8 - // explicitly, and there's no reason to break such programs, so allow that. - // Note that the "options" setting could also set client_encoding, but - // parsing its value is not worth it. Instead, we always explicitly send - // client_encoding as a separate run-time parameter, which should override - // anything set in options. - if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { - return nil, errors.New("client_encoding must be absent or 'UTF8'") - } - o["client_encoding"] = "UTF8" - // DateStyle needs a similar treatment. 
- if datestyle, ok := o["datestyle"]; ok { - if datestyle != "ISO, MDY" { - return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) - } - } else { - o["datestyle"] = "ISO, MDY" - } - - // If a user is not provided by any other means, the last - // resort is to use the current operating system provided user - // name. - if _, ok := o["user"]; !ok { - u, err := userCurrent() - if err != nil { - return nil, err - } - o["user"] = u - } - - // SSL is not necessary or supported over UNIX domain sockets - if network, _ := network(o); network == "unix" { - o["sslmode"] = "disable" - } - - return &Connector{opts: o, dialer: defaultDialer{}}, nil -} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go deleted file mode 100644 index 38d5bb69..00000000 --- a/vendor/github.com/lib/pq/copy.go +++ /dev/null @@ -1,307 +0,0 @@ -package pq - -import ( - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "sync" -) - -var ( - errCopyInClosed = errors.New("pq: copyin statement has already been closed") - errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") - errCopyToNotSupported = errors.New("pq: COPY TO is not supported") - errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") - errCopyInProgress = errors.New("pq: COPY in progress") -) - -// CopyIn creates a COPY FROM statement which can be prepared with -// Tx.Prepare(). The target table should be visible in search_path. -func CopyIn(table string, columns ...string) string { - stmt := "COPY " + QuoteIdentifier(table) + " (" - for i, col := range columns { - if i != 0 { - stmt += ", " - } - stmt += QuoteIdentifier(col) - } - stmt += ") FROM STDIN" - return stmt -} - -// CopyInSchema creates a COPY FROM statement which can be prepared with -// Tx.Prepare(). -func CopyInSchema(schema, table string, columns ...string) string { - stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" - for i, col := range columns { - if i != 0 { - stmt += ", " - } - stmt += QuoteIdentifier(col) - } - stmt += ") FROM STDIN" - return stmt -} - -type copyin struct { - cn *conn - buffer []byte - rowData chan []byte - done chan bool - driver.Result - - closed bool - - sync.Mutex // guards err - err error -} - -const ciBufferSize = 64 * 1024 - -// flush buffer before the buffer is filled up and needs reallocation -const ciBufferFlushSize = 63 * 1024 - -func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { - if !cn.isInTransaction() { - return nil, errCopyNotSupportedOutsideTxn - } - - ci := ©in{ - cn: cn, - buffer: make([]byte, 0, ciBufferSize), - rowData: make(chan []byte), - done: make(chan bool, 1), - } - // add CopyData identifier + 4 bytes for message length - ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) - - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - -awaitCopyInResponse: - for { - t, r := cn.recv1() - switch t { - case 'G': - if r.byte() != 0 { - err = errBinaryCopyNotSupported - break awaitCopyInResponse - } - go ci.resploop() - return ci, nil - case 'H': - err = errCopyToNotSupported - break awaitCopyInResponse - case 'E': - err = parseError(r) - case 'Z': - if err == nil { - ci.setBad() - errorf("unexpected ReadyForQuery in response to COPY") - } - cn.processReadyForQuery(r) - return nil, err - default: - ci.setBad() - errorf("unknown response for copy query: %q", t) - } - } - - // something went wrong, abort COPY before we return - b = cn.writeBuf('f') - b.string(err.Error()) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'c', 'C', 'E': - case 'Z': - // correctly aborted, we're done - cn.processReadyForQuery(r) - return nil, err - default: - ci.setBad() - errorf("unknown response for CopyFail: %q", t) - } - } -} - -func (ci *copyin) flush(buf []byte) { - // set message length (without message identifier) - binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) - - _, err := 
ci.cn.c.Write(buf) - if err != nil { - panic(err) - } -} - -func (ci *copyin) resploop() { - for { - var r readBuf - t, err := ci.cn.recvMessage(&r) - if err != nil { - ci.setBad() - ci.setError(err) - ci.done <- true - return - } - switch t { - case 'C': - // complete - res, _ := ci.cn.parseComplete(r.string()) - ci.setResult(res) - case 'N': - if n := ci.cn.noticeHandler; n != nil { - n(parseError(&r)) - } - case 'Z': - ci.cn.processReadyForQuery(&r) - ci.done <- true - return - case 'E': - err := parseError(&r) - ci.setError(err) - default: - ci.setBad() - ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) - ci.done <- true - return - } - } -} - -func (ci *copyin) setBad() { - ci.Lock() - ci.cn.bad = true - ci.Unlock() -} - -func (ci *copyin) isBad() bool { - ci.Lock() - b := ci.cn.bad - ci.Unlock() - return b -} - -func (ci *copyin) isErrorSet() bool { - ci.Lock() - isSet := (ci.err != nil) - ci.Unlock() - return isSet -} - -// setError() sets ci.err if one has not been set already. Caller must not be -// holding ci.Mutex. -func (ci *copyin) setError(err error) { - ci.Lock() - if ci.err == nil { - ci.err = err - } - ci.Unlock() -} - -func (ci *copyin) setResult(result driver.Result) { - ci.Lock() - ci.Result = result - ci.Unlock() -} - -func (ci *copyin) getResult() driver.Result { - ci.Lock() - result := ci.Result - if result == nil { - return driver.RowsAffected(0) - } - ci.Unlock() - return result -} - -func (ci *copyin) NumInput() int { - return -1 -} - -func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { - return nil, ErrNotSupported -} - -// Exec inserts values into the COPY stream. The insert is asynchronous -// and Exec can return errors from previous Exec calls to the same -// COPY stmt. -// -// You need to call Exec(nil) to sync the COPY stream and to get any -// errors from pending data, since Stmt.Close() doesn't return errors -// to the user. 
-func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { - if ci.closed { - return nil, errCopyInClosed - } - - if ci.isBad() { - return nil, driver.ErrBadConn - } - defer ci.cn.errRecover(&err) - - if ci.isErrorSet() { - return nil, ci.err - } - - if len(v) == 0 { - if err := ci.Close(); err != nil { - return driver.RowsAffected(0), err - } - - return ci.getResult(), nil - } - - numValues := len(v) - for i, value := range v { - ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) - if i < numValues-1 { - ci.buffer = append(ci.buffer, '\t') - } - } - - ci.buffer = append(ci.buffer, '\n') - - if len(ci.buffer) > ciBufferFlushSize { - ci.flush(ci.buffer) - // reset buffer, keep bytes for message identifier and length - ci.buffer = ci.buffer[:5] - } - - return driver.RowsAffected(0), nil -} - -func (ci *copyin) Close() (err error) { - if ci.closed { // Don't do anything, we're already closed - return nil - } - ci.closed = true - - if ci.isBad() { - return driver.ErrBadConn - } - defer ci.cn.errRecover(&err) - - if len(ci.buffer) > 0 { - ci.flush(ci.buffer) - } - // Avoid touching the scratch buffer as resploop could be using it. - err = ci.cn.sendSimpleMessage('c') - if err != nil { - return err - } - - <-ci.done - ci.cn.inCopy = false - - if ci.isErrorSet() { - err = ci.err - return err - } - return nil -} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go deleted file mode 100644 index b5718480..00000000 --- a/vendor/github.com/lib/pq/doc.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Package pq is a pure Go Postgres driver for the database/sql package. - -In most cases clients will use the database/sql package instead of -using this package directly. 
For example: - - import ( - "database/sql" - - _ "github.com/lib/pq" - ) - - func main() { - connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" - db, err := sql.Open("postgres", connStr) - if err != nil { - log.Fatal(err) - } - - age := 21 - rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) - … - } - -You can also connect to a database using a URL. For example: - - connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" - db, err := sql.Open("postgres", connStr) - - -Connection String Parameters - - -Similarly to libpq, when establishing a connection using pq you are expected to -supply a connection string containing zero or more parameters. -A subset of the connection parameters supported by libpq are also supported by pq. -Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) -directly in the connection string. This is different from libpq, which does not allow -run-time parameters in the connection string, instead requiring you to supply -them in the options parameter. - -For compatibility with libpq, the following special connection parameters are -supported: - - * dbname - The name of the database to connect to - * user - The user to sign in as - * password - The user's password - * host - The host to connect to. Values that start with / are for unix - domain sockets. (default is localhost) - * port - The port to bind to. (default is 5432) - * sslmode - Whether or not to use SSL (default is require, this is not - the default for libpq) - * fallback_application_name - An application_name to fall back to if one isn't provided. - * connect_timeout - Maximum wait for connection, in seconds. Zero or - not specified means wait indefinitely. - * sslcert - Cert file location. The file must contain PEM encoded data. - * sslkey - Key file location. The file must contain PEM encoded data. - * sslrootcert - The location of the root certificate file. 
The file - must contain PEM encoded data. - -Valid values for sslmode are: - - * disable - No SSL - * require - Always SSL (skip verification) - * verify-ca - Always SSL (verify that the certificate presented by the - server was signed by a trusted CA) - * verify-full - Always SSL (verify that the certification presented by - the server was signed by a trusted CA and the server host name - matches the one in the certificate) - -See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING -for more information about connection string parameters. - -Use single quotes for values that contain whitespace: - - "user=pqgotest password='with spaces'" - -A backslash will escape the next character in values: - - "user=space\ man password='it\'s valid'" - -Note that the connection parameter client_encoding (which sets the -text encoding for the connection) may be set but must be "UTF8", -matching with the same rules as Postgres. It is an error to provide -any other value. - -In addition to the parameters listed above, any run-time parameter that can be -set at backend start time can be set in the connection string. For more -information, see -http://www.postgresql.org/docs/current/static/runtime-config.html. - -Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html -supported by libpq are also supported by pq. If any of the environment -variables not supported by pq are set, pq will panic during connection -establishment. Environment variables have a lower precedence than explicitly -provided connection parameters. - -The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html -is supported, but on Windows PGPASSFILE must be specified explicitly. - - -Queries - - -database/sql does not dictate any specific format for parameter -markers in query strings, and pq uses the Postgres-native ordinal markers, -as shown above. 
The same marker can be reused for the same parameter: - - rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 - OR age BETWEEN $2 AND $2 + 3`, "orange", 64) - -pq does not support the LastInsertId() method of the Result type in database/sql. -To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres -RETURNING clause with a standard Query or QueryRow call: - - var userid int - err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) - VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) - -For more details on RETURNING, see the Postgres documentation: - - http://www.postgresql.org/docs/current/static/sql-insert.html - http://www.postgresql.org/docs/current/static/sql-update.html - http://www.postgresql.org/docs/current/static/sql-delete.html - -For additional instructions on querying see the documentation for the database/sql package. - - -Data Types - - -Parameters pass through driver.DefaultParameterConverter before they are handled -by this package. When the binary_parameters connection option is enabled, -[]byte values are sent directly to the backend as data in binary format. - -This package returns the following types for values from the PostgreSQL backend: - - - integer types smallint, integer, and bigint are returned as int64 - - floating-point types real and double precision are returned as float64 - - character types char, varchar, and text are returned as string - - temporal types date, time, timetz, timestamp, and timestamptz are - returned as time.Time - - the boolean type is returned as bool - - the bytea type is returned as []byte - -All other types are returned directly from the backend as []byte values in text format. - - -Errors - - -pq may return errors of type *pq.Error which can be interrogated for error details: - - if err, ok := err.(*pq.Error); ok { - fmt.Println("pq error:", err.Code.Name()) - } - -See the pq.Error type for details. 
- - -Bulk imports - -You can perform bulk imports by preparing a statement returned by pq.CopyIn (or -pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement -handle can then be repeatedly "executed" to copy data into the target table. -After all data has been processed you should call Exec() once with no arguments -to flush all buffered data. Any call to Exec() might return an error which -should be handled appropriately, but because of the internal buffering an error -returned by Exec() might not be related to the data passed in the call that -failed. - -CopyIn uses COPY FROM internally. It is not possible to COPY outside of an -explicit transaction in pq. - -Usage example: - - txn, err := db.Begin() - if err != nil { - log.Fatal(err) - } - - stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) - if err != nil { - log.Fatal(err) - } - - for _, user := range users { - _, err = stmt.Exec(user.Name, int64(user.Age)) - if err != nil { - log.Fatal(err) - } - } - - _, err = stmt.Exec() - if err != nil { - log.Fatal(err) - } - - err = stmt.Close() - if err != nil { - log.Fatal(err) - } - - err = txn.Commit() - if err != nil { - log.Fatal(err) - } - - -Notifications - - -PostgreSQL supports a simple publish/subscribe model over database -connections. See http://www.postgresql.org/docs/current/static/sql-notify.html -for more information about the general mechanism. - -To start listening for notifications, you first have to open a new connection -to the database by calling NewListener. This connection can not be used for -anything other than LISTEN / NOTIFY. Calling Listen will open a "notification -channel"; once a notification channel is open, a notification generated on that -channel will effect a send on the Listener.Notify channel. A notification -channel will remain open until Unlisten is called, though connection loss might -result in some notifications being lost. 
To solve this problem, Listener sends -a nil pointer over the Notify channel any time the connection is re-established -following a connection loss. The application can get information about the -state of the underlying connection by setting an event callback in the call to -NewListener. - -A single Listener can safely be used from concurrent goroutines, which means -that there is often no need to create more than one Listener in your -application. However, a Listener is always connected to a single database, so -you will need to create a new Listener instance for every database you want to -receive notifications in. - -The channel name in both Listen and Unlisten is case sensitive, and can contain -any characters legal in an identifier (see -http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS -for more information). Note that the channel name will be truncated to 63 -bytes by the PostgreSQL server. - -You can find a complete, working example of Listener usage at -https://godoc.org/github.com/lib/pq/example/listen. - - -Kerberos Support - - -If you need support for Kerberos authentication, add the following to your main -package: - - import "github.com/lib/pq/auth/kerberos" - - func init() { - pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) - } - -This package is in a separate module so that users who don't need Kerberos -don't have to download unnecessary dependencies. - -When imported, additional connection string parameters are supported: - - * krbsrvname - GSS (Kerberos) service name when constructing the - SPN (default is `postgres`). This will be combined with the host - to form the full SPN: `krbsrvname/host`. - * krbspn - GSS (Kerberos) SPN. This takes priority over - `krbsrvname` if present. 
-*/ -package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go deleted file mode 100644 index c4dafe27..00000000 --- a/vendor/github.com/lib/pq/encode.go +++ /dev/null @@ -1,622 +0,0 @@ -package pq - -import ( - "bytes" - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/lib/pq/oid" -) - -var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) - -func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { - switch v := x.(type) { - case []byte: - return v - default: - return encode(parameterStatus, x, oid.T_unknown) - } -} - -func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { - switch v := x.(type) { - case int64: - return strconv.AppendInt(nil, v, 10) - case float64: - return strconv.AppendFloat(nil, v, 'f', -1, 64) - case []byte: - if pgtypOid == oid.T_bytea { - return encodeBytea(parameterStatus.serverVersion, v) - } - - return v - case string: - if pgtypOid == oid.T_bytea { - return encodeBytea(parameterStatus.serverVersion, []byte(v)) - } - - return []byte(v) - case bool: - return strconv.AppendBool(nil, v) - case time.Time: - return formatTs(v) - - default: - errorf("encode: unknown type for %T", v) - } - - panic("not reached") -} - -func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { - switch f { - case formatBinary: - return binaryDecode(parameterStatus, s, typ) - case formatText: - return textDecode(parameterStatus, s, typ) - default: - panic("not reached") - } -} - -func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { - switch typ { - case oid.T_bytea: - return s - case oid.T_int8: - return int64(binary.BigEndian.Uint64(s)) - case oid.T_int4: - return int64(int32(binary.BigEndian.Uint32(s))) - case oid.T_int2: - return int64(int16(binary.BigEndian.Uint16(s))) - 
case oid.T_uuid: - b, err := decodeUUIDBinary(s) - if err != nil { - panic(err) - } - return b - - default: - errorf("don't know how to decode binary parameter of type %d", uint32(typ)) - } - - panic("not reached") -} - -func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { - switch typ { - case oid.T_char, oid.T_varchar, oid.T_text: - return string(s) - case oid.T_bytea: - b, err := parseBytea(s) - if err != nil { - errorf("%s", err) - } - return b - case oid.T_timestamptz: - return parseTs(parameterStatus.currentLocation, string(s)) - case oid.T_timestamp, oid.T_date: - return parseTs(nil, string(s)) - case oid.T_time: - return mustParse("15:04:05", typ, s) - case oid.T_timetz: - return mustParse("15:04:05-07", typ, s) - case oid.T_bool: - return s[0] == 't' - case oid.T_int8, oid.T_int4, oid.T_int2: - i, err := strconv.ParseInt(string(s), 10, 64) - if err != nil { - errorf("%s", err) - } - return i - case oid.T_float4, oid.T_float8: - // We always use 64 bit parsing, regardless of whether the input text is for - // a float4 or float8, because clients expect float64s for all float datatypes - // and returning a 32-bit parsed float64 produces lossy results. - f, err := strconv.ParseFloat(string(s), 64) - if err != nil { - errorf("%s", err) - } - return f - } - - return s -} - -// appendEncodedText encodes item in text format as required by COPY -// and appends to buf -func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { - switch v := x.(type) { - case int64: - return strconv.AppendInt(buf, v, 10) - case float64: - return strconv.AppendFloat(buf, v, 'f', -1, 64) - case []byte: - encodedBytea := encodeBytea(parameterStatus.serverVersion, v) - return appendEscapedText(buf, string(encodedBytea)) - case string: - return appendEscapedText(buf, v) - case bool: - return strconv.AppendBool(buf, v) - case time.Time: - return append(buf, formatTs(v)...) - case nil: - return append(buf, "\\N"...) 
- default: - errorf("encode: unknown type for %T", v) - } - - panic("not reached") -} - -func appendEscapedText(buf []byte, text string) []byte { - escapeNeeded := false - startPos := 0 - var c byte - - // check if we need to escape - for i := 0; i < len(text); i++ { - c = text[i] - if c == '\\' || c == '\n' || c == '\r' || c == '\t' { - escapeNeeded = true - startPos = i - break - } - } - if !escapeNeeded { - return append(buf, text...) - } - - // copy till first char to escape, iterate the rest - result := append(buf, text[:startPos]...) - for i := startPos; i < len(text); i++ { - c = text[i] - switch c { - case '\\': - result = append(result, '\\', '\\') - case '\n': - result = append(result, '\\', 'n') - case '\r': - result = append(result, '\\', 'r') - case '\t': - result = append(result, '\\', 't') - default: - result = append(result, c) - } - } - return result -} - -func mustParse(f string, typ oid.Oid, s []byte) time.Time { - str := string(s) - - // check for a 30-minute-offset timezone - if (typ == oid.T_timestamptz || typ == oid.T_timetz) && - str[len(str)-3] == ':' { - f += ":00" - } - // Special case for 24:00 time. - // Unfortunately, golang does not parse 24:00 as a proper time. - // In this case, we want to try "round to the next day", to differentiate. - // As such, we find if the 24:00 time matches at the beginning; if so, - // we default it back to 00:00 but add a day later. - var is2400Time bool - switch typ { - case oid.T_timetz, oid.T_time: - if matches := time2400Regex.FindStringSubmatch(str); matches != nil { - // Concatenate timezone information at the back. 
- str = "00:00:00" + str[len(matches[1]):] - is2400Time = true - } - } - t, err := time.Parse(f, str) - if err != nil { - errorf("decode: %s", err) - } - if is2400Time { - t = t.Add(24 * time.Hour) - } - return t -} - -var errInvalidTimestamp = errors.New("invalid timestamp") - -type timestampParser struct { - err error -} - -func (p *timestampParser) expect(str string, char byte, pos int) { - if p.err != nil { - return - } - if pos+1 > len(str) { - p.err = errInvalidTimestamp - return - } - if c := str[pos]; c != char && p.err == nil { - p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) - } -} - -func (p *timestampParser) mustAtoi(str string, begin int, end int) int { - if p.err != nil { - return 0 - } - if begin < 0 || end < 0 || begin > end || end > len(str) { - p.err = errInvalidTimestamp - return 0 - } - result, err := strconv.Atoi(str[begin:end]) - if err != nil { - if p.err == nil { - p.err = fmt.Errorf("expected number; got '%v'", str) - } - return 0 - } - return result -} - -// The location cache caches the time zones typically used by the client. -type locationCache struct { - cache map[int]*time.Location - lock sync.Mutex -} - -// All connections share the same list of timezones. Benchmarking shows that -// about 5% speed could be gained by putting the cache in the connection and -// losing the mutex, at the cost of a small amount of memory and a somewhat -// significant increase in code complexity. -var globalLocationCache = newLocationCache() - -func newLocationCache() *locationCache { - return &locationCache{cache: make(map[int]*time.Location)} -} - -// Returns the cached timezone for the specified offset, creating and caching -// it if necessary. 
-func (c *locationCache) getLocation(offset int) *time.Location { - c.lock.Lock() - defer c.lock.Unlock() - - location, ok := c.cache[offset] - if !ok { - location = time.FixedZone("", offset) - c.cache[offset] = location - } - - return location -} - -var infinityTsEnabled = false -var infinityTsNegative time.Time -var infinityTsPositive time.Time - -const ( - infinityTsEnabledAlready = "pq: infinity timestamp enabled already" - infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" -) - -// EnableInfinityTs controls the handling of Postgres' "-infinity" and -// "infinity" "timestamp"s. -// -// If EnableInfinityTs is not called, "-infinity" and "infinity" will return -// []byte("-infinity") and []byte("infinity") respectively, and potentially -// cause error "sql: Scan error on column index 0: unsupported driver -> Scan -// pair: []uint8 -> *time.Time", when scanning into a time.Time value. -// -// Once EnableInfinityTs has been called, all connections created using this -// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", -// "timestamp with time zone" and "date" types to the predefined minimum and -// maximum times, respectively. When encoding time.Time values, any time which -// equals or precedes the predefined minimum time will be encoded to -// "-infinity". Any values at or past the maximum time will similarly be -// encoded to "infinity". -// -// If EnableInfinityTs is called with negative >= positive, it will panic. -// Calling EnableInfinityTs after a connection has been established results in -// undefined behavior. If EnableInfinityTs is called more than once, it will -// panic. 
-func EnableInfinityTs(negative time.Time, positive time.Time) { - if infinityTsEnabled { - panic(infinityTsEnabledAlready) - } - if !negative.Before(positive) { - panic(infinityTsNegativeMustBeSmaller) - } - infinityTsEnabled = true - infinityTsNegative = negative - infinityTsPositive = positive -} - -/* - * Testing might want to toggle infinityTsEnabled - */ -func disableInfinityTs() { - infinityTsEnabled = false -} - -// This is a time function specific to the Postgres default DateStyle -// setting ("ISO, MDY"), the only one we currently support. This -// accounts for the discrepancies between the parsing available with -// time.Parse and the Postgres date formatting quirks. -func parseTs(currentLocation *time.Location, str string) interface{} { - switch str { - case "-infinity": - if infinityTsEnabled { - return infinityTsNegative - } - return []byte(str) - case "infinity": - if infinityTsEnabled { - return infinityTsPositive - } - return []byte(str) - } - t, err := ParseTimestamp(currentLocation, str) - if err != nil { - panic(err) - } - return t -} - -// ParseTimestamp parses Postgres' text format. It returns a time.Time in -// currentLocation iff that time's offset agrees with the offset sent from the -// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the -// fixed offset offset provided by the Postgres server. 
-func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { - p := timestampParser{} - - monSep := strings.IndexRune(str, '-') - // this is Gregorian year, not ISO Year - // In Gregorian system, the year 1 BC is followed by AD 1 - year := p.mustAtoi(str, 0, monSep) - daySep := monSep + 3 - month := p.mustAtoi(str, monSep+1, daySep) - p.expect(str, '-', daySep) - timeSep := daySep + 3 - day := p.mustAtoi(str, daySep+1, timeSep) - - minLen := monSep + len("01-01") + 1 - - isBC := strings.HasSuffix(str, " BC") - if isBC { - minLen += 3 - } - - var hour, minute, second int - if len(str) > minLen { - p.expect(str, ' ', timeSep) - minSep := timeSep + 3 - p.expect(str, ':', minSep) - hour = p.mustAtoi(str, timeSep+1, minSep) - secSep := minSep + 3 - p.expect(str, ':', secSep) - minute = p.mustAtoi(str, minSep+1, secSep) - secEnd := secSep + 3 - second = p.mustAtoi(str, secSep+1, secEnd) - } - remainderIdx := monSep + len("01-01 00:00:00") + 1 - // Three optional (but ordered) sections follow: the - // fractional seconds, the time zone offset, and the BC - // designation. We set them up here and adjust the other - // offsets if the preceding sections exist. - - nanoSec := 0 - tzOff := 0 - - if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ - fracStart := remainderIdx + 1 - fracOff := strings.IndexAny(str[fracStart:], "-+ ") - if fracOff < 0 { - fracOff = len(str) - fracStart - } - fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) - nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) - - remainderIdx += fracOff + 1 - } - if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { - // time zone separator is always '-' or '+' (UTC is +00) - var tzSign int - switch c := str[tzStart]; c { - case '-': - tzSign = -1 - case '+': - tzSign = +1 - default: - return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) - } - tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) - remainderIdx += 3 - var tzMin, tzSec int - if remainderIdx < len(str) && str[remainderIdx] == ':' { - tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) - remainderIdx += 3 - } - if remainderIdx < len(str) && str[remainderIdx] == ':' { - tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) - remainderIdx += 3 - } - tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) - } - var isoYear int - - if isBC { - isoYear = 1 - year - remainderIdx += 3 - } else { - isoYear = year - } - if remainderIdx < len(str) { - return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) - } - t := time.Date(isoYear, time.Month(month), day, - hour, minute, second, nanoSec, - globalLocationCache.getLocation(tzOff)) - - if currentLocation != nil { - // Set the location of the returned Time based on the session's - // TimeZone value, but only if the local time zone database agrees with - // the remote database on the offset. - lt := t.In(currentLocation) - _, newOff := lt.Zone() - if newOff == tzOff { - t = lt - } - } - - return t, p.err -} - -// formatTs formats t into a format postgres understands. -func formatTs(t time.Time) []byte { - if infinityTsEnabled { - // t <= -infinity : ! 
(t > -infinity) - if !t.After(infinityTsNegative) { - return []byte("-infinity") - } - // t >= infinity : ! (!t < infinity) - if !t.Before(infinityTsPositive) { - return []byte("infinity") - } - } - return FormatTimestamp(t) -} - -// FormatTimestamp formats t into Postgres' text format for timestamps. -func FormatTimestamp(t time.Time) []byte { - // Need to send dates before 0001 A.D. with " BC" suffix, instead of the - // minus sign preferred by Go. - // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on - bc := false - if t.Year() <= 0 { - // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" - t = t.AddDate((-t.Year())*2+1, 0, 0) - bc = true - } - b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) - - _, offset := t.Zone() - offset %= 60 - if offset != 0 { - // RFC3339Nano already printed the minus sign - if offset < 0 { - offset = -offset - } - - b = append(b, ':') - if offset < 10 { - b = append(b, '0') - } - b = strconv.AppendInt(b, int64(offset), 10) - } - - if bc { - b = append(b, " BC"...) - } - return b -} - -// Parse a bytea value received from the server. Both "hex" and the legacy -// "escape" format are supported. 
-func parseBytea(s []byte) (result []byte, err error) { - if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { - // bytea_output = hex - s = s[2:] // trim off leading "\\x" - result = make([]byte, hex.DecodedLen(len(s))) - _, err := hex.Decode(result, s) - if err != nil { - return nil, err - } - } else { - // bytea_output = escape - for len(s) > 0 { - if s[0] == '\\' { - // escaped '\\' - if len(s) >= 2 && s[1] == '\\' { - result = append(result, '\\') - s = s[2:] - continue - } - - // '\\' followed by an octal number - if len(s) < 4 { - return nil, fmt.Errorf("invalid bytea sequence %v", s) - } - r, err := strconv.ParseInt(string(s[1:4]), 8, 9) - if err != nil { - return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) - } - result = append(result, byte(r)) - s = s[4:] - } else { - // We hit an unescaped, raw byte. Try to read in as many as - // possible in one go. - i := bytes.IndexByte(s, '\\') - if i == -1 { - result = append(result, s...) - break - } - result = append(result, s[:i]...) - s = s[i:] - } - } - } - - return result, nil -} - -func encodeBytea(serverVersion int, v []byte) (result []byte) { - if serverVersion >= 90000 { - // Use the hex format if we know that the server supports it - result = make([]byte, 2+hex.EncodedLen(len(v))) - result[0] = '\\' - result[1] = 'x' - hex.Encode(result[2:], v) - } else { - // .. or resort to "escape" - for _, b := range v { - if b == '\\' { - result = append(result, '\\', '\\') - } else if b < 0x20 || b > 0x7e { - result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) - } else { - result = append(result, b) - } - } - } - - return result -} - -// NullTime represents a time.Time that may be null. NullTime implements the -// sql.Scanner interface so it can be used as a scan destination, similar to -// sql.NullString. -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. 
-func (nt *NullTime) Scan(value interface{}) error { - nt.Time, nt.Valid = value.(time.Time) - return nil -} - -// Value implements the driver Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go deleted file mode 100644 index 3d66ba7c..00000000 --- a/vendor/github.com/lib/pq/error.go +++ /dev/null @@ -1,515 +0,0 @@ -package pq - -import ( - "database/sql/driver" - "fmt" - "io" - "net" - "runtime" -) - -// Error severities -const ( - Efatal = "FATAL" - Epanic = "PANIC" - Ewarning = "WARNING" - Enotice = "NOTICE" - Edebug = "DEBUG" - Einfo = "INFO" - Elog = "LOG" -) - -// Error represents an error communicating with the server. -// -// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields -type Error struct { - Severity string - Code ErrorCode - Message string - Detail string - Hint string - Position string - InternalPosition string - InternalQuery string - Where string - Schema string - Table string - Column string - DataTypeName string - Constraint string - File string - Line string - Routine string -} - -// ErrorCode is a five-character error code. -type ErrorCode string - -// Name returns a more human friendly rendering of the error code, namely the -// "condition name". -// -// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for -// details. -func (ec ErrorCode) Name() string { - return errorCodeNames[ec] -} - -// ErrorClass is only the class part of an error code. -type ErrorClass string - -// Name returns the condition name of an error class. It is equivalent to the -// condition name of the "standard" error code (i.e. the one having the last -// three characters "000"). -func (ec ErrorClass) Name() string { - return errorCodeNames[ErrorCode(ec+"000")] -} - -// Class returns the error class, e.g. "28". 
-// -// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for -// details. -func (ec ErrorCode) Class() ErrorClass { - return ErrorClass(ec[0:2]) -} - -// errorCodeNames is a mapping between the five-character error codes and the -// human readable "condition names". It is derived from the list at -// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html -var errorCodeNames = map[ErrorCode]string{ - // Class 00 - Successful Completion - "00000": "successful_completion", - // Class 01 - Warning - "01000": "warning", - "0100C": "dynamic_result_sets_returned", - "01008": "implicit_zero_bit_padding", - "01003": "null_value_eliminated_in_set_function", - "01007": "privilege_not_granted", - "01006": "privilege_not_revoked", - "01004": "string_data_right_truncation", - "01P01": "deprecated_feature", - // Class 02 - No Data (this is also a warning class per the SQL standard) - "02000": "no_data", - "02001": "no_additional_dynamic_result_sets_returned", - // Class 03 - SQL Statement Not Yet Complete - "03000": "sql_statement_not_yet_complete", - // Class 08 - Connection Exception - "08000": "connection_exception", - "08003": "connection_does_not_exist", - "08006": "connection_failure", - "08001": "sqlclient_unable_to_establish_sqlconnection", - "08004": "sqlserver_rejected_establishment_of_sqlconnection", - "08007": "transaction_resolution_unknown", - "08P01": "protocol_violation", - // Class 09 - Triggered Action Exception - "09000": "triggered_action_exception", - // Class 0A - Feature Not Supported - "0A000": "feature_not_supported", - // Class 0B - Invalid Transaction Initiation - "0B000": "invalid_transaction_initiation", - // Class 0F - Locator Exception - "0F000": "locator_exception", - "0F001": "invalid_locator_specification", - // Class 0L - Invalid Grantor - "0L000": "invalid_grantor", - "0LP01": "invalid_grant_operation", - // Class 0P - Invalid Role Specification - "0P000": "invalid_role_specification", - // Class 0Z - Diagnostics 
Exception - "0Z000": "diagnostics_exception", - "0Z002": "stacked_diagnostics_accessed_without_active_handler", - // Class 20 - Case Not Found - "20000": "case_not_found", - // Class 21 - Cardinality Violation - "21000": "cardinality_violation", - // Class 22 - Data Exception - "22000": "data_exception", - "2202E": "array_subscript_error", - "22021": "character_not_in_repertoire", - "22008": "datetime_field_overflow", - "22012": "division_by_zero", - "22005": "error_in_assignment", - "2200B": "escape_character_conflict", - "22022": "indicator_overflow", - "22015": "interval_field_overflow", - "2201E": "invalid_argument_for_logarithm", - "22014": "invalid_argument_for_ntile_function", - "22016": "invalid_argument_for_nth_value_function", - "2201F": "invalid_argument_for_power_function", - "2201G": "invalid_argument_for_width_bucket_function", - "22018": "invalid_character_value_for_cast", - "22007": "invalid_datetime_format", - "22019": "invalid_escape_character", - "2200D": "invalid_escape_octet", - "22025": "invalid_escape_sequence", - "22P06": "nonstandard_use_of_escape_character", - "22010": "invalid_indicator_parameter_value", - "22023": "invalid_parameter_value", - "2201B": "invalid_regular_expression", - "2201W": "invalid_row_count_in_limit_clause", - "2201X": "invalid_row_count_in_result_offset_clause", - "22009": "invalid_time_zone_displacement_value", - "2200C": "invalid_use_of_escape_character", - "2200G": "most_specific_type_mismatch", - "22004": "null_value_not_allowed", - "22002": "null_value_no_indicator_parameter", - "22003": "numeric_value_out_of_range", - "2200H": "sequence_generator_limit_exceeded", - "22026": "string_data_length_mismatch", - "22001": "string_data_right_truncation", - "22011": "substring_error", - "22027": "trim_error", - "22024": "unterminated_c_string", - "2200F": "zero_length_character_string", - "22P01": "floating_point_exception", - "22P02": "invalid_text_representation", - "22P03": "invalid_binary_representation", - "22P04": 
"bad_copy_file_format", - "22P05": "untranslatable_character", - "2200L": "not_an_xml_document", - "2200M": "invalid_xml_document", - "2200N": "invalid_xml_content", - "2200S": "invalid_xml_comment", - "2200T": "invalid_xml_processing_instruction", - // Class 23 - Integrity Constraint Violation - "23000": "integrity_constraint_violation", - "23001": "restrict_violation", - "23502": "not_null_violation", - "23503": "foreign_key_violation", - "23505": "unique_violation", - "23514": "check_violation", - "23P01": "exclusion_violation", - // Class 24 - Invalid Cursor State - "24000": "invalid_cursor_state", - // Class 25 - Invalid Transaction State - "25000": "invalid_transaction_state", - "25001": "active_sql_transaction", - "25002": "branch_transaction_already_active", - "25008": "held_cursor_requires_same_isolation_level", - "25003": "inappropriate_access_mode_for_branch_transaction", - "25004": "inappropriate_isolation_level_for_branch_transaction", - "25005": "no_active_sql_transaction_for_branch_transaction", - "25006": "read_only_sql_transaction", - "25007": "schema_and_data_statement_mixing_not_supported", - "25P01": "no_active_sql_transaction", - "25P02": "in_failed_sql_transaction", - // Class 26 - Invalid SQL Statement Name - "26000": "invalid_sql_statement_name", - // Class 27 - Triggered Data Change Violation - "27000": "triggered_data_change_violation", - // Class 28 - Invalid Authorization Specification - "28000": "invalid_authorization_specification", - "28P01": "invalid_password", - // Class 2B - Dependent Privilege Descriptors Still Exist - "2B000": "dependent_privilege_descriptors_still_exist", - "2BP01": "dependent_objects_still_exist", - // Class 2D - Invalid Transaction Termination - "2D000": "invalid_transaction_termination", - // Class 2F - SQL Routine Exception - "2F000": "sql_routine_exception", - "2F005": "function_executed_no_return_statement", - "2F002": "modifying_sql_data_not_permitted", - "2F003": "prohibited_sql_statement_attempted", - 
"2F004": "reading_sql_data_not_permitted", - // Class 34 - Invalid Cursor Name - "34000": "invalid_cursor_name", - // Class 38 - External Routine Exception - "38000": "external_routine_exception", - "38001": "containing_sql_not_permitted", - "38002": "modifying_sql_data_not_permitted", - "38003": "prohibited_sql_statement_attempted", - "38004": "reading_sql_data_not_permitted", - // Class 39 - External Routine Invocation Exception - "39000": "external_routine_invocation_exception", - "39001": "invalid_sqlstate_returned", - "39004": "null_value_not_allowed", - "39P01": "trigger_protocol_violated", - "39P02": "srf_protocol_violated", - // Class 3B - Savepoint Exception - "3B000": "savepoint_exception", - "3B001": "invalid_savepoint_specification", - // Class 3D - Invalid Catalog Name - "3D000": "invalid_catalog_name", - // Class 3F - Invalid Schema Name - "3F000": "invalid_schema_name", - // Class 40 - Transaction Rollback - "40000": "transaction_rollback", - "40002": "transaction_integrity_constraint_violation", - "40001": "serialization_failure", - "40003": "statement_completion_unknown", - "40P01": "deadlock_detected", - // Class 42 - Syntax Error or Access Rule Violation - "42000": "syntax_error_or_access_rule_violation", - "42601": "syntax_error", - "42501": "insufficient_privilege", - "42846": "cannot_coerce", - "42803": "grouping_error", - "42P20": "windowing_error", - "42P19": "invalid_recursion", - "42830": "invalid_foreign_key", - "42602": "invalid_name", - "42622": "name_too_long", - "42939": "reserved_name", - "42804": "datatype_mismatch", - "42P18": "indeterminate_datatype", - "42P21": "collation_mismatch", - "42P22": "indeterminate_collation", - "42809": "wrong_object_type", - "42703": "undefined_column", - "42883": "undefined_function", - "42P01": "undefined_table", - "42P02": "undefined_parameter", - "42704": "undefined_object", - "42701": "duplicate_column", - "42P03": "duplicate_cursor", - "42P04": "duplicate_database", - "42723": 
"duplicate_function", - "42P05": "duplicate_prepared_statement", - "42P06": "duplicate_schema", - "42P07": "duplicate_table", - "42712": "duplicate_alias", - "42710": "duplicate_object", - "42702": "ambiguous_column", - "42725": "ambiguous_function", - "42P08": "ambiguous_parameter", - "42P09": "ambiguous_alias", - "42P10": "invalid_column_reference", - "42611": "invalid_column_definition", - "42P11": "invalid_cursor_definition", - "42P12": "invalid_database_definition", - "42P13": "invalid_function_definition", - "42P14": "invalid_prepared_statement_definition", - "42P15": "invalid_schema_definition", - "42P16": "invalid_table_definition", - "42P17": "invalid_object_definition", - // Class 44 - WITH CHECK OPTION Violation - "44000": "with_check_option_violation", - // Class 53 - Insufficient Resources - "53000": "insufficient_resources", - "53100": "disk_full", - "53200": "out_of_memory", - "53300": "too_many_connections", - "53400": "configuration_limit_exceeded", - // Class 54 - Program Limit Exceeded - "54000": "program_limit_exceeded", - "54001": "statement_too_complex", - "54011": "too_many_columns", - "54023": "too_many_arguments", - // Class 55 - Object Not In Prerequisite State - "55000": "object_not_in_prerequisite_state", - "55006": "object_in_use", - "55P02": "cant_change_runtime_param", - "55P03": "lock_not_available", - // Class 57 - Operator Intervention - "57000": "operator_intervention", - "57014": "query_canceled", - "57P01": "admin_shutdown", - "57P02": "crash_shutdown", - "57P03": "cannot_connect_now", - "57P04": "database_dropped", - // Class 58 - System Error (errors external to PostgreSQL itself) - "58000": "system_error", - "58030": "io_error", - "58P01": "undefined_file", - "58P02": "duplicate_file", - // Class F0 - Configuration File Error - "F0000": "config_file_error", - "F0001": "lock_file_exists", - // Class HV - Foreign Data Wrapper Error (SQL/MED) - "HV000": "fdw_error", - "HV005": "fdw_column_name_not_found", - "HV002": 
"fdw_dynamic_parameter_value_needed", - "HV010": "fdw_function_sequence_error", - "HV021": "fdw_inconsistent_descriptor_information", - "HV024": "fdw_invalid_attribute_value", - "HV007": "fdw_invalid_column_name", - "HV008": "fdw_invalid_column_number", - "HV004": "fdw_invalid_data_type", - "HV006": "fdw_invalid_data_type_descriptors", - "HV091": "fdw_invalid_descriptor_field_identifier", - "HV00B": "fdw_invalid_handle", - "HV00C": "fdw_invalid_option_index", - "HV00D": "fdw_invalid_option_name", - "HV090": "fdw_invalid_string_length_or_buffer_length", - "HV00A": "fdw_invalid_string_format", - "HV009": "fdw_invalid_use_of_null_pointer", - "HV014": "fdw_too_many_handles", - "HV001": "fdw_out_of_memory", - "HV00P": "fdw_no_schemas", - "HV00J": "fdw_option_name_not_found", - "HV00K": "fdw_reply_handle", - "HV00Q": "fdw_schema_not_found", - "HV00R": "fdw_table_not_found", - "HV00L": "fdw_unable_to_create_execution", - "HV00M": "fdw_unable_to_create_reply", - "HV00N": "fdw_unable_to_establish_connection", - // Class P0 - PL/pgSQL Error - "P0000": "plpgsql_error", - "P0001": "raise_exception", - "P0002": "no_data_found", - "P0003": "too_many_rows", - // Class XX - Internal Error - "XX000": "internal_error", - "XX001": "data_corrupted", - "XX002": "index_corrupted", -} - -func parseError(r *readBuf) *Error { - err := new(Error) - for t := r.byte(); t != 0; t = r.byte() { - msg := r.string() - switch t { - case 'S': - err.Severity = msg - case 'C': - err.Code = ErrorCode(msg) - case 'M': - err.Message = msg - case 'D': - err.Detail = msg - case 'H': - err.Hint = msg - case 'P': - err.Position = msg - case 'p': - err.InternalPosition = msg - case 'q': - err.InternalQuery = msg - case 'W': - err.Where = msg - case 's': - err.Schema = msg - case 't': - err.Table = msg - case 'c': - err.Column = msg - case 'd': - err.DataTypeName = msg - case 'n': - err.Constraint = msg - case 'F': - err.File = msg - case 'L': - err.Line = msg - case 'R': - err.Routine = msg - } - } - return 
err -} - -// Fatal returns true if the Error Severity is fatal. -func (err *Error) Fatal() bool { - return err.Severity == Efatal -} - -// Get implements the legacy PGError interface. New code should use the fields -// of the Error struct directly. -func (err *Error) Get(k byte) (v string) { - switch k { - case 'S': - return err.Severity - case 'C': - return string(err.Code) - case 'M': - return err.Message - case 'D': - return err.Detail - case 'H': - return err.Hint - case 'P': - return err.Position - case 'p': - return err.InternalPosition - case 'q': - return err.InternalQuery - case 'W': - return err.Where - case 's': - return err.Schema - case 't': - return err.Table - case 'c': - return err.Column - case 'd': - return err.DataTypeName - case 'n': - return err.Constraint - case 'F': - return err.File - case 'L': - return err.Line - case 'R': - return err.Routine - } - return "" -} - -func (err Error) Error() string { - return "pq: " + err.Message -} - -// PGError is an interface used by previous versions of pq. It is provided -// only to support legacy code. New code should use the Error type. -type PGError interface { - Error() string - Fatal() bool - Get(k byte) (v string) -} - -func errorf(s string, args ...interface{}) { - panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) -} - -// TODO(ainar-g) Rename to errorf after removing panics. 
-func fmterrorf(s string, args ...interface{}) error { - return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) -} - -func errRecoverNoErrBadConn(err *error) { - e := recover() - if e == nil { - // Do nothing - return - } - var ok bool - *err, ok = e.(error) - if !ok { - *err = fmt.Errorf("pq: unexpected error: %#v", e) - } -} - -func (cn *conn) errRecover(err *error) { - e := recover() - switch v := e.(type) { - case nil: - // Do nothing - case runtime.Error: - cn.bad = true - panic(v) - case *Error: - if v.Fatal() { - *err = driver.ErrBadConn - } else { - *err = v - } - case *net.OpError: - cn.bad = true - *err = v - case error: - if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { - *err = driver.ErrBadConn - } else { - *err = v - } - - default: - cn.bad = true - panic(fmt.Sprintf("unknown error: %#v", e)) - } - - // Any time we return ErrBadConn, we need to remember it since *Tx doesn't - // mark the connection bad in database/sql. - if *err == driver.ErrBadConn { - cn.bad = true - } -} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go deleted file mode 100644 index 408ec01f..00000000 --- a/vendor/github.com/lib/pq/krb.go +++ /dev/null @@ -1,27 +0,0 @@ -package pq - -// NewGSSFunc creates a GSS authentication provider, for use with -// RegisterGSSProvider. -type NewGSSFunc func() (GSS, error) - -var newGss NewGSSFunc - -// RegisterGSSProvider registers a GSS authentication provider. For example, if -// you need to use Kerberos to authenticate with your server, add this to your -// main package: -// -// import "github.com/lib/pq/auth/kerberos" -// -// func init() { -// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) -// } -func RegisterGSSProvider(newGssArg NewGSSFunc) { - newGss = newGssArg -} - -// GSS provides GSSAPI authentication (e.g., Kerberos). 
-type GSS interface { - GetInitToken(host string, service string) ([]byte, error) - GetInitTokenFromSpn(spn string) ([]byte, error) - Continue(inToken []byte) (done bool, outToken []byte, err error) -} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go deleted file mode 100644 index 01dd8c72..00000000 --- a/vendor/github.com/lib/pq/notice.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build go1.10 - -package pq - -import ( - "context" - "database/sql/driver" -) - -// NoticeHandler returns the notice handler on the given connection, if any. A -// runtime panic occurs if c is not a pq connection. This is rarely used -// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. -func NoticeHandler(c driver.Conn) func(*Error) { - return c.(*conn).noticeHandler -} - -// SetNoticeHandler sets the given notice handler on the given connection. A -// runtime panic occurs if c is not a pq connection. A nil handler may be used -// to unset it. This is rarely used directly, use ConnectorNoticeHandler and -// ConnectorWithNoticeHandler instead. -// -// Note: Notice handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func SetNoticeHandler(c driver.Conn, handler func(*Error)) { - c.(*conn).noticeHandler = handler -} - -// NoticeHandlerConnector wraps a regular connector and sets a notice handler -// on it. -type NoticeHandlerConnector struct { - driver.Connector - noticeHandler func(*Error) -} - -// Connect calls the underlying connector's connect method and then sets the -// notice handler. -func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { - c, err := n.Connector.Connect(ctx) - if err == nil { - SetNoticeHandler(c, n.noticeHandler) - } - return c, err -} - -// ConnectorNoticeHandler returns the currently set notice handler, if any. If -// the given connector is not a result of ConnectorWithNoticeHandler, nil is -// returned. 
-func ConnectorNoticeHandler(c driver.Connector) func(*Error) { - if c, ok := c.(*NoticeHandlerConnector); ok { - return c.noticeHandler - } - return nil -} - -// ConnectorWithNoticeHandler creates or sets the given handler for the given -// connector. If the given connector is a result of calling this function -// previously, it is simply set on the given connector and returned. Otherwise, -// this returns a new connector wrapping the given one and setting the notice -// handler. A nil notice handler may be used to unset it. -// -// The returned connector is intended to be used with database/sql.OpenDB. -// -// Note: Notice handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { - if c, ok := c.(*NoticeHandlerConnector); ok { - c.noticeHandler = handler - return c - } - return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} -} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go deleted file mode 100644 index 5c421fdb..00000000 --- a/vendor/github.com/lib/pq/notify.go +++ /dev/null @@ -1,858 +0,0 @@ -package pq - -// Package pq is a pure Go Postgres driver for the database/sql package. -// This module contains support for Postgres LISTEN/NOTIFY. - -import ( - "context" - "database/sql/driver" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" -) - -// Notification represents a single notification from the database. -type Notification struct { - // Process ID (PID) of the notifying postgres backend. - BePid int - // Name of the channel the notification was sent on. - Channel string - // Payload, or the empty string if unspecified. 
- Extra string -} - -func recvNotification(r *readBuf) *Notification { - bePid := r.int32() - channel := r.string() - extra := r.string() - - return &Notification{bePid, channel, extra} -} - -// SetNotificationHandler sets the given notification handler on the given -// connection. A runtime panic occurs if c is not a pq connection. A nil handler -// may be used to unset it. -// -// Note: Notification handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { - c.(*conn).notificationHandler = handler -} - -// NotificationHandlerConnector wraps a regular connector and sets a notification handler -// on it. -type NotificationHandlerConnector struct { - driver.Connector - notificationHandler func(*Notification) -} - -// Connect calls the underlying connector's connect method and then sets the -// notification handler. -func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { - c, err := n.Connector.Connect(ctx) - if err == nil { - SetNotificationHandler(c, n.notificationHandler) - } - return c, err -} - -// ConnectorNotificationHandler returns the currently set notification handler, if any. If -// the given connector is not a result of ConnectorWithNotificationHandler, nil is -// returned. -func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { - if c, ok := c.(*NotificationHandlerConnector); ok { - return c.notificationHandler - } - return nil -} - -// ConnectorWithNotificationHandler creates or sets the given handler for the given -// connector. If the given connector is a result of calling this function -// previously, it is simply set on the given connector and returned. Otherwise, -// this returns a new connector wrapping the given one and setting the notification -// handler. A nil notification handler may be used to unset it. 
-// -// The returned connector is intended to be used with database/sql.OpenDB. -// -// Note: Notification handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { - if c, ok := c.(*NotificationHandlerConnector); ok { - c.notificationHandler = handler - return c - } - return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} -} - -const ( - connStateIdle int32 = iota - connStateExpectResponse - connStateExpectReadyForQuery -) - -type message struct { - typ byte - err error -} - -var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") - -// ListenerConn is a low-level interface for waiting for notifications. You -// should use Listener instead. -type ListenerConn struct { - // guards cn and err - connectionLock sync.Mutex - cn *conn - err error - - connState int32 - - // the sending goroutine will be holding this lock - senderLock sync.Mutex - - notificationChan chan<- *Notification - - replyChan chan message -} - -// NewListenerConn creates a new ListenerConn. Use NewListener instead. -func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { - return newDialListenerConn(defaultDialer{}, name, notificationChan) -} - -func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { - cn, err := DialOpen(d, name) - if err != nil { - return nil, err - } - - l := &ListenerConn{ - cn: cn.(*conn), - notificationChan: c, - connState: connStateIdle, - replyChan: make(chan message, 2), - } - - go l.listenerConnMain() - - return l, nil -} - -// We can only allow one goroutine at a time to be running a query on the -// connection for various reasons, so the goroutine sending on the connection -// must be holding senderLock. 
-// -// Returns an error if an unrecoverable error has occurred and the ListenerConn -// should be abandoned. -func (l *ListenerConn) acquireSenderLock() error { - // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery - l.senderLock.Lock() - - l.connectionLock.Lock() - err := l.err - l.connectionLock.Unlock() - if err != nil { - l.senderLock.Unlock() - return err - } - return nil -} - -func (l *ListenerConn) releaseSenderLock() { - l.senderLock.Unlock() -} - -// setState advances the protocol state to newState. Returns false if moving -// to that state from the current state is not allowed. -func (l *ListenerConn) setState(newState int32) bool { - var expectedState int32 - - switch newState { - case connStateIdle: - expectedState = connStateExpectReadyForQuery - case connStateExpectResponse: - expectedState = connStateIdle - case connStateExpectReadyForQuery: - expectedState = connStateExpectResponse - default: - panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) - } - - return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) -} - -// Main logic is here: receive messages from the postgres backend, forward -// notifications and query replies and keep the internal state in sync with the -// protocol state. Returns when the connection has been lost, is about to go -// away or should be discarded because we couldn't agree on the state with the -// server backend. -func (l *ListenerConn) listenerConnLoop() (err error) { - defer errRecoverNoErrBadConn(&err) - - r := &readBuf{} - for { - t, err := l.cn.recvMessage(r) - if err != nil { - return err - } - - switch t { - case 'A': - // recvNotification copies all the data so we don't need to worry - // about the scratch buffer being overwritten. 
- l.notificationChan <- recvNotification(r) - - case 'T', 'D': - // only used by tests; ignore - - case 'E': - // We might receive an ErrorResponse even when not in a query; it - // is expected that the server will close the connection after - // that, but we should make sure that the error we display is the - // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. - if !l.setState(connStateExpectReadyForQuery) { - return parseError(r) - } - l.replyChan <- message{t, parseError(r)} - - case 'C', 'I': - if !l.setState(connStateExpectReadyForQuery) { - // protocol out of sync - return fmt.Errorf("unexpected CommandComplete") - } - // ExecSimpleQuery doesn't need to know about this message - - case 'Z': - if !l.setState(connStateIdle) { - // protocol out of sync - return fmt.Errorf("unexpected ReadyForQuery") - } - l.replyChan <- message{t, nil} - - case 'S': - // ignore - case 'N': - if n := l.cn.noticeHandler; n != nil { - n(parseError(r)) - } - default: - return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) - } - } -} - -// This is the main routine for the goroutine receiving on the database -// connection. Most of the main logic is in listenerConnLoop. -func (l *ListenerConn) listenerConnMain() { - err := l.listenerConnLoop() - - // listenerConnLoop terminated; we're done, but we still have to clean up. - // Make sure nobody tries to start any new queries by making sure the err - // pointer is set. It is important that we do not overwrite its value; a - // connection could be closed by either this goroutine or one sending on - // the connection -- whoever closes the connection is assumed to have the - // more meaningful error message (as the other one will probably get - // net.errClosed), so that goroutine sets the error we expose while the - // other error is discarded. 
If the connection is lost while two - // goroutines are operating on the socket, it probably doesn't matter which - // error we expose so we don't try to do anything more complex. - l.connectionLock.Lock() - if l.err == nil { - l.err = err - } - l.cn.Close() - l.connectionLock.Unlock() - - // There might be a query in-flight; make sure nobody's waiting for a - // response to it, since there's not going to be one. - close(l.replyChan) - - // let the listener know we're done - close(l.notificationChan) - - // this ListenerConn is done -} - -// Listen sends a LISTEN query to the server. See ExecSimpleQuery. -func (l *ListenerConn) Listen(channel string) (bool, error) { - return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) -} - -// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. -func (l *ListenerConn) Unlisten(channel string) (bool, error) { - return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) -} - -// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. -func (l *ListenerConn) UnlistenAll() (bool, error) { - return l.ExecSimpleQuery("UNLISTEN *") -} - -// Ping the remote server to make sure it's alive. Non-nil error means the -// connection has failed and should be abandoned. -func (l *ListenerConn) Ping() error { - sent, err := l.ExecSimpleQuery("") - if !sent { - return err - } - if err != nil { - // shouldn't happen - panic(err) - } - return nil -} - -// Attempt to send a query on the connection. Returns an error if sending the -// query failed, and the caller should initiate closure of this connection. -// The caller must be holding senderLock (see acquireSenderLock and -// releaseSenderLock). 
-func (l *ListenerConn) sendSimpleQuery(q string) (err error) { - defer errRecoverNoErrBadConn(&err) - - // must set connection state before sending the query - if !l.setState(connStateExpectResponse) { - panic("two queries running at the same time") - } - - // Can't use l.cn.writeBuf here because it uses the scratch buffer which - // might get overwritten by listenerConnLoop. - b := &writeBuf{ - buf: []byte("Q\x00\x00\x00\x00"), - pos: 1, - } - b.string(q) - l.cn.send(b) - - return nil -} - -// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable -// parameters) on the connection. The possible return values are: -// 1) "executed" is true; the query was executed to completion on the -// database server. If the query failed, err will be set to the error -// returned by the database, otherwise err will be nil. -// 2) If "executed" is false, the query could not be executed on the remote -// server. err will be non-nil. -// -// After a call to ExecSimpleQuery has returned an executed=false value, the -// connection has either been closed or will be closed shortly thereafter, and -// all subsequently executed queries will return an error. -func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { - if err = l.acquireSenderLock(); err != nil { - return false, err - } - defer l.releaseSenderLock() - - err = l.sendSimpleQuery(q) - if err != nil { - // We can't know what state the protocol is in, so we need to abandon - // this connection. - l.connectionLock.Lock() - // Set the error pointer if it hasn't been set already; see - // listenerConnMain. - if l.err == nil { - l.err = err - } - l.connectionLock.Unlock() - l.cn.c.Close() - return false, err - } - - // now we just wait for a reply.. - for { - m, ok := <-l.replyChan - if !ok { - // We lost the connection to server, don't bother waiting for a - // a response. err should have been set already. 
- l.connectionLock.Lock() - err := l.err - l.connectionLock.Unlock() - return false, err - } - switch m.typ { - case 'Z': - // sanity check - if m.err != nil { - panic("m.err != nil") - } - // done; err might or might not be set - return true, err - - case 'E': - // sanity check - if m.err == nil { - panic("m.err == nil") - } - // server responded with an error; ReadyForQuery to follow - err = m.err - - default: - return false, fmt.Errorf("unknown response for simple query: %q", m.typ) - } - } -} - -// Close closes the connection. -func (l *ListenerConn) Close() error { - l.connectionLock.Lock() - if l.err != nil { - l.connectionLock.Unlock() - return errListenerConnClosed - } - l.err = errListenerConnClosed - l.connectionLock.Unlock() - // We can't send anything on the connection without holding senderLock. - // Simply close the net.Conn to wake up everyone operating on it. - return l.cn.c.Close() -} - -// Err returns the reason the connection was closed. It is not safe to call -// this function until l.Notify has been closed. -func (l *ListenerConn) Err() error { - return l.err -} - -var errListenerClosed = errors.New("pq: Listener has been closed") - -// ErrChannelAlreadyOpen is returned from Listen when a channel is already -// open. -var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") - -// ErrChannelNotOpen is returned from Unlisten when a channel is not open. -var ErrChannelNotOpen = errors.New("pq: channel is not open") - -// ListenerEventType is an enumeration of listener event types. -type ListenerEventType int - -const ( - // ListenerEventConnected is emitted only when the database connection - // has been initially initialized. The err argument of the callback - // will always be nil. - ListenerEventConnected ListenerEventType = iota - - // ListenerEventDisconnected is emitted after a database connection has - // been lost, either because of an error or because Close has been - // called. 
The err argument will be set to the reason the database - // connection was lost. - ListenerEventDisconnected - - // ListenerEventReconnected is emitted after a database connection has - // been re-established after connection loss. The err argument of the - // callback will always be nil. After this event has been emitted, a - // nil pq.Notification is sent on the Listener.Notify channel. - ListenerEventReconnected - - // ListenerEventConnectionAttemptFailed is emitted after a connection - // to the database was attempted, but failed. The err argument will be - // set to an error describing why the connection attempt did not - // succeed. - ListenerEventConnectionAttemptFailed -) - -// EventCallbackType is the event callback type. See also ListenerEventType -// constants' documentation. -type EventCallbackType func(event ListenerEventType, err error) - -// Listener provides an interface for listening to notifications from a -// PostgreSQL database. For general usage information, see section -// "Notifications". -// -// Listener can safely be used from concurrently running goroutines. -type Listener struct { - // Channel for receiving notifications from the database. In some cases a - // nil value will be sent. See section "Notifications" above. - Notify chan *Notification - - name string - minReconnectInterval time.Duration - maxReconnectInterval time.Duration - dialer Dialer - eventCallback EventCallbackType - - lock sync.Mutex - isClosed bool - reconnectCond *sync.Cond - cn *ListenerConn - connNotificationChan <-chan *Notification - channels map[string]struct{} -} - -// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. -// -// name should be set to a connection string to be used to establish the -// database connection (see section "Connection String Parameters" above). -// -// minReconnectInterval controls the duration to wait before trying to -// re-establish the database connection after connection loss. 
After each -// consecutive failure this interval is doubled, until maxReconnectInterval is -// reached. Successfully completing the connection establishment procedure -// resets the interval back to minReconnectInterval. -// -// The last parameter eventCallback can be set to a function which will be -// called by the Listener when the state of the underlying database connection -// changes. This callback will be called by the goroutine which dispatches the -// notifications over the Notify channel, so you should try to avoid doing -// potentially time-consuming operations from the callback. -func NewListener(name string, - minReconnectInterval time.Duration, - maxReconnectInterval time.Duration, - eventCallback EventCallbackType) *Listener { - return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) -} - -// NewDialListener is like NewListener but it takes a Dialer. -func NewDialListener(d Dialer, - name string, - minReconnectInterval time.Duration, - maxReconnectInterval time.Duration, - eventCallback EventCallbackType) *Listener { - - l := &Listener{ - name: name, - minReconnectInterval: minReconnectInterval, - maxReconnectInterval: maxReconnectInterval, - dialer: d, - eventCallback: eventCallback, - - channels: make(map[string]struct{}), - - Notify: make(chan *Notification, 32), - } - l.reconnectCond = sync.NewCond(&l.lock) - - go l.listenerMain() - - return l -} - -// NotificationChannel returns the notification channel for this listener. -// This is the same channel as Notify, and will not be recreated during the -// life time of the Listener. -func (l *Listener) NotificationChannel() <-chan *Notification { - return l.Notify -} - -// Listen starts listening for notifications on a channel. Calls to this -// function will block until an acknowledgement has been received from the -// server. 
Note that Listener automatically re-establishes the connection -// after connection loss, so this function may block indefinitely if the -// connection can not be re-established. -// -// Listen will only fail in three conditions: -// 1) The channel is already open. The returned error will be -// ErrChannelAlreadyOpen. -// 2) The query was executed on the remote server, but PostgreSQL returned an -// error message in response to the query. The returned error will be a -// pq.Error containing the information the server supplied. -// 3) Close is called on the Listener before the request could be completed. -// -// The channel name is case-sensitive. -func (l *Listener) Listen(channel string) error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - // The server allows you to issue a LISTEN on a channel which is already - // open, but it seems useful to be able to detect this case to spot for - // mistakes in application logic. If the application genuinely does't - // care, it can check the exported error and ignore it. - _, exists := l.channels[channel] - if exists { - return ErrChannelAlreadyOpen - } - - if l.cn != nil { - // If gotResponse is true but error is set, the query was executed on - // the remote server, but resulted in an error. This should be - // relatively rare, so it's fine if we just pass the error to our - // caller. However, if gotResponse is false, we could not complete the - // query on the remote server and our underlying connection is about - // to go away, so we only add relname to l.channels, and wait for - // resync() to take care of the rest. - gotResponse, err := l.cn.Listen(channel) - if gotResponse && err != nil { - return err - } - } - - l.channels[channel] = struct{}{} - for l.cn == nil { - l.reconnectCond.Wait() - // we let go of the mutex for a while - if l.isClosed { - return errListenerClosed - } - } - - return nil -} - -// Unlisten removes a channel from the Listener's channel list. 
Returns -// ErrChannelNotOpen if the Listener is not listening on the specified channel. -// Returns immediately with no error if there is no connection. Note that you -// might still get notifications for this channel even after Unlisten has -// returned. -// -// The channel name is case-sensitive. -func (l *Listener) Unlisten(channel string) error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - // Similarly to LISTEN, this is not an error in Postgres, but it seems - // useful to distinguish from the normal conditions. - _, exists := l.channels[channel] - if !exists { - return ErrChannelNotOpen - } - - if l.cn != nil { - // Similarly to Listen (see comment in that function), the caller - // should only be bothered with an error if it came from the backend as - // a response to our query. - gotResponse, err := l.cn.Unlisten(channel) - if gotResponse && err != nil { - return err - } - } - - // Don't bother waiting for resync if there's no connection. - delete(l.channels, channel) - return nil -} - -// UnlistenAll removes all channels from the Listener's channel list. Returns -// immediately with no error if there is no connection. Note that you might -// still get notifications for any of the deleted channels even after -// UnlistenAll has returned. -func (l *Listener) UnlistenAll() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - if l.cn != nil { - // Similarly to Listen (see comment in that function), the caller - // should only be bothered with an error if it came from the backend as - // a response to our query. - gotResponse, err := l.cn.UnlistenAll() - if gotResponse && err != nil { - return err - } - } - - // Don't bother waiting for resync if there's no connection. - l.channels = make(map[string]struct{}) - return nil -} - -// Ping the remote server to make sure it's alive. Non-nil return value means -// that there is no active connection. 
-func (l *Listener) Ping() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - if l.cn == nil { - return errors.New("no connection") - } - - return l.cn.Ping() -} - -// Clean up after losing the server connection. Returns l.cn.Err(), which -// should have the reason the connection was lost. -func (l *Listener) disconnectCleanup() error { - l.lock.Lock() - defer l.lock.Unlock() - - // sanity check; can't look at Err() until the channel has been closed - select { - case _, ok := <-l.connNotificationChan: - if ok { - panic("connNotificationChan not closed") - } - default: - panic("connNotificationChan not closed") - } - - err := l.cn.Err() - l.cn.Close() - l.cn = nil - return err -} - -// Synchronize the list of channels we want to be listening on with the server -// after the connection has been established. -func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { - doneChan := make(chan error) - go func(notificationChan <-chan *Notification) { - for channel := range l.channels { - // If we got a response, return that error to our caller as it's - // going to be more descriptive than cn.Err(). - gotResponse, err := cn.Listen(channel) - if gotResponse && err != nil { - doneChan <- err - return - } - - // If we couldn't reach the server, wait for notificationChan to - // close and then return the error message from the connection, as - // per ListenerConn's interface. - if err != nil { - for range notificationChan { - } - doneChan <- cn.Err() - return - } - } - doneChan <- nil - }(notificationChan) - - // Ignore notifications while synchronization is going on to avoid - // deadlocks. We have to send a nil notification over Notify anyway as - // we can't possibly know which notifications (if any) were lost while - // the connection was down, so there's no reason to try and process - // these messages at all. 
- for { - select { - case _, ok := <-notificationChan: - if !ok { - notificationChan = nil - } - - case err := <-doneChan: - return err - } - } -} - -// caller should NOT be holding l.lock -func (l *Listener) closed() bool { - l.lock.Lock() - defer l.lock.Unlock() - - return l.isClosed -} - -func (l *Listener) connect() error { - notificationChan := make(chan *Notification, 32) - cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) - if err != nil { - return err - } - - l.lock.Lock() - defer l.lock.Unlock() - - err = l.resync(cn, notificationChan) - if err != nil { - cn.Close() - return err - } - - l.cn = cn - l.connNotificationChan = notificationChan - l.reconnectCond.Broadcast() - - return nil -} - -// Close disconnects the Listener from the database and shuts it down. -// Subsequent calls to its methods will return an error. Close returns an -// error if the connection has already been closed. -func (l *Listener) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - if l.cn != nil { - l.cn.Close() - } - l.isClosed = true - - // Unblock calls to Listen() - l.reconnectCond.Broadcast() - - return nil -} - -func (l *Listener) emitEvent(event ListenerEventType, err error) { - if l.eventCallback != nil { - l.eventCallback(event, err) - } -} - -// Main logic here: maintain a connection to the server when possible, wait -// for notifications and emit events. 
-func (l *Listener) listenerConnLoop() { - var nextReconnect time.Time - - reconnectInterval := l.minReconnectInterval - for { - for { - err := l.connect() - if err == nil { - break - } - - if l.closed() { - return - } - l.emitEvent(ListenerEventConnectionAttemptFailed, err) - - time.Sleep(reconnectInterval) - reconnectInterval *= 2 - if reconnectInterval > l.maxReconnectInterval { - reconnectInterval = l.maxReconnectInterval - } - } - - if nextReconnect.IsZero() { - l.emitEvent(ListenerEventConnected, nil) - } else { - l.emitEvent(ListenerEventReconnected, nil) - l.Notify <- nil - } - - reconnectInterval = l.minReconnectInterval - nextReconnect = time.Now().Add(reconnectInterval) - - for { - notification, ok := <-l.connNotificationChan - if !ok { - // lost connection, loop again - break - } - l.Notify <- notification - } - - err := l.disconnectCleanup() - if l.closed() { - return - } - l.emitEvent(ListenerEventDisconnected, err) - - time.Sleep(time.Until(nextReconnect)) - } -} - -func (l *Listener) listenerMain() { - l.listenerConnLoop() - close(l.Notify) -} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go deleted file mode 100644 index caaede24..00000000 --- a/vendor/github.com/lib/pq/oid/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package oid contains OID constants -// as defined by the Postgres server. -package oid - -// Oid is a Postgres Object ID. -type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go deleted file mode 100644 index ecc84c2c..00000000 --- a/vendor/github.com/lib/pq/oid/types.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by gen.go. DO NOT EDIT. 
- -package oid - -const ( - T_bool Oid = 16 - T_bytea Oid = 17 - T_char Oid = 18 - T_name Oid = 19 - T_int8 Oid = 20 - T_int2 Oid = 21 - T_int2vector Oid = 22 - T_int4 Oid = 23 - T_regproc Oid = 24 - T_text Oid = 25 - T_oid Oid = 26 - T_tid Oid = 27 - T_xid Oid = 28 - T_cid Oid = 29 - T_oidvector Oid = 30 - T_pg_ddl_command Oid = 32 - T_pg_type Oid = 71 - T_pg_attribute Oid = 75 - T_pg_proc Oid = 81 - T_pg_class Oid = 83 - T_json Oid = 114 - T_xml Oid = 142 - T__xml Oid = 143 - T_pg_node_tree Oid = 194 - T__json Oid = 199 - T_smgr Oid = 210 - T_index_am_handler Oid = 325 - T_point Oid = 600 - T_lseg Oid = 601 - T_path Oid = 602 - T_box Oid = 603 - T_polygon Oid = 604 - T_line Oid = 628 - T__line Oid = 629 - T_cidr Oid = 650 - T__cidr Oid = 651 - T_float4 Oid = 700 - T_float8 Oid = 701 - T_abstime Oid = 702 - T_reltime Oid = 703 - T_tinterval Oid = 704 - T_unknown Oid = 705 - T_circle Oid = 718 - T__circle Oid = 719 - T_money Oid = 790 - T__money Oid = 791 - T_macaddr Oid = 829 - T_inet Oid = 869 - T__bool Oid = 1000 - T__bytea Oid = 1001 - T__char Oid = 1002 - T__name Oid = 1003 - T__int2 Oid = 1005 - T__int2vector Oid = 1006 - T__int4 Oid = 1007 - T__regproc Oid = 1008 - T__text Oid = 1009 - T__tid Oid = 1010 - T__xid Oid = 1011 - T__cid Oid = 1012 - T__oidvector Oid = 1013 - T__bpchar Oid = 1014 - T__varchar Oid = 1015 - T__int8 Oid = 1016 - T__point Oid = 1017 - T__lseg Oid = 1018 - T__path Oid = 1019 - T__box Oid = 1020 - T__float4 Oid = 1021 - T__float8 Oid = 1022 - T__abstime Oid = 1023 - T__reltime Oid = 1024 - T__tinterval Oid = 1025 - T__polygon Oid = 1027 - T__oid Oid = 1028 - T_aclitem Oid = 1033 - T__aclitem Oid = 1034 - T__macaddr Oid = 1040 - T__inet Oid = 1041 - T_bpchar Oid = 1042 - T_varchar Oid = 1043 - T_date Oid = 1082 - T_time Oid = 1083 - T_timestamp Oid = 1114 - T__timestamp Oid = 1115 - T__date Oid = 1182 - T__time Oid = 1183 - T_timestamptz Oid = 1184 - T__timestamptz Oid = 1185 - T_interval Oid = 1186 - T__interval Oid = 1187 - T__numeric 
Oid = 1231 - T_pg_database Oid = 1248 - T__cstring Oid = 1263 - T_timetz Oid = 1266 - T__timetz Oid = 1270 - T_bit Oid = 1560 - T__bit Oid = 1561 - T_varbit Oid = 1562 - T__varbit Oid = 1563 - T_numeric Oid = 1700 - T_refcursor Oid = 1790 - T__refcursor Oid = 2201 - T_regprocedure Oid = 2202 - T_regoper Oid = 2203 - T_regoperator Oid = 2204 - T_regclass Oid = 2205 - T_regtype Oid = 2206 - T__regprocedure Oid = 2207 - T__regoper Oid = 2208 - T__regoperator Oid = 2209 - T__regclass Oid = 2210 - T__regtype Oid = 2211 - T_record Oid = 2249 - T_cstring Oid = 2275 - T_any Oid = 2276 - T_anyarray Oid = 2277 - T_void Oid = 2278 - T_trigger Oid = 2279 - T_language_handler Oid = 2280 - T_internal Oid = 2281 - T_opaque Oid = 2282 - T_anyelement Oid = 2283 - T__record Oid = 2287 - T_anynonarray Oid = 2776 - T_pg_authid Oid = 2842 - T_pg_auth_members Oid = 2843 - T__txid_snapshot Oid = 2949 - T_uuid Oid = 2950 - T__uuid Oid = 2951 - T_txid_snapshot Oid = 2970 - T_fdw_handler Oid = 3115 - T_pg_lsn Oid = 3220 - T__pg_lsn Oid = 3221 - T_tsm_handler Oid = 3310 - T_anyenum Oid = 3500 - T_tsvector Oid = 3614 - T_tsquery Oid = 3615 - T_gtsvector Oid = 3642 - T__tsvector Oid = 3643 - T__gtsvector Oid = 3644 - T__tsquery Oid = 3645 - T_regconfig Oid = 3734 - T__regconfig Oid = 3735 - T_regdictionary Oid = 3769 - T__regdictionary Oid = 3770 - T_jsonb Oid = 3802 - T__jsonb Oid = 3807 - T_anyrange Oid = 3831 - T_event_trigger Oid = 3838 - T_int4range Oid = 3904 - T__int4range Oid = 3905 - T_numrange Oid = 3906 - T__numrange Oid = 3907 - T_tsrange Oid = 3908 - T__tsrange Oid = 3909 - T_tstzrange Oid = 3910 - T__tstzrange Oid = 3911 - T_daterange Oid = 3912 - T__daterange Oid = 3913 - T_int8range Oid = 3926 - T__int8range Oid = 3927 - T_pg_shseclabel Oid = 4066 - T_regnamespace Oid = 4089 - T__regnamespace Oid = 4090 - T_regrole Oid = 4096 - T__regrole Oid = 4097 -) - -var TypeName = map[Oid]string{ - T_bool: "BOOL", - T_bytea: "BYTEA", - T_char: "CHAR", - T_name: "NAME", - T_int8: "INT8", - 
T_int2: "INT2", - T_int2vector: "INT2VECTOR", - T_int4: "INT4", - T_regproc: "REGPROC", - T_text: "TEXT", - T_oid: "OID", - T_tid: "TID", - T_xid: "XID", - T_cid: "CID", - T_oidvector: "OIDVECTOR", - T_pg_ddl_command: "PG_DDL_COMMAND", - T_pg_type: "PG_TYPE", - T_pg_attribute: "PG_ATTRIBUTE", - T_pg_proc: "PG_PROC", - T_pg_class: "PG_CLASS", - T_json: "JSON", - T_xml: "XML", - T__xml: "_XML", - T_pg_node_tree: "PG_NODE_TREE", - T__json: "_JSON", - T_smgr: "SMGR", - T_index_am_handler: "INDEX_AM_HANDLER", - T_point: "POINT", - T_lseg: "LSEG", - T_path: "PATH", - T_box: "BOX", - T_polygon: "POLYGON", - T_line: "LINE", - T__line: "_LINE", - T_cidr: "CIDR", - T__cidr: "_CIDR", - T_float4: "FLOAT4", - T_float8: "FLOAT8", - T_abstime: "ABSTIME", - T_reltime: "RELTIME", - T_tinterval: "TINTERVAL", - T_unknown: "UNKNOWN", - T_circle: "CIRCLE", - T__circle: "_CIRCLE", - T_money: "MONEY", - T__money: "_MONEY", - T_macaddr: "MACADDR", - T_inet: "INET", - T__bool: "_BOOL", - T__bytea: "_BYTEA", - T__char: "_CHAR", - T__name: "_NAME", - T__int2: "_INT2", - T__int2vector: "_INT2VECTOR", - T__int4: "_INT4", - T__regproc: "_REGPROC", - T__text: "_TEXT", - T__tid: "_TID", - T__xid: "_XID", - T__cid: "_CID", - T__oidvector: "_OIDVECTOR", - T__bpchar: "_BPCHAR", - T__varchar: "_VARCHAR", - T__int8: "_INT8", - T__point: "_POINT", - T__lseg: "_LSEG", - T__path: "_PATH", - T__box: "_BOX", - T__float4: "_FLOAT4", - T__float8: "_FLOAT8", - T__abstime: "_ABSTIME", - T__reltime: "_RELTIME", - T__tinterval: "_TINTERVAL", - T__polygon: "_POLYGON", - T__oid: "_OID", - T_aclitem: "ACLITEM", - T__aclitem: "_ACLITEM", - T__macaddr: "_MACADDR", - T__inet: "_INET", - T_bpchar: "BPCHAR", - T_varchar: "VARCHAR", - T_date: "DATE", - T_time: "TIME", - T_timestamp: "TIMESTAMP", - T__timestamp: "_TIMESTAMP", - T__date: "_DATE", - T__time: "_TIME", - T_timestamptz: "TIMESTAMPTZ", - T__timestamptz: "_TIMESTAMPTZ", - T_interval: "INTERVAL", - T__interval: "_INTERVAL", - T__numeric: "_NUMERIC", - 
T_pg_database: "PG_DATABASE", - T__cstring: "_CSTRING", - T_timetz: "TIMETZ", - T__timetz: "_TIMETZ", - T_bit: "BIT", - T__bit: "_BIT", - T_varbit: "VARBIT", - T__varbit: "_VARBIT", - T_numeric: "NUMERIC", - T_refcursor: "REFCURSOR", - T__refcursor: "_REFCURSOR", - T_regprocedure: "REGPROCEDURE", - T_regoper: "REGOPER", - T_regoperator: "REGOPERATOR", - T_regclass: "REGCLASS", - T_regtype: "REGTYPE", - T__regprocedure: "_REGPROCEDURE", - T__regoper: "_REGOPER", - T__regoperator: "_REGOPERATOR", - T__regclass: "_REGCLASS", - T__regtype: "_REGTYPE", - T_record: "RECORD", - T_cstring: "CSTRING", - T_any: "ANY", - T_anyarray: "ANYARRAY", - T_void: "VOID", - T_trigger: "TRIGGER", - T_language_handler: "LANGUAGE_HANDLER", - T_internal: "INTERNAL", - T_opaque: "OPAQUE", - T_anyelement: "ANYELEMENT", - T__record: "_RECORD", - T_anynonarray: "ANYNONARRAY", - T_pg_authid: "PG_AUTHID", - T_pg_auth_members: "PG_AUTH_MEMBERS", - T__txid_snapshot: "_TXID_SNAPSHOT", - T_uuid: "UUID", - T__uuid: "_UUID", - T_txid_snapshot: "TXID_SNAPSHOT", - T_fdw_handler: "FDW_HANDLER", - T_pg_lsn: "PG_LSN", - T__pg_lsn: "_PG_LSN", - T_tsm_handler: "TSM_HANDLER", - T_anyenum: "ANYENUM", - T_tsvector: "TSVECTOR", - T_tsquery: "TSQUERY", - T_gtsvector: "GTSVECTOR", - T__tsvector: "_TSVECTOR", - T__gtsvector: "_GTSVECTOR", - T__tsquery: "_TSQUERY", - T_regconfig: "REGCONFIG", - T__regconfig: "_REGCONFIG", - T_regdictionary: "REGDICTIONARY", - T__regdictionary: "_REGDICTIONARY", - T_jsonb: "JSONB", - T__jsonb: "_JSONB", - T_anyrange: "ANYRANGE", - T_event_trigger: "EVENT_TRIGGER", - T_int4range: "INT4RANGE", - T__int4range: "_INT4RANGE", - T_numrange: "NUMRANGE", - T__numrange: "_NUMRANGE", - T_tsrange: "TSRANGE", - T__tsrange: "_TSRANGE", - T_tstzrange: "TSTZRANGE", - T__tstzrange: "_TSTZRANGE", - T_daterange: "DATERANGE", - T__daterange: "_DATERANGE", - T_int8range: "INT8RANGE", - T__int8range: "_INT8RANGE", - T_pg_shseclabel: "PG_SHSECLABEL", - T_regnamespace: "REGNAMESPACE", - T__regnamespace: 
"_REGNAMESPACE", - T_regrole: "REGROLE", - T__regrole: "_REGROLE", -} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go deleted file mode 100644 index c6aa5b9a..00000000 --- a/vendor/github.com/lib/pq/rows.go +++ /dev/null @@ -1,93 +0,0 @@ -package pq - -import ( - "math" - "reflect" - "time" - - "github.com/lib/pq/oid" -) - -const headerSize = 4 - -type fieldDesc struct { - // The object ID of the data type. - OID oid.Oid - // The data type size (see pg_type.typlen). - // Note that negative values denote variable-width types. - Len int - // The type modifier (see pg_attribute.atttypmod). - // The meaning of the modifier is type-specific. - Mod int -} - -func (fd fieldDesc) Type() reflect.Type { - switch fd.OID { - case oid.T_int8: - return reflect.TypeOf(int64(0)) - case oid.T_int4: - return reflect.TypeOf(int32(0)) - case oid.T_int2: - return reflect.TypeOf(int16(0)) - case oid.T_varchar, oid.T_text: - return reflect.TypeOf("") - case oid.T_bool: - return reflect.TypeOf(false) - case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: - return reflect.TypeOf(time.Time{}) - case oid.T_bytea: - return reflect.TypeOf([]byte(nil)) - default: - return reflect.TypeOf(new(interface{})).Elem() - } -} - -func (fd fieldDesc) Name() string { - return oid.TypeName[fd.OID] -} - -func (fd fieldDesc) Length() (length int64, ok bool) { - switch fd.OID { - case oid.T_text, oid.T_bytea: - return math.MaxInt64, true - case oid.T_varchar, oid.T_bpchar: - return int64(fd.Mod - headerSize), true - default: - return 0, false - } -} - -func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { - switch fd.OID { - case oid.T_numeric, oid.T__numeric: - mod := fd.Mod - headerSize - precision = int64((mod >> 16) & 0xffff) - scale = int64(mod & 0xffff) - return precision, scale, true - default: - return 0, 0, false - } -} - -// ColumnTypeScanType returns the value type that can be used to scan types into. 
-func (rs *rows) ColumnTypeScanType(index int) reflect.Type { - return rs.colTyps[index].Type() -} - -// ColumnTypeDatabaseTypeName return the database system type name. -func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { - return rs.colTyps[index].Name() -} - -// ColumnTypeLength returns the length of the column type if the column is a -// variable length type. If the column is not a variable length type ok -// should return false. -func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { - return rs.colTyps[index].Length() -} - -// ColumnTypePrecisionScale should return the precision and scale for decimal -// types. If not applicable, ok should be false. -func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { - return rs.colTyps[index].PrecisionScale() -} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go deleted file mode 100644 index 477216b6..00000000 --- a/vendor/github.com/lib/pq/scram/scram.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2014 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. -// -// http://tools.ietf.org/html/rfc5802 -// -package scram - -import ( - "bytes" - "crypto/hmac" - "crypto/rand" - "encoding/base64" - "fmt" - "hash" - "strconv" - "strings" -) - -// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). -// -// A Client may be used within a SASL conversation with logic resembling: -// -// var in []byte -// var client = scram.NewClient(sha1.New, user, pass) -// for client.Step(in) { -// out := client.Out() -// // send out to server -// in := serverOut -// } -// if client.Err() != nil { -// // auth failed -// } -// -type Client struct { - newHash func() hash.Hash - - user string - pass string - step int - out bytes.Buffer - err error - - clientNonce []byte - serverNonce []byte - saltedPass []byte - authMsg bytes.Buffer -} - -// NewClient returns a new SCRAM-* client with the provided hash algorithm. -// -// For SCRAM-SHA-256, for example, use: -// -// client := scram.NewClient(sha256.New, user, pass) -// -func NewClient(newHash func() hash.Hash, user, pass string) *Client { - c := &Client{ - newHash: newHash, - user: user, - pass: pass, - } - c.out.Grow(256) - c.authMsg.Grow(256) - return c -} - -// Out returns the data to be sent to the server in the current step. 
-func (c *Client) Out() []byte { - if c.out.Len() == 0 { - return nil - } - return c.out.Bytes() -} - -// Err returns the error that occurred, or nil if there were no errors. -func (c *Client) Err() error { - return c.err -} - -// SetNonce sets the client nonce to the provided value. -// If not set, the nonce is generated automatically out of crypto/rand on the first step. -func (c *Client) SetNonce(nonce []byte) { - c.clientNonce = nonce -} - -var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") - -// Step processes the incoming data from the server and makes the -// next round of data for the server available via Client.Out. -// Step returns false if there are no errors and more data is -// still expected. -func (c *Client) Step(in []byte) bool { - c.out.Reset() - if c.step > 2 || c.err != nil { - return false - } - c.step++ - switch c.step { - case 1: - c.err = c.step1(in) - case 2: - c.err = c.step2(in) - case 3: - c.err = c.step3(in) - } - return c.step > 2 || c.err != nil -} - -func (c *Client) step1(in []byte) error { - if len(c.clientNonce) == 0 { - const nonceLen = 16 - buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) - if _, err := rand.Read(buf[:nonceLen]); err != nil { - return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) - } - c.clientNonce = buf[nonceLen:] - b64.Encode(c.clientNonce, buf[:nonceLen]) - } - c.authMsg.WriteString("n=") - escaper.WriteString(&c.authMsg, c.user) - c.authMsg.WriteString(",r=") - c.authMsg.Write(c.clientNonce) - - c.out.WriteString("n,,") - c.out.Write(c.authMsg.Bytes()) - return nil -} - -var b64 = base64.StdEncoding - -func (c *Client) step2(in []byte) error { - c.authMsg.WriteByte(',') - c.authMsg.Write(in) - - fields := bytes.Split(in, []byte(",")) - if len(fields) != 3 { - return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) - } - if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { - return 
fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) - } - if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) - } - if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) - } - - c.serverNonce = fields[0][2:] - if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { - return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) - } - - salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) - n, err := b64.Decode(salt, fields[1][2:]) - if err != nil { - return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) - } - salt = salt[:n] - iterCount, err := strconv.Atoi(string(fields[2][2:])) - if err != nil { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) - } - c.saltPassword(salt, iterCount) - - c.authMsg.WriteString(",c=biws,r=") - c.authMsg.Write(c.serverNonce) - - c.out.WriteString("c=biws,r=") - c.out.Write(c.serverNonce) - c.out.WriteString(",p=") - c.out.Write(c.clientProof()) - return nil -} - -func (c *Client) step3(in []byte) error { - var isv, ise bool - var fields = bytes.Split(in, []byte(",")) - if len(fields) == 1 { - isv = bytes.HasPrefix(fields[0], []byte("v=")) - ise = bytes.HasPrefix(fields[0], []byte("e=")) - } - if ise { - return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) - } else if !isv { - return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) - } - if !bytes.Equal(c.serverSignature(), fields[0][2:]) { - return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) - } - return nil -} - -func (c *Client) saltPassword(salt []byte, iterCount int) { - mac := hmac.New(c.newHash, []byte(c.pass)) - mac.Write(salt) - 
mac.Write([]byte{0, 0, 0, 1}) - ui := mac.Sum(nil) - hi := make([]byte, len(ui)) - copy(hi, ui) - for i := 1; i < iterCount; i++ { - mac.Reset() - mac.Write(ui) - mac.Sum(ui[:0]) - for j, b := range ui { - hi[j] ^= b - } - } - c.saltedPass = hi -} - -func (c *Client) clientProof() []byte { - mac := hmac.New(c.newHash, c.saltedPass) - mac.Write([]byte("Client Key")) - clientKey := mac.Sum(nil) - hash := c.newHash() - hash.Write(clientKey) - storedKey := hash.Sum(nil) - mac = hmac.New(c.newHash, storedKey) - mac.Write(c.authMsg.Bytes()) - clientProof := mac.Sum(nil) - for i, b := range clientKey { - clientProof[i] ^= b - } - clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) - b64.Encode(clientProof64, clientProof) - return clientProof64 -} - -func (c *Client) serverSignature() []byte { - mac := hmac.New(c.newHash, c.saltedPass) - mac.Write([]byte("Server Key")) - serverKey := mac.Sum(nil) - - mac = hmac.New(c.newHash, serverKey) - mac.Write(c.authMsg.Bytes()) - serverSignature := mac.Sum(nil) - - encoded := make([]byte, b64.EncodedLen(len(serverSignature))) - b64.Encode(encoded, serverSignature) - return encoded -} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go deleted file mode 100644 index d9020845..00000000 --- a/vendor/github.com/lib/pq/ssl.go +++ /dev/null @@ -1,175 +0,0 @@ -package pq - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net" - "os" - "os/user" - "path/filepath" -) - -// ssl generates a function to upgrade a net.Conn based on the "sslmode" and -// related settings. The function is nil when no upgrade should take place. -func ssl(o values) (func(net.Conn) (net.Conn, error), error) { - verifyCaOnly := false - tlsConf := tls.Config{} - switch mode := o["sslmode"]; mode { - // "require" is the default. - case "", "require": - // We must skip TLS's own verification since it requires full - // verification since Go 1.3. 
- tlsConf.InsecureSkipVerify = true - - // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: - // - // Note: For backwards compatibility with earlier versions of - // PostgreSQL, if a root CA file exists, the behavior of - // sslmode=require will be the same as that of verify-ca, meaning the - // server certificate is validated against the CA. Relying on this - // behavior is discouraged, and applications that need certificate - // validation should always use verify-ca or verify-full. - if sslrootcert, ok := o["sslrootcert"]; ok { - if _, err := os.Stat(sslrootcert); err == nil { - verifyCaOnly = true - } else { - delete(o, "sslrootcert") - } - } - case "verify-ca": - // We must skip TLS's own verification since it requires full - // verification since Go 1.3. - tlsConf.InsecureSkipVerify = true - verifyCaOnly = true - case "verify-full": - tlsConf.ServerName = o["host"] - case "disable": - return nil, nil - default: - return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) - } - - err := sslClientCertificates(&tlsConf, o) - if err != nil { - return nil, err - } - err = sslCertificateAuthority(&tlsConf, o) - if err != nil { - return nil, err - } - - // Accept renegotiation requests initiated by the backend. - // - // Renegotiation was deprecated then removed from PostgreSQL 9.5, but - // the default configuration of older versions has it enabled. Redshift - // also initiates renegotiations and cannot be reconfigured. 
- tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient - - return func(conn net.Conn) (net.Conn, error) { - client := tls.Client(conn, &tlsConf) - if verifyCaOnly { - err := sslVerifyCertificateAuthority(client, &tlsConf) - if err != nil { - return nil, err - } - } - return client, nil - }, nil -} - -// sslClientCertificates adds the certificate specified in the "sslcert" and -// "sslkey" settings, or if they aren't set, from the .postgresql directory -// in the user's home directory. The configured files must exist and have -// the correct permissions. -func sslClientCertificates(tlsConf *tls.Config, o values) error { - // user.Current() might fail when cross-compiling. We have to ignore the - // error and continue without home directory defaults, since we wouldn't - // know from where to load them. - user, _ := user.Current() - - // In libpq, the client certificate is only loaded if the setting is not blank. - // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 - sslcert := o["sslcert"] - if len(sslcert) == 0 && user != nil { - sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") - } - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 - if len(sslcert) == 0 { - return nil - } - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 - if _, err := os.Stat(sslcert); os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - - // In libpq, the ssl key is only loaded if the setting is not blank. 
- // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 - sslkey := o["sslkey"] - if len(sslkey) == 0 && user != nil { - sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") - } - - if len(sslkey) > 0 { - if err := sslKeyPermissions(sslkey); err != nil { - return err - } - } - - cert, err := tls.LoadX509KeyPair(sslcert, sslkey) - if err != nil { - return err - } - - tlsConf.Certificates = []tls.Certificate{cert} - return nil -} - -// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. -func sslCertificateAuthority(tlsConf *tls.Config, o values) error { - // In libpq, the root certificate is only loaded if the setting is not blank. - // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 - if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { - tlsConf.RootCAs = x509.NewCertPool() - - cert, err := ioutil.ReadFile(sslrootcert) - if err != nil { - return err - } - - if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { - return fmterrorf("couldn't parse pem in sslrootcert") - } - } - - return nil -} - -// sslVerifyCertificateAuthority carries out a TLS handshake to the server and -// verifies the presented certificate against the CA, i.e. the one specified in -// sslrootcert or the system CA if sslrootcert was not specified. 
-func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { - err := client.Handshake() - if err != nil { - return err - } - certs := client.ConnectionState().PeerCertificates - opts := x509.VerifyOptions{ - DNSName: client.ConnectionState().ServerName, - Intermediates: x509.NewCertPool(), - Roots: tlsConf.RootCAs, - } - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - _, err = certs[0].Verify(opts) - return err -} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go deleted file mode 100644 index 3b7c3a2a..00000000 --- a/vendor/github.com/lib/pq/ssl_permissions.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package pq - -import "os" - -// sslKeyPermissions checks the permissions on user-supplied ssl key files. -// The key file should have very little access. -// -// libpq does not check key file permissions on Windows. -func sslKeyPermissions(sslkey string) error { - info, err := os.Stat(sslkey) - if err != nil { - return err - } - if info.Mode().Perm()&0077 != 0 { - return ErrSSLKeyHasWorldPermissions - } - return nil -} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go deleted file mode 100644 index 5d2c763c..00000000 --- a/vendor/github.com/lib/pq/ssl_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package pq - -// sslKeyPermissions checks the permissions on user-supplied ssl key files. -// The key file should have very little access. -// -// libpq does not check key file permissions on Windows. 
-func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go deleted file mode 100644 index f4d8a7c2..00000000 --- a/vendor/github.com/lib/pq/url.go +++ /dev/null @@ -1,76 +0,0 @@ -package pq - -import ( - "fmt" - "net" - nurl "net/url" - "sort" - "strings" -) - -// ParseURL no longer needs to be used by clients of this library since supplying a URL as a -// connection string to sql.Open() is now supported: -// -// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") -// -// It remains exported here for backwards-compatibility. -// -// ParseURL converts a url to a connection string for driver.Open. -// Example: -// -// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" -// -// converts to: -// -// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" -// -// A minimal example: -// -// "postgres://" -// -// This will be blank, causing driver.Open to use all of the defaults -func ParseURL(url string) (string, error) { - u, err := nurl.Parse(url) - if err != nil { - return "", err - } - - if u.Scheme != "postgres" && u.Scheme != "postgresql" { - return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) - } - - var kvs []string - escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) - accrue := func(k, v string) { - if v != "" { - kvs = append(kvs, k+"="+escaper.Replace(v)) - } - } - - if u.User != nil { - v := u.User.Username() - accrue("user", v) - - v, _ = u.User.Password() - accrue("password", v) - } - - if host, port, err := net.SplitHostPort(u.Host); err != nil { - accrue("host", u.Host) - } else { - accrue("host", host) - accrue("port", port) - } - - if u.Path != "" { - accrue("dbname", u.Path[1:]) - } - - q := u.Query() - for k := range q { - accrue(k, q.Get(k)) - } - - sort.Strings(kvs) // Makes testing easier (not a performance concern) - return strings.Join(kvs, " "), nil -} diff --git 
a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go deleted file mode 100644 index a5101920..00000000 --- a/vendor/github.com/lib/pq/user_posix.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package pq is a pure Go Postgres driver for the database/sql package. - -// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun - -package pq - -import ( - "os" - "os/user" -) - -func userCurrent() (string, error) { - u, err := user.Current() - if err == nil { - return u.Username, nil - } - - name := os.Getenv("USER") - if name != "" { - return name, nil - } - - return "", ErrCouldNotDetectUsername -} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go deleted file mode 100644 index 2b691267..00000000 --- a/vendor/github.com/lib/pq/user_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package pq is a pure Go Postgres driver for the database/sql package. -package pq - -import ( - "path/filepath" - "syscall" -) - -// Perform Windows user name lookup identically to libpq. -// -// The PostgreSQL code makes use of the legacy Win32 function -// GetUserName, and that function has not been imported into stock Go. -// GetUserNameEx is available though, the difference being that a -// wider range of names are available. To get the output to be the -// same as GetUserName, only the base (or last) component of the -// result is returned. 
-func userCurrent() (string, error) { - pw_name := make([]uint16, 128) - pwname_size := uint32(len(pw_name)) - 1 - err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) - if err != nil { - return "", ErrCouldNotDetectUsername - } - s := syscall.UTF16ToString(pw_name) - u := filepath.Base(s) - return u, nil -} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go deleted file mode 100644 index 9a1b9e07..00000000 --- a/vendor/github.com/lib/pq/uuid.go +++ /dev/null @@ -1,23 +0,0 @@ -package pq - -import ( - "encoding/hex" - "fmt" -) - -// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. -func decodeUUIDBinary(src []byte) ([]byte, error) { - if len(src) != 16 { - return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) - } - - dst := make([]byte, 36) - dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' - hex.Encode(dst[0:], src[0:4]) - hex.Encode(dst[9:], src[4:6]) - hex.Encode(dst[14:], src[6:8]) - hex.Encode(dst[19:], src[8:10]) - hex.Encode(dst[24:], src[10:16]) - - return dst, nil -} diff --git a/vendor/github.com/miekg/unbound/.travis.yml b/vendor/github.com/miekg/unbound/.travis.yml deleted file mode 100644 index 260000ad..00000000 --- a/vendor/github.com/miekg/unbound/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.9 -before_install: - - sudo apt-get update -qq - - sudo apt-get install -qq libunbound-dev -script: - - go test -race -v -bench=. ./... diff --git a/vendor/github.com/miekg/unbound/README.md b/vendor/github.com/miekg/unbound/README.md deleted file mode 100644 index 52e67e1b..00000000 --- a/vendor/github.com/miekg/unbound/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Unbound - -A wrapper for Unbound in Go. - -Unbound's `ub_result` has been extended with an slice of dns.RRs, this alleviates -the need to parse `ub_result.data` yourself. 
- -The website for Unbound is https://unbound.net/, where you can find further documentation. - -Tested/compiled to work for versions: 1.4.22 and 1.6.0-3+deb9u1 (Debian Stretch). - -Note: using cgo means the executables will use shared libraries (OpenSSL, ldns and libunbound). - -The tutorials found here are the originals ones adapted to Go. diff --git a/vendor/github.com/miekg/unbound/dns.go b/vendor/github.com/miekg/unbound/dns.go deleted file mode 100644 index 04ce826a..00000000 --- a/vendor/github.com/miekg/unbound/dns.go +++ /dev/null @@ -1,87 +0,0 @@ -package unbound - -import ( - "math/rand" - "sort" - - "github.com/miekg/dns" -) - -// AddTaRR calls AddTa, but allows to directly use an dns.RR. -// This method is not found in Unbound. -func (u *Unbound) AddTaRR(ta dns.RR) error { return u.AddTa(ta.String()) } - -// DataAddRR calls DataAdd, but allows to directly use an dns.RR. -// This method is not found in Unbound. -func (u *Unbound) DataAddRR(data dns.RR) error { return u.DataAdd(data.String()) } - -// DataRemoveRR calls DataRemove, but allows to directly use an dns.RR. -// This method is not found in Unbound. -func (u *Unbound) DataRemoveRR(data dns.RR) error { return u.DataRemove(data.String()) } - -// Copied from the standard library - -// byPriorityWeight sorts SRV records by ascending priority and weight. -type byPriorityWeight []*dns.SRV - -func (s byPriorityWeight) Len() int { return len(s) } -func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byPriorityWeight) Less(i, j int) bool { - return s[i].Priority < s[j].Priority || - (s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight) -} - -// shuffleByWeight shuffles SRV records by weight using the algorithm -// described in RFC 2782. 
-func (addrs byPriorityWeight) shuffleByWeight() { - sum := 0 - for _, addr := range addrs { - sum += int(addr.Weight) - } - for sum > 0 && len(addrs) > 1 { - s := 0 - n := rand.Intn(sum + 1) - for i := range addrs { - s += int(addrs[i].Weight) - if s >= n { - if i > 0 { - t := addrs[i] - copy(addrs[1:i+1], addrs[0:i]) - addrs[0] = t - } - break - } - } - sum -= int(addrs[0].Weight) - addrs = addrs[1:] - } -} - -// sort reorders SRV records as specified in RFC 2782. -func (addrs byPriorityWeight) sort() { - sort.Sort(addrs) - i := 0 - for j := 1; j < len(addrs); j++ { - if addrs[i].Priority != addrs[j].Priority { - addrs[i:j].shuffleByWeight() - i = j - } - } - addrs[i:].shuffleByWeight() -} - -// byPref implements sort.Interface to sort MX records by preference -type byPref []*dns.MX - -func (s byPref) Len() int { return len(s) } -func (s byPref) Less(i, j int) bool { return s[i].Preference < s[j].Preference } -func (s byPref) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort reorders MX records as specified in RFC 5321. -func (s byPref) sort() { - for i := range s { - j := rand.Intn(i + 1) - s[i], s[j] = s[j], s[i] - } - sort.Sort(s) -} diff --git a/vendor/github.com/miekg/unbound/lookup.go b/vendor/github.com/miekg/unbound/lookup.go deleted file mode 100644 index 421c963a..00000000 --- a/vendor/github.com/miekg/unbound/lookup.go +++ /dev/null @@ -1,164 +0,0 @@ -package unbound - -import ( - "net" - - "github.com/miekg/dns" -) - -// These are function are a re-implementation of the net.Lookup* ones -// They are adapted to the package unbound and the package dns. - -// LookupAddr performs a reverse lookup for the given address, returning a -// list of names mapping to that address. 
-func (u *Unbound) LookupAddr(addr string) (name []string, err error) { - reverse, err := dns.ReverseAddr(addr) - if err != nil { - return nil, err - } - r, err := u.Resolve(reverse, dns.TypePTR, dns.ClassINET) - if err != nil { - return nil, err - } - for _, rr := range r.Rr { - name = append(name, rr.(*dns.PTR).Ptr) - } - return -} - -// LookupCNAME returns the canonical DNS host for the given name. Callers -// that do not care about the canonical name can call LookupHost or -// LookupIP directly; both take care of resolving the canonical name as -// part of the lookup. -func (u *Unbound) LookupCNAME(name string) (cname string, err error) { - r, err := u.Resolve(name, dns.TypeA, dns.ClassINET) - // TODO(mg): if nothing found try AAAA? - return r.CanonName, err -} - -// LookupHost looks up the given host using Unbound. It returns -// an array of that host's addresses. -func (u *Unbound) LookupHost(host string) (addrs []string, err error) { - ipaddrs, err := u.LookupIP(host) - if err != nil { - return nil, err - } - for _, ip := range ipaddrs { - addrs = append(addrs, ip.String()) - } - return addrs, nil -} - -// LookupIP looks up host using Unbound. It returns an array of -// that host's IPv4 and IPv6 addresses. -// The A and AAAA lookups are performed in parallel. -func (u *Unbound) LookupIP(host string) (addrs []net.IP, err error) { - c := make(chan *ResultError) - u.ResolveAsync(host, dns.TypeA, dns.ClassINET, c) - u.ResolveAsync(host, dns.TypeAAAA, dns.ClassINET, c) - seen := 0 - // TODO(miek): timeout? -Wait: - for { - select { - case r := <-c: - for _, rr := range r.Rr { - if x, ok := rr.(*dns.A); ok { - addrs = append(addrs, x.A) - } - if x, ok := rr.(*dns.AAAA); ok { - addrs = append(addrs, x.AAAA) - } - } - seen++ - if seen == 2 { - break Wait - } - } - } - return -} - -// LookupMX returns the DNS MX records for the given domain name sorted by -// preference. 
-func (u *Unbound) LookupMX(name string) (mx []*dns.MX, err error) { - r, err := u.Resolve(name, dns.TypeMX, dns.ClassINET) - if err != nil { - return nil, err - } - for _, rr := range r.Rr { - mx = append(mx, rr.(*dns.MX)) - } - byPref(mx).sort() - return -} - -// LookupNS returns the DNS NS records for the given domain name. -func (u *Unbound) LookupNS(name string) (ns []*dns.NS, err error) { - r, err := u.Resolve(name, dns.TypeNS, dns.ClassINET) - if err != nil { - return nil, err - } - for _, rr := range r.Rr { - ns = append(ns, rr.(*dns.NS)) - } - return -} - -// LookupSRV tries to resolve an SRV query of the given service, protocol, -// and domain name. The proto is "tcp" or "udp". The returned records are -// sorted by priority and randomized by weight within a priority. -// -// LookupSRV constructs the DNS name to look up following RFC 2782. That -// is, it looks up _service._proto.name. To accommodate services publishing -// SRV records under non-standard names, if both service and proto are -// empty strings, LookupSRV looks up name directly. -func (u *Unbound) LookupSRV(service, proto, name string) (cname string, srv []*dns.SRV, err error) { - r := new(Result) - if service == "" && proto == "" { - r, err = u.Resolve(name, dns.TypeSRV, dns.ClassINET) - } else { - r, err = u.Resolve("_"+service+"._"+proto+"."+name, dns.TypeSRV, dns.ClassINET) - } - if err != nil { - return "", nil, err - } - for _, rr := range r.Rr { - srv = append(srv, rr.(*dns.SRV)) - } - byPriorityWeight(srv).sort() - return "", srv, err -} - -// LookupTXT returns the DNS TXT records for the given domain name. -func (u *Unbound) LookupTXT(name string) (txt []string, err error) { - r, err := u.Resolve(name, dns.TypeTXT, dns.ClassINET) - if err != nil { - return nil, err - } - for _, rr := range r.Rr { - txt = append(txt, rr.(*dns.TXT).Txt...) - } - return -} - -// LookupTLSA returns the DNS DANE records for the given domain service, protocol -// and domainname. 
-// -// LookupTLSA constructs the DNS name to look up following RFC 6698. That -// is, it looks up _port._proto.name. -func (u *Unbound) LookupTLSA(service, proto, name string) (tlsa []*dns.TLSA, err error) { - tlsaname, err := dns.TLSAName(name, service, proto) - if err != nil { - return nil, err - } - - r, err := u.Resolve(tlsaname, dns.TypeTLSA, dns.ClassINET) - if err != nil { - return nil, err - } - for _, rr := range r.Rr { - tlsa = append(tlsa, rr.(*dns.TLSA)) - } - return tlsa, nil -} diff --git a/vendor/github.com/miekg/unbound/unbound.go b/vendor/github.com/miekg/unbound/unbound.go deleted file mode 100644 index add965d7..00000000 --- a/vendor/github.com/miekg/unbound/unbound.go +++ /dev/null @@ -1,386 +0,0 @@ -// Package unbound implements a wrapper for libunbound(3). -// Unbound is a DNSSEC aware resolver, see https://unbound.net/ -// for more information. It's up to the caller to configure -// Unbound with trust anchors. With these anchors a DNSSEC -// answer can be validated. -// -// The method's documentation can be found in libunbound(3). -// The names of the methods are in sync with the -// names used in unbound, but the underscores are removed and they -// are in camel-case, e.g. ub_ctx_resolv_conf becomes u.ResolvConf. -// Except for ub_ctx_create() and ub_ctx_delete(), -// which become: New() and Destroy() to be more in line with the standard -// Go practice. -// -// Basic use pattern: -// u := unbound.New() -// defer u.Destroy() -// u.ResolvConf("/etc/resolv.conf") -// u.AddTaFile("trustanchor") -// r, e := u.Resolve("miek.nl.", dns.TypeA, dns.ClassINET) -// -// The asynchronous functions are implemented using goroutines. This -// means the following functions are not useful in Go and therefor -// not implemented: ub_fd, ub_wait, ub_poll, ub_process and ub_cancel. -// -// Unbound's ub_result (named Result in the package) has been modified. -// An extra field has been added, 'Rr', which is a []dns.RR. 
-// -// The Lookup* functions of the net package are re-implemented in this package. -package unbound - -/* -#cgo LDFLAGS: -lunbound -#include -#include -#include - -#ifndef offsetof -#define offsetof(type, member) __builtin_offsetof (type, member) -#endif - -int array_elem_int(int *l, int i) { return l[i]; } -char * array_elem_char(char **l, int i) { if (l == NULL) return NULL; return l[i]; } -char * new_char_pointer() { char *p = NULL; return p; } -struct ub_result *new_ub_result() { - struct ub_result *r; - r = calloc(sizeof(struct ub_result), 1); - return r; -} -int ub_ttl(struct ub_result *r) { - int *p; - // Go to why_bogus add the pointer and then we will find the ttl, hopefully. - p = (int*) ((char*)r + offsetof(struct ub_result, why_bogus) + sizeof(char*)); - return (int)*p; -} -*/ -import "C" - -import ( - "encoding/binary" - "os" - "strconv" - "strings" - "time" - "unsafe" - - "github.com/miekg/dns" -) - -type Unbound struct { - ctx *C.struct_ub_ctx - version [3]int -} - -// Results is Unbound's ub_result adapted for Go. 
-type Result struct { - Qname string // Text string, original question - Qtype uint16 // Type code asked for - Qclass uint16 // Class code asked for - Data [][]byte // Slice of rdata items formed from the reply - Rr []dns.RR // The RR encoded from Data, Qclass, Qtype, Qname and Ttl (not in Unbound) - CanonName string // Canonical name of result - Rcode int // Additional error code in case of no data - AnswerPacket *dns.Msg // Full answer packet - HaveData bool // True if there is data - NxDomain bool // True if the name does not exist - Secure bool // True if the result is secure - Bogus bool // True if a security failure happened - WhyBogus string // String with error when bogus - Ttl uint32 // TTL for the result in seconds (0 for unbound versions < 1.4.20) - Rtt time.Duration // Time the query took (not in Unbound) -} - -// UnboundError is an error returned from Unbound, it wraps both the -// return code and the error string as returned by ub_strerror. -type UnboundError struct { - Err string - code int -} - -// ResultError encapsulates a *Result and an error. This is used to -// communicate with unbound over a channel. -type ResultError struct { - *Result - Error error -} - -func (e *UnboundError) Error() string { - return e.Err -} - -func newError(i int) error { - if i == 0 { - return nil - } - e := new(UnboundError) - e.Err = errorString(i) - e.code = i - return e -} - -func errorString(i int) string { - return C.GoString(C.ub_strerror(C.int(i))) -} - -// unbound version from 1.4.20 (inclusive) and above fill in the Tll in the result -// check if we have such a version -func (u *Unbound) haveTtlFeature() bool { - if u.version[0] < 1 { - return false - } else if u.version[0] == 1 && u.version[1] < 4 { - return false - } else if u.version[0] == 1 && u.version[1] == 4 && u.version[2] <= 20 { - return false - } else { - return true - } -} - -// New wraps Unbound's ub_ctx_create. 
-func New() *Unbound { - u := new(Unbound) - u.ctx = C.ub_ctx_create() - u.version = u.Version() - return u -} - -// Destroy wraps Unbound's ub_ctx_delete. -func (u *Unbound) Destroy() { - C.ub_ctx_delete(u.ctx) -} - -// ResolvConf wraps Unbound's ub_ctx_resolvconf. -func (u *Unbound) ResolvConf(fname string) error { - cfname := C.CString(fname) - defer C.free(unsafe.Pointer(cfname)) - i := C.ub_ctx_resolvconf(u.ctx, cfname) - return newError(int(i)) -} - -// SetOption wraps Unbound's ub_ctx_set_option. -func (u *Unbound) SetOption(opt, val string) error { - copt := C.CString(opt) - defer C.free(unsafe.Pointer(copt)) - cval := C.CString(val) - defer C.free(unsafe.Pointer(cval)) - i := C.ub_ctx_set_option(u.ctx, copt, cval) - return newError(int(i)) -} - -// GetOption wraps Unbound's ub_ctx_get_option. -func (u *Unbound) GetOption(opt string) (string, error) { - copt := C.CString(opt) - defer C.free(unsafe.Pointer(copt)) - - cval := C.new_char_pointer() - defer C.free(unsafe.Pointer(cval)) - i := C.ub_ctx_get_option(u.ctx, C.CString(opt), &cval) - return C.GoString(cval), newError(int(i)) -} - -// Config wraps Unbound's ub_ctx_config. -func (u *Unbound) Config(fname string) error { - cfname := C.CString(fname) - defer C.free(unsafe.Pointer(cfname)) - i := C.ub_ctx_config(u.ctx, cfname) - return newError(int(i)) -} - -// SetFwd wraps Unbound's ub_ctx_set_fwd. -func (u *Unbound) SetFwd(addr string) error { - caddr := C.CString(addr) - defer C.free(unsafe.Pointer(caddr)) - i := C.ub_ctx_set_fwd(u.ctx, caddr) - return newError(int(i)) -} - -// Hosts wraps Unbound's ub_ctx_hosts. -func (u *Unbound) Hosts(fname string) error { - cfname := C.CString(fname) - defer C.free(unsafe.Pointer(cfname)) - i := C.ub_ctx_hosts(u.ctx, cfname) - return newError(int(i)) -} - -// Resolve wraps Unbound's ub_resolve. 
-func (u *Unbound) Resolve(name string, rrtype, rrclass uint16) (*Result, error) { - name = dns.Fqdn(name) - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - res := C.new_ub_result() - r := new(Result) - // Normally, we would call 'defer C.ub_resolve_free(res)' here, but - // that does not work (in Go 1.6.1), see - // https://github.com/miekg/unbound/issues/8 - // This is likely related to https://github.com/golang/go/issues/15921 - t := time.Now() - i := C.ub_resolve(u.ctx, cname, C.int(rrtype), C.int(rrclass), &res) - r.Rtt = time.Since(t) - err := newError(int(i)) - if err != nil { - C.ub_resolve_free(res) - return nil, err - } - - r.Qname = C.GoString(res.qname) - r.Qtype = uint16(res.qtype) - r.Qclass = uint16(res.qclass) - - r.CanonName = C.GoString(res.canonname) - r.Rcode = int(res.rcode) - r.AnswerPacket = new(dns.Msg) - r.AnswerPacket.Unpack(C.GoBytes(res.answer_packet, res.answer_len)) // Should always work - r.HaveData = res.havedata == 1 - r.NxDomain = res.nxdomain == 1 - r.Secure = res.secure == 1 - r.Bogus = res.bogus == 1 - r.WhyBogus = C.GoString(res.why_bogus) - if u.haveTtlFeature() { - r.Ttl = uint32(C.ub_ttl(res)) - } - - // Re-create the RRs - var h dns.RR_Header - h.Name = r.Qname - h.Rrtype = r.Qtype - h.Class = r.Qclass - h.Ttl = r.Ttl - - j := 0 - if r.HaveData { - r.Data = make([][]byte, 0) - r.Rr = make([]dns.RR, 0) - b := C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j))) - - // Create the RR; write out the header details and - // the rdata to a buffer, and unpack it again into an - // actual RR, for ever rr found by resolve - hdrBuf := make([]byte, len(h.Name)+11) - off, _ := dns.PackDomainName(h.Name, hdrBuf, 0, nil, false) - binary.BigEndian.PutUint16(hdrBuf[off:], h.Rrtype) - off += 2 - binary.BigEndian.PutUint16(hdrBuf[off:], h.Class) - off += 2 - binary.BigEndian.PutUint32(hdrBuf[off:], h.Ttl) - off += 4 - - for len(b) != 0 { - h.Rdlength = uint16(len(b)) - // 
Note: we are rewriting the rdata len so we do not - // increase off anymore. - binary.BigEndian.PutUint16(hdrBuf[off:], h.Rdlength) - rrBuf := append(hdrBuf, b...) - - rr, _, err := dns.UnpackRR(rrBuf, 0) - if err == nil { - r.Rr = append(r.Rr, rr) - } - - r.Data = append(r.Data, b) - j++ - b = C.GoBytes(unsafe.Pointer(C.array_elem_char(res.data, C.int(j))), C.array_elem_int(res.len, C.int(j))) - } - } - C.ub_resolve_free(res) - return r, err -} - -// ResolveAsync does *not* wrap the Unbound function, instead -// it utilizes Go's goroutines and channels to implement the asynchronous behavior Unbound -// implements. As a result the function signature is different. -// The result (or an error) is returned on the channel c. -// Also the ub_cancel, ub_wait_, ub_fd, ub_process are not implemented. -func (u *Unbound) ResolveAsync(name string, rrtype, rrclass uint16, c chan *ResultError) { - go func() { - r, e := u.Resolve(name, rrtype, rrclass) - c <- &ResultError{r, e} - }() - return -} - -// AddTa wraps Unbound's ub_ctx_add_ta. -func (u *Unbound) AddTa(ta string) error { - cta := C.CString(ta) - i := C.ub_ctx_add_ta(u.ctx, cta) - return newError(int(i)) -} - -// AddTaFile wraps Unbound's ub_ctx_add_ta_file. -func (u *Unbound) AddTaFile(fname string) error { - cfname := C.CString(fname) - defer C.free(unsafe.Pointer(cfname)) - i := C.ub_ctx_add_ta_file(u.ctx, cfname) - return newError(int(i)) -} - -// TrustedKeys wraps Unbound's ub_ctx_trustedkeys. -func (u *Unbound) TrustedKeys(fname string) error { - cfname := C.CString(fname) - defer C.free(unsafe.Pointer(cfname)) - i := C.ub_ctx_trustedkeys(u.ctx, cfname) - return newError(int(i)) -} - -// ZoneAdd wraps Unbound's ub_ctx_zone_add. 
-func (u *Unbound) ZoneAdd(zone_name, zone_type string) error { - czone_name := C.CString(zone_name) - defer C.free(unsafe.Pointer(czone_name)) - czone_type := C.CString(zone_type) - defer C.free(unsafe.Pointer(czone_type)) - i := C.ub_ctx_zone_add(u.ctx, czone_name, czone_type) - return newError(int(i)) -} - -// ZoneRemove wraps Unbound's ub_ctx_zone_remove. -func (u *Unbound) ZoneRemove(zone_name string) error { - czone_name := C.CString(zone_name) - defer C.free(unsafe.Pointer(czone_name)) - i := C.ub_ctx_zone_remove(u.ctx, czone_name) - return newError(int(i)) -} - -// DataAdd wraps Unbound's ub_ctx_data_add. -func (u *Unbound) DataAdd(data string) error { - cdata := C.CString(data) - defer C.free(unsafe.Pointer(cdata)) - i := C.ub_ctx_data_add(u.ctx, cdata) - return newError(int(i)) -} - -// DataRemove wraps Unbound's ub_ctx_data_remove. -func (u *Unbound) DataRemove(data string) error { - cdata := C.CString(data) - defer C.free(unsafe.Pointer(cdata)) - i := C.ub_ctx_data_remove(u.ctx, cdata) - return newError(int(i)) -} - -// DebugOut wraps Unbound's ub_ctx_debugout. -func (u *Unbound) DebugOut(out *os.File) error { - cmode := C.CString("a+") - defer C.free(unsafe.Pointer(cmode)) - file := C.fdopen(C.int(out.Fd()), cmode) - i := C.ub_ctx_debugout(u.ctx, unsafe.Pointer(file)) - return newError(int(i)) -} - -// DebugLevel wraps Unbound's ub_ctx_data_level. -func (u *Unbound) DebugLevel(d int) error { - i := C.ub_ctx_debuglevel(u.ctx, C.int(d)) - return newError(int(i)) -} - -// Version wrap Ubounds's ub_version. 
Return the version of the Unbound -// library in as integers [major, minor, patch] -func (u *Unbound) Version() (version [3]int) { - // split the string on the dots - v := strings.SplitN(C.GoString(C.ub_version()), ".", 3) - if len(v) != 3 { - return - } - version[0], _ = strconv.Atoi(v[0]) - version[1], _ = strconv.Atoi(v[1]) - version[2], _ = strconv.Atoi(v[2]) - return -} diff --git a/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt b/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt deleted file mode 100644 index 079a934f..00000000 --- a/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016-2020 Simone Carletti - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go b/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go deleted file mode 100644 index 4b87105d..00000000 --- a/vendor/github.com/weppos/publicsuffix-go/net/publicsuffix/publicsuffix.go +++ /dev/null @@ -1,39 +0,0 @@ -// Package publicsuffix is a drop-in replacement for the golang.org/x/net/publicsuffix -// based on the weppos/publicsuffix package. -package publicsuffix - -import ( - psl "github.com/weppos/publicsuffix-go/publicsuffix" -) - -// PublicSuffix returns the public suffix of the domain -// using a copy of the publicsuffix.org database packaged into this library. -// -// Note. To maintain compatibility with the golang.org/x/net/publicsuffix -// this method doesn't return an error. However, in case of error, -// the returned value is empty. -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - //d, err := psl.Parse(domain) - //if err != nil { - // return "", false - //} - // - //return d.Rule.Value, !d.Rule.Private - - rule := psl.DefaultList.Find(domain, nil) - publicSuffix = rule.Decompose(domain)[1] - icann = !rule.Private - - // x/net/publicsuffix sets icann to false when the default rule "*" is used - if rule.Value == "" && rule.Type == psl.WildcardType { - icann = false - } - - return -} - -// EffectiveTLDPlusOne returns the effective top level domain plus one more label. -// For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org". 
-func EffectiveTLDPlusOne(domain string) (string, error) { - return psl.Domain(domain) -} diff --git a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go deleted file mode 100644 index c10e9427..00000000 --- a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go +++ /dev/null @@ -1,544 +0,0 @@ -//go:generate go run ../cmd/gen/gen.go - -// Package publicsuffix provides a domain name parser -// based on data from the public suffix list http://publicsuffix.org/. -// A public suffix is one under which Internet users can directly register names. -package publicsuffix - -import ( - "bufio" - "fmt" - "io" - "net/http/cookiejar" - "os" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // Version identifies the current library version. - // This is a pro forma convention given that Go dependencies - // tends to be fetched directly from the repo. - Version = "0.13.0" - - // NormalType represents a normal rule such as "com" - NormalType = 1 - // WildcardType represents a wildcard rule such as "*.com" - WildcardType = 2 - // ExceptionType represents an exception to a wildard rule - ExceptionType = 3 - - listTokenPrivateDomains = "===BEGIN PRIVATE DOMAINS===" - listTokenComment = "//" -) - -// DefaultList is the default List and it is used by Parse and Domain. -var DefaultList = NewList() - -// DefaultRule is the default Rule that represents "*". -var DefaultRule = MustNewRule("*") - -// DefaultParserOptions are the default options used to parse a Public Suffix list. -var DefaultParserOptions = &ParserOption{PrivateDomains: true, ASCIIEncoded: false} - -// DefaultFindOptions are the default options used to perform the lookup of rules in the list. -var DefaultFindOptions = &FindOptions{IgnorePrivate: false, DefaultRule: DefaultRule} - -// Rule represents a single rule in a Public Suffix List. 
-type Rule struct { - Type int - Value string - Length int - Private bool -} - -// ParserOption are the options you can use to customize the way a List -// is parsed from a file or a string. -type ParserOption struct { - // Set to false to skip the private domains when parsing. - // Default to true, which means the private domains are included. - PrivateDomains bool - - // Set to false if the input is encoded in U-labels (Unicode) - // as opposite to A-labels. - // Default to false, which means the list is containing Unicode domains. - // This is the default because the original PSL currently contains Unicode. - ASCIIEncoded bool -} - -// FindOptions are the options you can use to customize the way a Rule -// is searched within the list. -type FindOptions struct { - // Set to true to ignore the rules within the "Private" section of the Public Suffix List. - IgnorePrivate bool - - // The default rule to use when no rule matches the input. - // The format Public Suffix algorithm states that the rule "*" should be used when no other rule matches, - // but some consumers may have different needs. - DefaultRule *Rule -} - -// List represents a Public Suffix List. -type List struct { - // rules is kept private because you should not access rules directly - rules map[string]*Rule -} - -// NewList creates a new empty list. -func NewList() *List { - return &List{ - rules: map[string]*Rule{}, - } -} - -// NewListFromString parses a string that represents a Public Suffix source -// and returns a List initialized with the rules in the source. -func NewListFromString(src string, options *ParserOption) (*List, error) { - l := NewList() - _, err := l.LoadString(src, options) - return l, err -} - -// NewListFromFile parses a string that represents a Public Suffix source -// and returns a List initialized with the rules in the source. 
-func NewListFromFile(path string, options *ParserOption) (*List, error) { - l := NewList() - _, err := l.LoadFile(path, options) - return l, err -} - -// Load parses and loads a set of rules from an io.Reader into the current list. -func (l *List) Load(r io.Reader, options *ParserOption) ([]Rule, error) { - return l.parse(r, options) -} - -// LoadString parses and loads a set of rules from a String into the current list. -func (l *List) LoadString(src string, options *ParserOption) ([]Rule, error) { - r := strings.NewReader(src) - return l.parse(r, options) -} - -// LoadFile parses and loads a set of rules from a File into the current list. -func (l *List) LoadFile(path string, options *ParserOption) ([]Rule, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return l.parse(f, options) -} - -// AddRule adds a new rule to the list. -// -// The exact position of the rule into the list is unpredictable. -// The list may be optimized internally for lookups, therefore the algorithm -// will decide the best position for the new rule. -func (l *List) AddRule(r *Rule) error { - l.rules[r.Value] = r - return nil -} - -// Size returns the size of the list, which is the number of rules. 
-func (l *List) Size() int { - return len(l.rules) -} - -func (l *List) parse(r io.Reader, options *ParserOption) ([]Rule, error) { - if options == nil { - options = DefaultParserOptions - } - var rules []Rule - - scanner := bufio.NewScanner(r) - var section int // 1 == ICANN, 2 == PRIVATE - -Scanning: - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - switch { - - // skip blank lines - case line == "": - break - - // include private domains or stop scanner - case strings.Contains(line, listTokenPrivateDomains): - if !options.PrivateDomains { - break Scanning - } - section = 2 - - // skip comments - case strings.HasPrefix(line, listTokenComment): - break - - default: - var rule *Rule - var err error - - if options.ASCIIEncoded { - rule, err = NewRule(line) - } else { - rule, err = NewRuleUnicode(line) - } - if err != nil { - return []Rule{}, err - } - - rule.Private = (section == 2) - l.AddRule(rule) - rules = append(rules, *rule) - } - - } - - return rules, scanner.Err() -} - -// Find and returns the most appropriate rule for the domain name. -func (l *List) Find(name string, options *FindOptions) *Rule { - if options == nil { - options = DefaultFindOptions - } - - part := name - for { - rule, ok := l.rules[part] - - if ok && rule.Match(name) && !(options.IgnorePrivate && rule.Private) { - return rule - } - - i := strings.IndexRune(part, '.') - if i < 0 { - return options.DefaultRule - } - - part = part[i+1:] - } - -} - -// NewRule parses the rule content, creates and returns a Rule. -// -// The content of the rule MUST be encoded in ASCII (A-labels). 
-func NewRule(content string) (*Rule, error) { - var rule *Rule - var value string - - switch content[0:1] { - case "*": // wildcard - if content == "*" { - value = "" - } else { - value = content[2:] - } - rule = &Rule{Type: WildcardType, Value: value, Length: len(Labels(value)) + 1} - case "!": // exception - value = content[1:] - rule = &Rule{Type: ExceptionType, Value: value, Length: len(Labels(value))} - default: // normal - value = content - rule = &Rule{Type: NormalType, Value: value, Length: len(Labels(value))} - } - - return rule, nil -} - -// NewRuleUnicode is like NewRule, but expects the content to be encoded in Unicode (U-labels). -func NewRuleUnicode(content string) (*Rule, error) { - var err error - - content, err = ToASCII(content) - if err != nil { - return nil, err - } - - return NewRule(content) -} - -// MustNewRule is like NewRule, but panics if the content cannot be parsed. -func MustNewRule(content string) *Rule { - rule, err := NewRule(content) - if err != nil { - panic(err) - } - return rule -} - -// Match checks if the rule matches the name. -// -// A domain name is said to match a rule if and only if all of the following conditions are met: -// - When the domain and rule are split into corresponding labels, -// that the domain contains as many or more labels than the rule. -// - Beginning with the right-most labels of both the domain and the rule, -// and continuing for all labels in the rule, one finds that for every pair, -// either they are identical, or that the label from the rule is "*". 
-// -// See https://publicsuffix.org/list/ -func (r *Rule) Match(name string) bool { - left := strings.TrimSuffix(name, r.Value) - - // the name contains as many labels than the rule - // this is a match, unless it's a wildcard - // because the wildcard requires one more label - if left == "" { - return r.Type != WildcardType - } - - // if there is one more label, the rule match - // because either the rule is shorter than the domain - // or the rule is a wildcard and there is one more label - return left[len(left)-1:] == "." -} - -// Decompose takes a name as input and decomposes it into a tuple of , -// according to the rule definition and type. -func (r *Rule) Decompose(name string) (result [2]string) { - if r == DefaultRule { - i := strings.LastIndex(name, ".") - if i < 0 { - return - } - result[0], result[1] = name[:i], name[i+1:] - return - } - switch r.Type { - case NormalType: - name = strings.TrimSuffix(name, r.Value) - if len(name) == 0 { - return - } - result[0], result[1] = name[:len(name)-1], r.Value - case WildcardType: - name := strings.TrimSuffix(name, r.Value) - if len(name) == 0 { - return - } - name = name[:len(name)-1] - i := strings.LastIndex(name, ".") - if i < 0 { - return - } - result[0], result[1] = name[:i], name[i+1:]+"."+r.Value - case ExceptionType: - i := strings.IndexRune(r.Value, '.') - if i < 0 { - return - } - suffix := r.Value[i+1:] - name = strings.TrimSuffix(name, suffix) - if len(name) == 0 { - return - } - result[0], result[1] = name[:len(name)-1], suffix - } - return -} - -// Labels decomposes given domain name into labels, -// corresponding to the dot-separated tokens. -func Labels(name string) []string { - return strings.Split(name, ".") -} - -// DomainName represents a domain name. -type DomainName struct { - TLD string - SLD string - TRD string - Rule *Rule -} - -// String joins the components of the domain name into a single string. -// Empty labels are skipped. 
-// -// Examples: -// -// DomainName{"com", "example"}.String() -// // example.com -// DomainName{"com", "example", "www"}.String() -// // www.example.com -// -func (d *DomainName) String() string { - switch { - case d.TLD == "": - return "" - case d.SLD == "": - return d.TLD - case d.TRD == "": - return d.SLD + "." + d.TLD - default: - return d.TRD + "." + d.SLD + "." + d.TLD - } -} - -// Domain extract and return the domain name from the input -// using the default (Public Suffix) List. -// -// Examples: -// -// publicsuffix.Domain("example.com") -// // example.com -// publicsuffix.Domain("www.example.com") -// // example.com -// publicsuffix.Domain("www.example.co.uk") -// // example.co.uk -// -func Domain(name string) (string, error) { - return DomainFromListWithOptions(DefaultList, name, DefaultFindOptions) -} - -// Parse decomposes the name into TLD, SLD, TRD -// using the default (Public Suffix) List, -// and returns the result as a DomainName -// -// Examples: -// -// list := NewList() -// -// publicsuffix.Parse("example.com") -// // &DomainName{"com", "example"} -// publicsuffix.Parse("www.example.com") -// // &DomainName{"com", "example", "www"} -// publicsuffix.Parse("www.example.co.uk") -// // &DomainName{"co.uk", "example"} -// -func Parse(name string) (*DomainName, error) { - return ParseFromListWithOptions(DefaultList, name, DefaultFindOptions) -} - -// DomainFromListWithOptions extract and return the domain name from the input -// using the (Public Suffix) list passed as argument. 
-// -// Examples: -// -// list := NewList() -// -// publicsuffix.DomainFromListWithOptions(list, "example.com") -// // example.com -// publicsuffix.DomainFromListWithOptions(list, "www.example.com") -// // example.com -// publicsuffix.DomainFromListWithOptions(list, "www.example.co.uk") -// // example.co.uk -// -func DomainFromListWithOptions(l *List, name string, options *FindOptions) (string, error) { - dn, err := ParseFromListWithOptions(l, name, options) - if err != nil { - return "", err - } - return dn.SLD + "." + dn.TLD, nil -} - -// ParseFromListWithOptions decomposes the name into TLD, SLD, TRD -// using the (Public Suffix) list passed as argument, -// and returns the result as a DomainName -// -// Examples: -// -// list := NewList() -// -// publicsuffix.ParseFromListWithOptions(list, "example.com") -// // &DomainName{"com", "example"} -// publicsuffix.ParseFromListWithOptions(list, "www.example.com") -// // &DomainName{"com", "example", "www"} -// publicsuffix.ParseFromListWithOptions(list, "www.example.co.uk") -// // &DomainName{"co.uk", "example"} -// -func ParseFromListWithOptions(l *List, name string, options *FindOptions) (*DomainName, error) { - n, err := normalize(name) - if err != nil { - return nil, err - } - - r := l.Find(n, options) - if r == nil { - return nil, fmt.Errorf("no rule matching name %s", name) - } - - parts := r.Decompose(n) - left, tld := parts[0], parts[1] - if tld == "" { - return nil, fmt.Errorf("%s is a suffix", n) - } - - dn := &DomainName{ - Rule: r, - TLD: tld, - } - if i := strings.LastIndex(left, "."); i < 0 { - dn.SLD = left - } else { - dn.TRD = left[:i] - dn.SLD = left[i+1:] - } - return dn, nil -} - -func normalize(name string) (string, error) { - ret := strings.ToLower(name) - - if ret == "" { - return "", fmt.Errorf("name is blank") - } - if ret[0] == '.' { - return "", fmt.Errorf("name %s starts with a dot", ret) - } - - return ret, nil -} - -// ToASCII is a wrapper for idna.ToASCII. 
-// -// This wrapper exists because idna.ToASCII backward-compatibility was broken twice in few months -// and I can't call this package directly anymore. The wrapper performs some terrible-but-necessary -// before-after replacements to make sure an already ASCII input always results in the same output -// even if passed through ToASCII. -// -// See golang/net@67957fd0b1, golang/net@f2499483f9, golang/net@78ebe5c8b6, -// and weppos/publicsuffix-go#66. -func ToASCII(s string) (string, error) { - // .example.com should be .example.com - // ..example.com should be ..example.com - if strings.HasPrefix(s, ".") { - dotIndex := 0 - for i := 0; i < len(s); i++ { - if s[i] == '.' { - dotIndex = i - } else { - break - } - } - out, err := idna.ToASCII(s[dotIndex+1:]) - out = s[:dotIndex+1] + out - return out, err - } - - return idna.ToASCII(s) -} - -// ToUnicode is a wrapper for idna.ToUnicode. -// -// See ToASCII for more details about why this wrapper exists. -func ToUnicode(s string) (string, error) { - return idna.ToUnicode(s) -} - -// CookieJarList implements the cookiejar.PublicSuffixList interface. -var CookieJarList cookiejar.PublicSuffixList = cookiejarList{DefaultList} - -type cookiejarList struct { - List *List -} - -// PublicSuffix implements cookiejar.PublicSuffixList. -func (l cookiejarList) PublicSuffix(domain string) string { - rule := l.List.Find(domain, nil) - return rule.Decompose(domain)[1] -} - -// PublicSuffix implements cookiejar.String. -func (cookiejarList) String() string { - return defaultListVersion -} diff --git a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go b/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go deleted file mode 100644 index bb20a8ba..00000000 --- a/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go +++ /dev/null @@ -1,8847 +0,0 @@ -// This file is automatically generated -// Run "go run cmd/gen/gen.go" to update the list. 
- -package publicsuffix - -const defaultListVersion = "PSL version e7e340 (Sat Feb 15 21:59:27 2020)" - -func DefaultRules() [8828]Rule { - return r -} - -var r = [8828]Rule{ - {1, "ac", 1, false}, - {1, "com.ac", 2, false}, - {1, "edu.ac", 2, false}, - {1, "gov.ac", 2, false}, - {1, "net.ac", 2, false}, - {1, "mil.ac", 2, false}, - {1, "org.ac", 2, false}, - {1, "ad", 1, false}, - {1, "nom.ad", 2, false}, - {1, "ae", 1, false}, - {1, "co.ae", 2, false}, - {1, "net.ae", 2, false}, - {1, "org.ae", 2, false}, - {1, "sch.ae", 2, false}, - {1, "ac.ae", 2, false}, - {1, "gov.ae", 2, false}, - {1, "mil.ae", 2, false}, - {1, "aero", 1, false}, - {1, "accident-investigation.aero", 2, false}, - {1, "accident-prevention.aero", 2, false}, - {1, "aerobatic.aero", 2, false}, - {1, "aeroclub.aero", 2, false}, - {1, "aerodrome.aero", 2, false}, - {1, "agents.aero", 2, false}, - {1, "aircraft.aero", 2, false}, - {1, "airline.aero", 2, false}, - {1, "airport.aero", 2, false}, - {1, "air-surveillance.aero", 2, false}, - {1, "airtraffic.aero", 2, false}, - {1, "air-traffic-control.aero", 2, false}, - {1, "ambulance.aero", 2, false}, - {1, "amusement.aero", 2, false}, - {1, "association.aero", 2, false}, - {1, "author.aero", 2, false}, - {1, "ballooning.aero", 2, false}, - {1, "broker.aero", 2, false}, - {1, "caa.aero", 2, false}, - {1, "cargo.aero", 2, false}, - {1, "catering.aero", 2, false}, - {1, "certification.aero", 2, false}, - {1, "championship.aero", 2, false}, - {1, "charter.aero", 2, false}, - {1, "civilaviation.aero", 2, false}, - {1, "club.aero", 2, false}, - {1, "conference.aero", 2, false}, - {1, "consultant.aero", 2, false}, - {1, "consulting.aero", 2, false}, - {1, "control.aero", 2, false}, - {1, "council.aero", 2, false}, - {1, "crew.aero", 2, false}, - {1, "design.aero", 2, false}, - {1, "dgca.aero", 2, false}, - {1, "educator.aero", 2, false}, - {1, "emergency.aero", 2, false}, - {1, "engine.aero", 2, false}, - {1, "engineer.aero", 2, false}, - {1, 
"entertainment.aero", 2, false}, - {1, "equipment.aero", 2, false}, - {1, "exchange.aero", 2, false}, - {1, "express.aero", 2, false}, - {1, "federation.aero", 2, false}, - {1, "flight.aero", 2, false}, - {1, "freight.aero", 2, false}, - {1, "fuel.aero", 2, false}, - {1, "gliding.aero", 2, false}, - {1, "government.aero", 2, false}, - {1, "groundhandling.aero", 2, false}, - {1, "group.aero", 2, false}, - {1, "hanggliding.aero", 2, false}, - {1, "homebuilt.aero", 2, false}, - {1, "insurance.aero", 2, false}, - {1, "journal.aero", 2, false}, - {1, "journalist.aero", 2, false}, - {1, "leasing.aero", 2, false}, - {1, "logistics.aero", 2, false}, - {1, "magazine.aero", 2, false}, - {1, "maintenance.aero", 2, false}, - {1, "media.aero", 2, false}, - {1, "microlight.aero", 2, false}, - {1, "modelling.aero", 2, false}, - {1, "navigation.aero", 2, false}, - {1, "parachuting.aero", 2, false}, - {1, "paragliding.aero", 2, false}, - {1, "passenger-association.aero", 2, false}, - {1, "pilot.aero", 2, false}, - {1, "press.aero", 2, false}, - {1, "production.aero", 2, false}, - {1, "recreation.aero", 2, false}, - {1, "repbody.aero", 2, false}, - {1, "res.aero", 2, false}, - {1, "research.aero", 2, false}, - {1, "rotorcraft.aero", 2, false}, - {1, "safety.aero", 2, false}, - {1, "scientist.aero", 2, false}, - {1, "services.aero", 2, false}, - {1, "show.aero", 2, false}, - {1, "skydiving.aero", 2, false}, - {1, "software.aero", 2, false}, - {1, "student.aero", 2, false}, - {1, "trader.aero", 2, false}, - {1, "trading.aero", 2, false}, - {1, "trainer.aero", 2, false}, - {1, "union.aero", 2, false}, - {1, "workinggroup.aero", 2, false}, - {1, "works.aero", 2, false}, - {1, "af", 1, false}, - {1, "gov.af", 2, false}, - {1, "com.af", 2, false}, - {1, "org.af", 2, false}, - {1, "net.af", 2, false}, - {1, "edu.af", 2, false}, - {1, "ag", 1, false}, - {1, "com.ag", 2, false}, - {1, "org.ag", 2, false}, - {1, "net.ag", 2, false}, - {1, "co.ag", 2, false}, - {1, "nom.ag", 2, false}, - {1, 
"ai", 1, false}, - {1, "off.ai", 2, false}, - {1, "com.ai", 2, false}, - {1, "net.ai", 2, false}, - {1, "org.ai", 2, false}, - {1, "al", 1, false}, - {1, "com.al", 2, false}, - {1, "edu.al", 2, false}, - {1, "gov.al", 2, false}, - {1, "mil.al", 2, false}, - {1, "net.al", 2, false}, - {1, "org.al", 2, false}, - {1, "am", 1, false}, - {1, "co.am", 2, false}, - {1, "com.am", 2, false}, - {1, "commune.am", 2, false}, - {1, "net.am", 2, false}, - {1, "org.am", 2, false}, - {1, "ao", 1, false}, - {1, "ed.ao", 2, false}, - {1, "gv.ao", 2, false}, - {1, "og.ao", 2, false}, - {1, "co.ao", 2, false}, - {1, "pb.ao", 2, false}, - {1, "it.ao", 2, false}, - {1, "aq", 1, false}, - {1, "ar", 1, false}, - {1, "com.ar", 2, false}, - {1, "edu.ar", 2, false}, - {1, "gob.ar", 2, false}, - {1, "gov.ar", 2, false}, - {1, "int.ar", 2, false}, - {1, "mil.ar", 2, false}, - {1, "musica.ar", 2, false}, - {1, "net.ar", 2, false}, - {1, "org.ar", 2, false}, - {1, "tur.ar", 2, false}, - {1, "arpa", 1, false}, - {1, "e164.arpa", 2, false}, - {1, "in-addr.arpa", 2, false}, - {1, "ip6.arpa", 2, false}, - {1, "iris.arpa", 2, false}, - {1, "uri.arpa", 2, false}, - {1, "urn.arpa", 2, false}, - {1, "as", 1, false}, - {1, "gov.as", 2, false}, - {1, "asia", 1, false}, - {1, "at", 1, false}, - {1, "ac.at", 2, false}, - {1, "co.at", 2, false}, - {1, "gv.at", 2, false}, - {1, "or.at", 2, false}, - {1, "au", 1, false}, - {1, "com.au", 2, false}, - {1, "net.au", 2, false}, - {1, "org.au", 2, false}, - {1, "edu.au", 2, false}, - {1, "gov.au", 2, false}, - {1, "asn.au", 2, false}, - {1, "id.au", 2, false}, - {1, "info.au", 2, false}, - {1, "conf.au", 2, false}, - {1, "oz.au", 2, false}, - {1, "act.au", 2, false}, - {1, "nsw.au", 2, false}, - {1, "nt.au", 2, false}, - {1, "qld.au", 2, false}, - {1, "sa.au", 2, false}, - {1, "tas.au", 2, false}, - {1, "vic.au", 2, false}, - {1, "wa.au", 2, false}, - {1, "act.edu.au", 3, false}, - {1, "catholic.edu.au", 3, false}, - {1, "nsw.edu.au", 3, false}, - {1, "nt.edu.au", 
3, false}, - {1, "qld.edu.au", 3, false}, - {1, "sa.edu.au", 3, false}, - {1, "tas.edu.au", 3, false}, - {1, "vic.edu.au", 3, false}, - {1, "wa.edu.au", 3, false}, - {1, "qld.gov.au", 3, false}, - {1, "sa.gov.au", 3, false}, - {1, "tas.gov.au", 3, false}, - {1, "vic.gov.au", 3, false}, - {1, "wa.gov.au", 3, false}, - {1, "education.tas.edu.au", 4, false}, - {1, "schools.nsw.edu.au", 4, false}, - {1, "aw", 1, false}, - {1, "com.aw", 2, false}, - {1, "ax", 1, false}, - {1, "az", 1, false}, - {1, "com.az", 2, false}, - {1, "net.az", 2, false}, - {1, "int.az", 2, false}, - {1, "gov.az", 2, false}, - {1, "org.az", 2, false}, - {1, "edu.az", 2, false}, - {1, "info.az", 2, false}, - {1, "pp.az", 2, false}, - {1, "mil.az", 2, false}, - {1, "name.az", 2, false}, - {1, "pro.az", 2, false}, - {1, "biz.az", 2, false}, - {1, "ba", 1, false}, - {1, "com.ba", 2, false}, - {1, "edu.ba", 2, false}, - {1, "gov.ba", 2, false}, - {1, "mil.ba", 2, false}, - {1, "net.ba", 2, false}, - {1, "org.ba", 2, false}, - {1, "bb", 1, false}, - {1, "biz.bb", 2, false}, - {1, "co.bb", 2, false}, - {1, "com.bb", 2, false}, - {1, "edu.bb", 2, false}, - {1, "gov.bb", 2, false}, - {1, "info.bb", 2, false}, - {1, "net.bb", 2, false}, - {1, "org.bb", 2, false}, - {1, "store.bb", 2, false}, - {1, "tv.bb", 2, false}, - {2, "bd", 2, false}, - {1, "be", 1, false}, - {1, "ac.be", 2, false}, - {1, "bf", 1, false}, - {1, "gov.bf", 2, false}, - {1, "bg", 1, false}, - {1, "a.bg", 2, false}, - {1, "b.bg", 2, false}, - {1, "c.bg", 2, false}, - {1, "d.bg", 2, false}, - {1, "e.bg", 2, false}, - {1, "f.bg", 2, false}, - {1, "g.bg", 2, false}, - {1, "h.bg", 2, false}, - {1, "i.bg", 2, false}, - {1, "j.bg", 2, false}, - {1, "k.bg", 2, false}, - {1, "l.bg", 2, false}, - {1, "m.bg", 2, false}, - {1, "n.bg", 2, false}, - {1, "o.bg", 2, false}, - {1, "p.bg", 2, false}, - {1, "q.bg", 2, false}, - {1, "r.bg", 2, false}, - {1, "s.bg", 2, false}, - {1, "t.bg", 2, false}, - {1, "u.bg", 2, false}, - {1, "v.bg", 2, false}, - {1, 
"w.bg", 2, false}, - {1, "x.bg", 2, false}, - {1, "y.bg", 2, false}, - {1, "z.bg", 2, false}, - {1, "0.bg", 2, false}, - {1, "1.bg", 2, false}, - {1, "2.bg", 2, false}, - {1, "3.bg", 2, false}, - {1, "4.bg", 2, false}, - {1, "5.bg", 2, false}, - {1, "6.bg", 2, false}, - {1, "7.bg", 2, false}, - {1, "8.bg", 2, false}, - {1, "9.bg", 2, false}, - {1, "bh", 1, false}, - {1, "com.bh", 2, false}, - {1, "edu.bh", 2, false}, - {1, "net.bh", 2, false}, - {1, "org.bh", 2, false}, - {1, "gov.bh", 2, false}, - {1, "bi", 1, false}, - {1, "co.bi", 2, false}, - {1, "com.bi", 2, false}, - {1, "edu.bi", 2, false}, - {1, "or.bi", 2, false}, - {1, "org.bi", 2, false}, - {1, "biz", 1, false}, - {1, "bj", 1, false}, - {1, "asso.bj", 2, false}, - {1, "barreau.bj", 2, false}, - {1, "gouv.bj", 2, false}, - {1, "bm", 1, false}, - {1, "com.bm", 2, false}, - {1, "edu.bm", 2, false}, - {1, "gov.bm", 2, false}, - {1, "net.bm", 2, false}, - {1, "org.bm", 2, false}, - {1, "bn", 1, false}, - {1, "com.bn", 2, false}, - {1, "edu.bn", 2, false}, - {1, "gov.bn", 2, false}, - {1, "net.bn", 2, false}, - {1, "org.bn", 2, false}, - {1, "bo", 1, false}, - {1, "com.bo", 2, false}, - {1, "edu.bo", 2, false}, - {1, "gob.bo", 2, false}, - {1, "int.bo", 2, false}, - {1, "org.bo", 2, false}, - {1, "net.bo", 2, false}, - {1, "mil.bo", 2, false}, - {1, "tv.bo", 2, false}, - {1, "web.bo", 2, false}, - {1, "academia.bo", 2, false}, - {1, "agro.bo", 2, false}, - {1, "arte.bo", 2, false}, - {1, "blog.bo", 2, false}, - {1, "bolivia.bo", 2, false}, - {1, "ciencia.bo", 2, false}, - {1, "cooperativa.bo", 2, false}, - {1, "democracia.bo", 2, false}, - {1, "deporte.bo", 2, false}, - {1, "ecologia.bo", 2, false}, - {1, "economia.bo", 2, false}, - {1, "empresa.bo", 2, false}, - {1, "indigena.bo", 2, false}, - {1, "industria.bo", 2, false}, - {1, "info.bo", 2, false}, - {1, "medicina.bo", 2, false}, - {1, "movimiento.bo", 2, false}, - {1, "musica.bo", 2, false}, - {1, "natural.bo", 2, false}, - {1, "nombre.bo", 2, false}, - 
{1, "noticias.bo", 2, false}, - {1, "patria.bo", 2, false}, - {1, "politica.bo", 2, false}, - {1, "profesional.bo", 2, false}, - {1, "plurinacional.bo", 2, false}, - {1, "pueblo.bo", 2, false}, - {1, "revista.bo", 2, false}, - {1, "salud.bo", 2, false}, - {1, "tecnologia.bo", 2, false}, - {1, "tksat.bo", 2, false}, - {1, "transporte.bo", 2, false}, - {1, "wiki.bo", 2, false}, - {1, "br", 1, false}, - {1, "9guacu.br", 2, false}, - {1, "abc.br", 2, false}, - {1, "adm.br", 2, false}, - {1, "adv.br", 2, false}, - {1, "agr.br", 2, false}, - {1, "aju.br", 2, false}, - {1, "am.br", 2, false}, - {1, "anani.br", 2, false}, - {1, "aparecida.br", 2, false}, - {1, "arq.br", 2, false}, - {1, "art.br", 2, false}, - {1, "ato.br", 2, false}, - {1, "b.br", 2, false}, - {1, "barueri.br", 2, false}, - {1, "belem.br", 2, false}, - {1, "bhz.br", 2, false}, - {1, "bio.br", 2, false}, - {1, "blog.br", 2, false}, - {1, "bmd.br", 2, false}, - {1, "boavista.br", 2, false}, - {1, "bsb.br", 2, false}, - {1, "campinagrande.br", 2, false}, - {1, "campinas.br", 2, false}, - {1, "caxias.br", 2, false}, - {1, "cim.br", 2, false}, - {1, "cng.br", 2, false}, - {1, "cnt.br", 2, false}, - {1, "com.br", 2, false}, - {1, "contagem.br", 2, false}, - {1, "coop.br", 2, false}, - {1, "cri.br", 2, false}, - {1, "cuiaba.br", 2, false}, - {1, "curitiba.br", 2, false}, - {1, "def.br", 2, false}, - {1, "ecn.br", 2, false}, - {1, "eco.br", 2, false}, - {1, "edu.br", 2, false}, - {1, "emp.br", 2, false}, - {1, "eng.br", 2, false}, - {1, "esp.br", 2, false}, - {1, "etc.br", 2, false}, - {1, "eti.br", 2, false}, - {1, "far.br", 2, false}, - {1, "feira.br", 2, false}, - {1, "flog.br", 2, false}, - {1, "floripa.br", 2, false}, - {1, "fm.br", 2, false}, - {1, "fnd.br", 2, false}, - {1, "fortal.br", 2, false}, - {1, "fot.br", 2, false}, - {1, "foz.br", 2, false}, - {1, "fst.br", 2, false}, - {1, "g12.br", 2, false}, - {1, "ggf.br", 2, false}, - {1, "goiania.br", 2, false}, - {1, "gov.br", 2, false}, - {1, "ac.gov.br", 
3, false}, - {1, "al.gov.br", 3, false}, - {1, "am.gov.br", 3, false}, - {1, "ap.gov.br", 3, false}, - {1, "ba.gov.br", 3, false}, - {1, "ce.gov.br", 3, false}, - {1, "df.gov.br", 3, false}, - {1, "es.gov.br", 3, false}, - {1, "go.gov.br", 3, false}, - {1, "ma.gov.br", 3, false}, - {1, "mg.gov.br", 3, false}, - {1, "ms.gov.br", 3, false}, - {1, "mt.gov.br", 3, false}, - {1, "pa.gov.br", 3, false}, - {1, "pb.gov.br", 3, false}, - {1, "pe.gov.br", 3, false}, - {1, "pi.gov.br", 3, false}, - {1, "pr.gov.br", 3, false}, - {1, "rj.gov.br", 3, false}, - {1, "rn.gov.br", 3, false}, - {1, "ro.gov.br", 3, false}, - {1, "rr.gov.br", 3, false}, - {1, "rs.gov.br", 3, false}, - {1, "sc.gov.br", 3, false}, - {1, "se.gov.br", 3, false}, - {1, "sp.gov.br", 3, false}, - {1, "to.gov.br", 3, false}, - {1, "gru.br", 2, false}, - {1, "imb.br", 2, false}, - {1, "ind.br", 2, false}, - {1, "inf.br", 2, false}, - {1, "jab.br", 2, false}, - {1, "jampa.br", 2, false}, - {1, "jdf.br", 2, false}, - {1, "joinville.br", 2, false}, - {1, "jor.br", 2, false}, - {1, "jus.br", 2, false}, - {1, "leg.br", 2, false}, - {1, "lel.br", 2, false}, - {1, "londrina.br", 2, false}, - {1, "macapa.br", 2, false}, - {1, "maceio.br", 2, false}, - {1, "manaus.br", 2, false}, - {1, "maringa.br", 2, false}, - {1, "mat.br", 2, false}, - {1, "med.br", 2, false}, - {1, "mil.br", 2, false}, - {1, "morena.br", 2, false}, - {1, "mp.br", 2, false}, - {1, "mus.br", 2, false}, - {1, "natal.br", 2, false}, - {1, "net.br", 2, false}, - {1, "niteroi.br", 2, false}, - {2, "nom.br", 3, false}, - {1, "not.br", 2, false}, - {1, "ntr.br", 2, false}, - {1, "odo.br", 2, false}, - {1, "ong.br", 2, false}, - {1, "org.br", 2, false}, - {1, "osasco.br", 2, false}, - {1, "palmas.br", 2, false}, - {1, "poa.br", 2, false}, - {1, "ppg.br", 2, false}, - {1, "pro.br", 2, false}, - {1, "psc.br", 2, false}, - {1, "psi.br", 2, false}, - {1, "pvh.br", 2, false}, - {1, "qsl.br", 2, false}, - {1, "radio.br", 2, false}, - {1, "rec.br", 2, false}, - {1, 
"recife.br", 2, false}, - {1, "ribeirao.br", 2, false}, - {1, "rio.br", 2, false}, - {1, "riobranco.br", 2, false}, - {1, "riopreto.br", 2, false}, - {1, "salvador.br", 2, false}, - {1, "sampa.br", 2, false}, - {1, "santamaria.br", 2, false}, - {1, "santoandre.br", 2, false}, - {1, "saobernardo.br", 2, false}, - {1, "saogonca.br", 2, false}, - {1, "sjc.br", 2, false}, - {1, "slg.br", 2, false}, - {1, "slz.br", 2, false}, - {1, "sorocaba.br", 2, false}, - {1, "srv.br", 2, false}, - {1, "taxi.br", 2, false}, - {1, "tc.br", 2, false}, - {1, "teo.br", 2, false}, - {1, "the.br", 2, false}, - {1, "tmp.br", 2, false}, - {1, "trd.br", 2, false}, - {1, "tur.br", 2, false}, - {1, "tv.br", 2, false}, - {1, "udi.br", 2, false}, - {1, "vet.br", 2, false}, - {1, "vix.br", 2, false}, - {1, "vlog.br", 2, false}, - {1, "wiki.br", 2, false}, - {1, "zlg.br", 2, false}, - {1, "bs", 1, false}, - {1, "com.bs", 2, false}, - {1, "net.bs", 2, false}, - {1, "org.bs", 2, false}, - {1, "edu.bs", 2, false}, - {1, "gov.bs", 2, false}, - {1, "bt", 1, false}, - {1, "com.bt", 2, false}, - {1, "edu.bt", 2, false}, - {1, "gov.bt", 2, false}, - {1, "net.bt", 2, false}, - {1, "org.bt", 2, false}, - {1, "bv", 1, false}, - {1, "bw", 1, false}, - {1, "co.bw", 2, false}, - {1, "org.bw", 2, false}, - {1, "by", 1, false}, - {1, "gov.by", 2, false}, - {1, "mil.by", 2, false}, - {1, "com.by", 2, false}, - {1, "of.by", 2, false}, - {1, "bz", 1, false}, - {1, "com.bz", 2, false}, - {1, "net.bz", 2, false}, - {1, "org.bz", 2, false}, - {1, "edu.bz", 2, false}, - {1, "gov.bz", 2, false}, - {1, "ca", 1, false}, - {1, "ab.ca", 2, false}, - {1, "bc.ca", 2, false}, - {1, "mb.ca", 2, false}, - {1, "nb.ca", 2, false}, - {1, "nf.ca", 2, false}, - {1, "nl.ca", 2, false}, - {1, "ns.ca", 2, false}, - {1, "nt.ca", 2, false}, - {1, "nu.ca", 2, false}, - {1, "on.ca", 2, false}, - {1, "pe.ca", 2, false}, - {1, "qc.ca", 2, false}, - {1, "sk.ca", 2, false}, - {1, "yk.ca", 2, false}, - {1, "gc.ca", 2, false}, - {1, "cat", 1, 
false}, - {1, "cc", 1, false}, - {1, "cd", 1, false}, - {1, "gov.cd", 2, false}, - {1, "cf", 1, false}, - {1, "cg", 1, false}, - {1, "ch", 1, false}, - {1, "ci", 1, false}, - {1, "org.ci", 2, false}, - {1, "or.ci", 2, false}, - {1, "com.ci", 2, false}, - {1, "co.ci", 2, false}, - {1, "edu.ci", 2, false}, - {1, "ed.ci", 2, false}, - {1, "ac.ci", 2, false}, - {1, "net.ci", 2, false}, - {1, "go.ci", 2, false}, - {1, "asso.ci", 2, false}, - {1, "xn--aroport-bya.ci", 2, false}, - {1, "int.ci", 2, false}, - {1, "presse.ci", 2, false}, - {1, "md.ci", 2, false}, - {1, "gouv.ci", 2, false}, - {2, "ck", 2, false}, - {3, "www.ck", 2, false}, - {1, "cl", 1, false}, - {1, "gov.cl", 2, false}, - {1, "gob.cl", 2, false}, - {1, "co.cl", 2, false}, - {1, "mil.cl", 2, false}, - {1, "cm", 1, false}, - {1, "co.cm", 2, false}, - {1, "com.cm", 2, false}, - {1, "gov.cm", 2, false}, - {1, "net.cm", 2, false}, - {1, "cn", 1, false}, - {1, "ac.cn", 2, false}, - {1, "com.cn", 2, false}, - {1, "edu.cn", 2, false}, - {1, "gov.cn", 2, false}, - {1, "net.cn", 2, false}, - {1, "org.cn", 2, false}, - {1, "mil.cn", 2, false}, - {1, "xn--55qx5d.cn", 2, false}, - {1, "xn--io0a7i.cn", 2, false}, - {1, "xn--od0alg.cn", 2, false}, - {1, "ah.cn", 2, false}, - {1, "bj.cn", 2, false}, - {1, "cq.cn", 2, false}, - {1, "fj.cn", 2, false}, - {1, "gd.cn", 2, false}, - {1, "gs.cn", 2, false}, - {1, "gz.cn", 2, false}, - {1, "gx.cn", 2, false}, - {1, "ha.cn", 2, false}, - {1, "hb.cn", 2, false}, - {1, "he.cn", 2, false}, - {1, "hi.cn", 2, false}, - {1, "hl.cn", 2, false}, - {1, "hn.cn", 2, false}, - {1, "jl.cn", 2, false}, - {1, "js.cn", 2, false}, - {1, "jx.cn", 2, false}, - {1, "ln.cn", 2, false}, - {1, "nm.cn", 2, false}, - {1, "nx.cn", 2, false}, - {1, "qh.cn", 2, false}, - {1, "sc.cn", 2, false}, - {1, "sd.cn", 2, false}, - {1, "sh.cn", 2, false}, - {1, "sn.cn", 2, false}, - {1, "sx.cn", 2, false}, - {1, "tj.cn", 2, false}, - {1, "xj.cn", 2, false}, - {1, "xz.cn", 2, false}, - {1, "yn.cn", 2, false}, - {1, 
"zj.cn", 2, false}, - {1, "hk.cn", 2, false}, - {1, "mo.cn", 2, false}, - {1, "tw.cn", 2, false}, - {1, "co", 1, false}, - {1, "arts.co", 2, false}, - {1, "com.co", 2, false}, - {1, "edu.co", 2, false}, - {1, "firm.co", 2, false}, - {1, "gov.co", 2, false}, - {1, "info.co", 2, false}, - {1, "int.co", 2, false}, - {1, "mil.co", 2, false}, - {1, "net.co", 2, false}, - {1, "nom.co", 2, false}, - {1, "org.co", 2, false}, - {1, "rec.co", 2, false}, - {1, "web.co", 2, false}, - {1, "com", 1, false}, - {1, "coop", 1, false}, - {1, "cr", 1, false}, - {1, "ac.cr", 2, false}, - {1, "co.cr", 2, false}, - {1, "ed.cr", 2, false}, - {1, "fi.cr", 2, false}, - {1, "go.cr", 2, false}, - {1, "or.cr", 2, false}, - {1, "sa.cr", 2, false}, - {1, "cu", 1, false}, - {1, "com.cu", 2, false}, - {1, "edu.cu", 2, false}, - {1, "org.cu", 2, false}, - {1, "net.cu", 2, false}, - {1, "gov.cu", 2, false}, - {1, "inf.cu", 2, false}, - {1, "cv", 1, false}, - {1, "cw", 1, false}, - {1, "com.cw", 2, false}, - {1, "edu.cw", 2, false}, - {1, "net.cw", 2, false}, - {1, "org.cw", 2, false}, - {1, "cx", 1, false}, - {1, "gov.cx", 2, false}, - {1, "cy", 1, false}, - {1, "ac.cy", 2, false}, - {1, "biz.cy", 2, false}, - {1, "com.cy", 2, false}, - {1, "ekloges.cy", 2, false}, - {1, "gov.cy", 2, false}, - {1, "ltd.cy", 2, false}, - {1, "name.cy", 2, false}, - {1, "net.cy", 2, false}, - {1, "org.cy", 2, false}, - {1, "parliament.cy", 2, false}, - {1, "press.cy", 2, false}, - {1, "pro.cy", 2, false}, - {1, "tm.cy", 2, false}, - {1, "cz", 1, false}, - {1, "de", 1, false}, - {1, "dj", 1, false}, - {1, "dk", 1, false}, - {1, "dm", 1, false}, - {1, "com.dm", 2, false}, - {1, "net.dm", 2, false}, - {1, "org.dm", 2, false}, - {1, "edu.dm", 2, false}, - {1, "gov.dm", 2, false}, - {1, "do", 1, false}, - {1, "art.do", 2, false}, - {1, "com.do", 2, false}, - {1, "edu.do", 2, false}, - {1, "gob.do", 2, false}, - {1, "gov.do", 2, false}, - {1, "mil.do", 2, false}, - {1, "net.do", 2, false}, - {1, "org.do", 2, false}, - {1, 
"sld.do", 2, false}, - {1, "web.do", 2, false}, - {1, "dz", 1, false}, - {1, "com.dz", 2, false}, - {1, "org.dz", 2, false}, - {1, "net.dz", 2, false}, - {1, "gov.dz", 2, false}, - {1, "edu.dz", 2, false}, - {1, "asso.dz", 2, false}, - {1, "pol.dz", 2, false}, - {1, "art.dz", 2, false}, - {1, "ec", 1, false}, - {1, "com.ec", 2, false}, - {1, "info.ec", 2, false}, - {1, "net.ec", 2, false}, - {1, "fin.ec", 2, false}, - {1, "k12.ec", 2, false}, - {1, "med.ec", 2, false}, - {1, "pro.ec", 2, false}, - {1, "org.ec", 2, false}, - {1, "edu.ec", 2, false}, - {1, "gov.ec", 2, false}, - {1, "gob.ec", 2, false}, - {1, "mil.ec", 2, false}, - {1, "edu", 1, false}, - {1, "ee", 1, false}, - {1, "edu.ee", 2, false}, - {1, "gov.ee", 2, false}, - {1, "riik.ee", 2, false}, - {1, "lib.ee", 2, false}, - {1, "med.ee", 2, false}, - {1, "com.ee", 2, false}, - {1, "pri.ee", 2, false}, - {1, "aip.ee", 2, false}, - {1, "org.ee", 2, false}, - {1, "fie.ee", 2, false}, - {1, "eg", 1, false}, - {1, "com.eg", 2, false}, - {1, "edu.eg", 2, false}, - {1, "eun.eg", 2, false}, - {1, "gov.eg", 2, false}, - {1, "mil.eg", 2, false}, - {1, "name.eg", 2, false}, - {1, "net.eg", 2, false}, - {1, "org.eg", 2, false}, - {1, "sci.eg", 2, false}, - {2, "er", 2, false}, - {1, "es", 1, false}, - {1, "com.es", 2, false}, - {1, "nom.es", 2, false}, - {1, "org.es", 2, false}, - {1, "gob.es", 2, false}, - {1, "edu.es", 2, false}, - {1, "et", 1, false}, - {1, "com.et", 2, false}, - {1, "gov.et", 2, false}, - {1, "org.et", 2, false}, - {1, "edu.et", 2, false}, - {1, "biz.et", 2, false}, - {1, "name.et", 2, false}, - {1, "info.et", 2, false}, - {1, "net.et", 2, false}, - {1, "eu", 1, false}, - {1, "fi", 1, false}, - {1, "aland.fi", 2, false}, - {1, "fj", 1, false}, - {1, "ac.fj", 2, false}, - {1, "biz.fj", 2, false}, - {1, "com.fj", 2, false}, - {1, "gov.fj", 2, false}, - {1, "info.fj", 2, false}, - {1, "mil.fj", 2, false}, - {1, "name.fj", 2, false}, - {1, "net.fj", 2, false}, - {1, "org.fj", 2, false}, - {1, 
"pro.fj", 2, false}, - {2, "fk", 2, false}, - {1, "fm", 1, false}, - {1, "fo", 1, false}, - {1, "fr", 1, false}, - {1, "asso.fr", 2, false}, - {1, "com.fr", 2, false}, - {1, "gouv.fr", 2, false}, - {1, "nom.fr", 2, false}, - {1, "prd.fr", 2, false}, - {1, "tm.fr", 2, false}, - {1, "aeroport.fr", 2, false}, - {1, "avocat.fr", 2, false}, - {1, "avoues.fr", 2, false}, - {1, "cci.fr", 2, false}, - {1, "chambagri.fr", 2, false}, - {1, "chirurgiens-dentistes.fr", 2, false}, - {1, "experts-comptables.fr", 2, false}, - {1, "geometre-expert.fr", 2, false}, - {1, "greta.fr", 2, false}, - {1, "huissier-justice.fr", 2, false}, - {1, "medecin.fr", 2, false}, - {1, "notaires.fr", 2, false}, - {1, "pharmacien.fr", 2, false}, - {1, "port.fr", 2, false}, - {1, "veterinaire.fr", 2, false}, - {1, "ga", 1, false}, - {1, "gb", 1, false}, - {1, "gd", 1, false}, - {1, "ge", 1, false}, - {1, "com.ge", 2, false}, - {1, "edu.ge", 2, false}, - {1, "gov.ge", 2, false}, - {1, "org.ge", 2, false}, - {1, "mil.ge", 2, false}, - {1, "net.ge", 2, false}, - {1, "pvt.ge", 2, false}, - {1, "gf", 1, false}, - {1, "gg", 1, false}, - {1, "co.gg", 2, false}, - {1, "net.gg", 2, false}, - {1, "org.gg", 2, false}, - {1, "gh", 1, false}, - {1, "com.gh", 2, false}, - {1, "edu.gh", 2, false}, - {1, "gov.gh", 2, false}, - {1, "org.gh", 2, false}, - {1, "mil.gh", 2, false}, - {1, "gi", 1, false}, - {1, "com.gi", 2, false}, - {1, "ltd.gi", 2, false}, - {1, "gov.gi", 2, false}, - {1, "mod.gi", 2, false}, - {1, "edu.gi", 2, false}, - {1, "org.gi", 2, false}, - {1, "gl", 1, false}, - {1, "co.gl", 2, false}, - {1, "com.gl", 2, false}, - {1, "edu.gl", 2, false}, - {1, "net.gl", 2, false}, - {1, "org.gl", 2, false}, - {1, "gm", 1, false}, - {1, "gn", 1, false}, - {1, "ac.gn", 2, false}, - {1, "com.gn", 2, false}, - {1, "edu.gn", 2, false}, - {1, "gov.gn", 2, false}, - {1, "org.gn", 2, false}, - {1, "net.gn", 2, false}, - {1, "gov", 1, false}, - {1, "gp", 1, false}, - {1, "com.gp", 2, false}, - {1, "net.gp", 2, false}, - 
{1, "mobi.gp", 2, false}, - {1, "edu.gp", 2, false}, - {1, "org.gp", 2, false}, - {1, "asso.gp", 2, false}, - {1, "gq", 1, false}, - {1, "gr", 1, false}, - {1, "com.gr", 2, false}, - {1, "edu.gr", 2, false}, - {1, "net.gr", 2, false}, - {1, "org.gr", 2, false}, - {1, "gov.gr", 2, false}, - {1, "gs", 1, false}, - {1, "gt", 1, false}, - {1, "com.gt", 2, false}, - {1, "edu.gt", 2, false}, - {1, "gob.gt", 2, false}, - {1, "ind.gt", 2, false}, - {1, "mil.gt", 2, false}, - {1, "net.gt", 2, false}, - {1, "org.gt", 2, false}, - {1, "gu", 1, false}, - {1, "com.gu", 2, false}, - {1, "edu.gu", 2, false}, - {1, "gov.gu", 2, false}, - {1, "guam.gu", 2, false}, - {1, "info.gu", 2, false}, - {1, "net.gu", 2, false}, - {1, "org.gu", 2, false}, - {1, "web.gu", 2, false}, - {1, "gw", 1, false}, - {1, "gy", 1, false}, - {1, "co.gy", 2, false}, - {1, "com.gy", 2, false}, - {1, "edu.gy", 2, false}, - {1, "gov.gy", 2, false}, - {1, "net.gy", 2, false}, - {1, "org.gy", 2, false}, - {1, "hk", 1, false}, - {1, "com.hk", 2, false}, - {1, "edu.hk", 2, false}, - {1, "gov.hk", 2, false}, - {1, "idv.hk", 2, false}, - {1, "net.hk", 2, false}, - {1, "org.hk", 2, false}, - {1, "xn--55qx5d.hk", 2, false}, - {1, "xn--wcvs22d.hk", 2, false}, - {1, "xn--lcvr32d.hk", 2, false}, - {1, "xn--mxtq1m.hk", 2, false}, - {1, "xn--gmqw5a.hk", 2, false}, - {1, "xn--ciqpn.hk", 2, false}, - {1, "xn--gmq050i.hk", 2, false}, - {1, "xn--zf0avx.hk", 2, false}, - {1, "xn--io0a7i.hk", 2, false}, - {1, "xn--mk0axi.hk", 2, false}, - {1, "xn--od0alg.hk", 2, false}, - {1, "xn--od0aq3b.hk", 2, false}, - {1, "xn--tn0ag.hk", 2, false}, - {1, "xn--uc0atv.hk", 2, false}, - {1, "xn--uc0ay4a.hk", 2, false}, - {1, "hm", 1, false}, - {1, "hn", 1, false}, - {1, "com.hn", 2, false}, - {1, "edu.hn", 2, false}, - {1, "org.hn", 2, false}, - {1, "net.hn", 2, false}, - {1, "mil.hn", 2, false}, - {1, "gob.hn", 2, false}, - {1, "hr", 1, false}, - {1, "iz.hr", 2, false}, - {1, "from.hr", 2, false}, - {1, "name.hr", 2, false}, - {1, "com.hr", 
2, false}, - {1, "ht", 1, false}, - {1, "com.ht", 2, false}, - {1, "shop.ht", 2, false}, - {1, "firm.ht", 2, false}, - {1, "info.ht", 2, false}, - {1, "adult.ht", 2, false}, - {1, "net.ht", 2, false}, - {1, "pro.ht", 2, false}, - {1, "org.ht", 2, false}, - {1, "med.ht", 2, false}, - {1, "art.ht", 2, false}, - {1, "coop.ht", 2, false}, - {1, "pol.ht", 2, false}, - {1, "asso.ht", 2, false}, - {1, "edu.ht", 2, false}, - {1, "rel.ht", 2, false}, - {1, "gouv.ht", 2, false}, - {1, "perso.ht", 2, false}, - {1, "hu", 1, false}, - {1, "co.hu", 2, false}, - {1, "info.hu", 2, false}, - {1, "org.hu", 2, false}, - {1, "priv.hu", 2, false}, - {1, "sport.hu", 2, false}, - {1, "tm.hu", 2, false}, - {1, "2000.hu", 2, false}, - {1, "agrar.hu", 2, false}, - {1, "bolt.hu", 2, false}, - {1, "casino.hu", 2, false}, - {1, "city.hu", 2, false}, - {1, "erotica.hu", 2, false}, - {1, "erotika.hu", 2, false}, - {1, "film.hu", 2, false}, - {1, "forum.hu", 2, false}, - {1, "games.hu", 2, false}, - {1, "hotel.hu", 2, false}, - {1, "ingatlan.hu", 2, false}, - {1, "jogasz.hu", 2, false}, - {1, "konyvelo.hu", 2, false}, - {1, "lakas.hu", 2, false}, - {1, "media.hu", 2, false}, - {1, "news.hu", 2, false}, - {1, "reklam.hu", 2, false}, - {1, "sex.hu", 2, false}, - {1, "shop.hu", 2, false}, - {1, "suli.hu", 2, false}, - {1, "szex.hu", 2, false}, - {1, "tozsde.hu", 2, false}, - {1, "utazas.hu", 2, false}, - {1, "video.hu", 2, false}, - {1, "id", 1, false}, - {1, "ac.id", 2, false}, - {1, "biz.id", 2, false}, - {1, "co.id", 2, false}, - {1, "desa.id", 2, false}, - {1, "go.id", 2, false}, - {1, "mil.id", 2, false}, - {1, "my.id", 2, false}, - {1, "net.id", 2, false}, - {1, "or.id", 2, false}, - {1, "ponpes.id", 2, false}, - {1, "sch.id", 2, false}, - {1, "web.id", 2, false}, - {1, "ie", 1, false}, - {1, "gov.ie", 2, false}, - {1, "il", 1, false}, - {1, "ac.il", 2, false}, - {1, "co.il", 2, false}, - {1, "gov.il", 2, false}, - {1, "idf.il", 2, false}, - {1, "k12.il", 2, false}, - {1, "muni.il", 2, false}, 
- {1, "net.il", 2, false}, - {1, "org.il", 2, false}, - {1, "im", 1, false}, - {1, "ac.im", 2, false}, - {1, "co.im", 2, false}, - {1, "com.im", 2, false}, - {1, "ltd.co.im", 3, false}, - {1, "net.im", 2, false}, - {1, "org.im", 2, false}, - {1, "plc.co.im", 3, false}, - {1, "tt.im", 2, false}, - {1, "tv.im", 2, false}, - {1, "in", 1, false}, - {1, "co.in", 2, false}, - {1, "firm.in", 2, false}, - {1, "net.in", 2, false}, - {1, "org.in", 2, false}, - {1, "gen.in", 2, false}, - {1, "ind.in", 2, false}, - {1, "nic.in", 2, false}, - {1, "ac.in", 2, false}, - {1, "edu.in", 2, false}, - {1, "res.in", 2, false}, - {1, "gov.in", 2, false}, - {1, "mil.in", 2, false}, - {1, "info", 1, false}, - {1, "int", 1, false}, - {1, "eu.int", 2, false}, - {1, "io", 1, false}, - {1, "com.io", 2, false}, - {1, "iq", 1, false}, - {1, "gov.iq", 2, false}, - {1, "edu.iq", 2, false}, - {1, "mil.iq", 2, false}, - {1, "com.iq", 2, false}, - {1, "org.iq", 2, false}, - {1, "net.iq", 2, false}, - {1, "ir", 1, false}, - {1, "ac.ir", 2, false}, - {1, "co.ir", 2, false}, - {1, "gov.ir", 2, false}, - {1, "id.ir", 2, false}, - {1, "net.ir", 2, false}, - {1, "org.ir", 2, false}, - {1, "sch.ir", 2, false}, - {1, "xn--mgba3a4f16a.ir", 2, false}, - {1, "xn--mgba3a4fra.ir", 2, false}, - {1, "is", 1, false}, - {1, "net.is", 2, false}, - {1, "com.is", 2, false}, - {1, "edu.is", 2, false}, - {1, "gov.is", 2, false}, - {1, "org.is", 2, false}, - {1, "int.is", 2, false}, - {1, "it", 1, false}, - {1, "gov.it", 2, false}, - {1, "edu.it", 2, false}, - {1, "abr.it", 2, false}, - {1, "abruzzo.it", 2, false}, - {1, "aosta-valley.it", 2, false}, - {1, "aostavalley.it", 2, false}, - {1, "bas.it", 2, false}, - {1, "basilicata.it", 2, false}, - {1, "cal.it", 2, false}, - {1, "calabria.it", 2, false}, - {1, "cam.it", 2, false}, - {1, "campania.it", 2, false}, - {1, "emilia-romagna.it", 2, false}, - {1, "emiliaromagna.it", 2, false}, - {1, "emr.it", 2, false}, - {1, "friuli-v-giulia.it", 2, false}, - {1, 
"friuli-ve-giulia.it", 2, false}, - {1, "friuli-vegiulia.it", 2, false}, - {1, "friuli-venezia-giulia.it", 2, false}, - {1, "friuli-veneziagiulia.it", 2, false}, - {1, "friuli-vgiulia.it", 2, false}, - {1, "friuliv-giulia.it", 2, false}, - {1, "friulive-giulia.it", 2, false}, - {1, "friulivegiulia.it", 2, false}, - {1, "friulivenezia-giulia.it", 2, false}, - {1, "friuliveneziagiulia.it", 2, false}, - {1, "friulivgiulia.it", 2, false}, - {1, "fvg.it", 2, false}, - {1, "laz.it", 2, false}, - {1, "lazio.it", 2, false}, - {1, "lig.it", 2, false}, - {1, "liguria.it", 2, false}, - {1, "lom.it", 2, false}, - {1, "lombardia.it", 2, false}, - {1, "lombardy.it", 2, false}, - {1, "lucania.it", 2, false}, - {1, "mar.it", 2, false}, - {1, "marche.it", 2, false}, - {1, "mol.it", 2, false}, - {1, "molise.it", 2, false}, - {1, "piedmont.it", 2, false}, - {1, "piemonte.it", 2, false}, - {1, "pmn.it", 2, false}, - {1, "pug.it", 2, false}, - {1, "puglia.it", 2, false}, - {1, "sar.it", 2, false}, - {1, "sardegna.it", 2, false}, - {1, "sardinia.it", 2, false}, - {1, "sic.it", 2, false}, - {1, "sicilia.it", 2, false}, - {1, "sicily.it", 2, false}, - {1, "taa.it", 2, false}, - {1, "tos.it", 2, false}, - {1, "toscana.it", 2, false}, - {1, "trentin-sud-tirol.it", 2, false}, - {1, "xn--trentin-sd-tirol-rzb.it", 2, false}, - {1, "trentin-sudtirol.it", 2, false}, - {1, "xn--trentin-sdtirol-7vb.it", 2, false}, - {1, "trentin-sued-tirol.it", 2, false}, - {1, "trentin-suedtirol.it", 2, false}, - {1, "trentino-a-adige.it", 2, false}, - {1, "trentino-aadige.it", 2, false}, - {1, "trentino-alto-adige.it", 2, false}, - {1, "trentino-altoadige.it", 2, false}, - {1, "trentino-s-tirol.it", 2, false}, - {1, "trentino-stirol.it", 2, false}, - {1, "trentino-sud-tirol.it", 2, false}, - {1, "xn--trentino-sd-tirol-c3b.it", 2, false}, - {1, "trentino-sudtirol.it", 2, false}, - {1, "xn--trentino-sdtirol-szb.it", 2, false}, - {1, "trentino-sued-tirol.it", 2, false}, - {1, "trentino-suedtirol.it", 2, false}, - 
{1, "trentino.it", 2, false}, - {1, "trentinoa-adige.it", 2, false}, - {1, "trentinoaadige.it", 2, false}, - {1, "trentinoalto-adige.it", 2, false}, - {1, "trentinoaltoadige.it", 2, false}, - {1, "trentinos-tirol.it", 2, false}, - {1, "trentinostirol.it", 2, false}, - {1, "trentinosud-tirol.it", 2, false}, - {1, "xn--trentinosd-tirol-rzb.it", 2, false}, - {1, "trentinosudtirol.it", 2, false}, - {1, "xn--trentinosdtirol-7vb.it", 2, false}, - {1, "trentinosued-tirol.it", 2, false}, - {1, "trentinosuedtirol.it", 2, false}, - {1, "trentinsud-tirol.it", 2, false}, - {1, "xn--trentinsd-tirol-6vb.it", 2, false}, - {1, "trentinsudtirol.it", 2, false}, - {1, "xn--trentinsdtirol-nsb.it", 2, false}, - {1, "trentinsued-tirol.it", 2, false}, - {1, "trentinsuedtirol.it", 2, false}, - {1, "tuscany.it", 2, false}, - {1, "umb.it", 2, false}, - {1, "umbria.it", 2, false}, - {1, "val-d-aosta.it", 2, false}, - {1, "val-daosta.it", 2, false}, - {1, "vald-aosta.it", 2, false}, - {1, "valdaosta.it", 2, false}, - {1, "valle-aosta.it", 2, false}, - {1, "valle-d-aosta.it", 2, false}, - {1, "valle-daosta.it", 2, false}, - {1, "valleaosta.it", 2, false}, - {1, "valled-aosta.it", 2, false}, - {1, "valledaosta.it", 2, false}, - {1, "vallee-aoste.it", 2, false}, - {1, "xn--valle-aoste-ebb.it", 2, false}, - {1, "vallee-d-aoste.it", 2, false}, - {1, "xn--valle-d-aoste-ehb.it", 2, false}, - {1, "valleeaoste.it", 2, false}, - {1, "xn--valleaoste-e7a.it", 2, false}, - {1, "valleedaoste.it", 2, false}, - {1, "xn--valledaoste-ebb.it", 2, false}, - {1, "vao.it", 2, false}, - {1, "vda.it", 2, false}, - {1, "ven.it", 2, false}, - {1, "veneto.it", 2, false}, - {1, "ag.it", 2, false}, - {1, "agrigento.it", 2, false}, - {1, "al.it", 2, false}, - {1, "alessandria.it", 2, false}, - {1, "alto-adige.it", 2, false}, - {1, "altoadige.it", 2, false}, - {1, "an.it", 2, false}, - {1, "ancona.it", 2, false}, - {1, "andria-barletta-trani.it", 2, false}, - {1, "andria-trani-barletta.it", 2, false}, - {1, 
"andriabarlettatrani.it", 2, false}, - {1, "andriatranibarletta.it", 2, false}, - {1, "ao.it", 2, false}, - {1, "aosta.it", 2, false}, - {1, "aoste.it", 2, false}, - {1, "ap.it", 2, false}, - {1, "aq.it", 2, false}, - {1, "aquila.it", 2, false}, - {1, "ar.it", 2, false}, - {1, "arezzo.it", 2, false}, - {1, "ascoli-piceno.it", 2, false}, - {1, "ascolipiceno.it", 2, false}, - {1, "asti.it", 2, false}, - {1, "at.it", 2, false}, - {1, "av.it", 2, false}, - {1, "avellino.it", 2, false}, - {1, "ba.it", 2, false}, - {1, "balsan-sudtirol.it", 2, false}, - {1, "xn--balsan-sdtirol-nsb.it", 2, false}, - {1, "balsan-suedtirol.it", 2, false}, - {1, "balsan.it", 2, false}, - {1, "bari.it", 2, false}, - {1, "barletta-trani-andria.it", 2, false}, - {1, "barlettatraniandria.it", 2, false}, - {1, "belluno.it", 2, false}, - {1, "benevento.it", 2, false}, - {1, "bergamo.it", 2, false}, - {1, "bg.it", 2, false}, - {1, "bi.it", 2, false}, - {1, "biella.it", 2, false}, - {1, "bl.it", 2, false}, - {1, "bn.it", 2, false}, - {1, "bo.it", 2, false}, - {1, "bologna.it", 2, false}, - {1, "bolzano-altoadige.it", 2, false}, - {1, "bolzano.it", 2, false}, - {1, "bozen-sudtirol.it", 2, false}, - {1, "xn--bozen-sdtirol-2ob.it", 2, false}, - {1, "bozen-suedtirol.it", 2, false}, - {1, "bozen.it", 2, false}, - {1, "br.it", 2, false}, - {1, "brescia.it", 2, false}, - {1, "brindisi.it", 2, false}, - {1, "bs.it", 2, false}, - {1, "bt.it", 2, false}, - {1, "bulsan-sudtirol.it", 2, false}, - {1, "xn--bulsan-sdtirol-nsb.it", 2, false}, - {1, "bulsan-suedtirol.it", 2, false}, - {1, "bulsan.it", 2, false}, - {1, "bz.it", 2, false}, - {1, "ca.it", 2, false}, - {1, "cagliari.it", 2, false}, - {1, "caltanissetta.it", 2, false}, - {1, "campidano-medio.it", 2, false}, - {1, "campidanomedio.it", 2, false}, - {1, "campobasso.it", 2, false}, - {1, "carbonia-iglesias.it", 2, false}, - {1, "carboniaiglesias.it", 2, false}, - {1, "carrara-massa.it", 2, false}, - {1, "carraramassa.it", 2, false}, - {1, "caserta.it", 2, 
false}, - {1, "catania.it", 2, false}, - {1, "catanzaro.it", 2, false}, - {1, "cb.it", 2, false}, - {1, "ce.it", 2, false}, - {1, "cesena-forli.it", 2, false}, - {1, "xn--cesena-forl-mcb.it", 2, false}, - {1, "cesenaforli.it", 2, false}, - {1, "xn--cesenaforl-i8a.it", 2, false}, - {1, "ch.it", 2, false}, - {1, "chieti.it", 2, false}, - {1, "ci.it", 2, false}, - {1, "cl.it", 2, false}, - {1, "cn.it", 2, false}, - {1, "co.it", 2, false}, - {1, "como.it", 2, false}, - {1, "cosenza.it", 2, false}, - {1, "cr.it", 2, false}, - {1, "cremona.it", 2, false}, - {1, "crotone.it", 2, false}, - {1, "cs.it", 2, false}, - {1, "ct.it", 2, false}, - {1, "cuneo.it", 2, false}, - {1, "cz.it", 2, false}, - {1, "dell-ogliastra.it", 2, false}, - {1, "dellogliastra.it", 2, false}, - {1, "en.it", 2, false}, - {1, "enna.it", 2, false}, - {1, "fc.it", 2, false}, - {1, "fe.it", 2, false}, - {1, "fermo.it", 2, false}, - {1, "ferrara.it", 2, false}, - {1, "fg.it", 2, false}, - {1, "fi.it", 2, false}, - {1, "firenze.it", 2, false}, - {1, "florence.it", 2, false}, - {1, "fm.it", 2, false}, - {1, "foggia.it", 2, false}, - {1, "forli-cesena.it", 2, false}, - {1, "xn--forl-cesena-fcb.it", 2, false}, - {1, "forlicesena.it", 2, false}, - {1, "xn--forlcesena-c8a.it", 2, false}, - {1, "fr.it", 2, false}, - {1, "frosinone.it", 2, false}, - {1, "ge.it", 2, false}, - {1, "genoa.it", 2, false}, - {1, "genova.it", 2, false}, - {1, "go.it", 2, false}, - {1, "gorizia.it", 2, false}, - {1, "gr.it", 2, false}, - {1, "grosseto.it", 2, false}, - {1, "iglesias-carbonia.it", 2, false}, - {1, "iglesiascarbonia.it", 2, false}, - {1, "im.it", 2, false}, - {1, "imperia.it", 2, false}, - {1, "is.it", 2, false}, - {1, "isernia.it", 2, false}, - {1, "kr.it", 2, false}, - {1, "la-spezia.it", 2, false}, - {1, "laquila.it", 2, false}, - {1, "laspezia.it", 2, false}, - {1, "latina.it", 2, false}, - {1, "lc.it", 2, false}, - {1, "le.it", 2, false}, - {1, "lecce.it", 2, false}, - {1, "lecco.it", 2, false}, - {1, "li.it", 2, 
false}, - {1, "livorno.it", 2, false}, - {1, "lo.it", 2, false}, - {1, "lodi.it", 2, false}, - {1, "lt.it", 2, false}, - {1, "lu.it", 2, false}, - {1, "lucca.it", 2, false}, - {1, "macerata.it", 2, false}, - {1, "mantova.it", 2, false}, - {1, "massa-carrara.it", 2, false}, - {1, "massacarrara.it", 2, false}, - {1, "matera.it", 2, false}, - {1, "mb.it", 2, false}, - {1, "mc.it", 2, false}, - {1, "me.it", 2, false}, - {1, "medio-campidano.it", 2, false}, - {1, "mediocampidano.it", 2, false}, - {1, "messina.it", 2, false}, - {1, "mi.it", 2, false}, - {1, "milan.it", 2, false}, - {1, "milano.it", 2, false}, - {1, "mn.it", 2, false}, - {1, "mo.it", 2, false}, - {1, "modena.it", 2, false}, - {1, "monza-brianza.it", 2, false}, - {1, "monza-e-della-brianza.it", 2, false}, - {1, "monza.it", 2, false}, - {1, "monzabrianza.it", 2, false}, - {1, "monzaebrianza.it", 2, false}, - {1, "monzaedellabrianza.it", 2, false}, - {1, "ms.it", 2, false}, - {1, "mt.it", 2, false}, - {1, "na.it", 2, false}, - {1, "naples.it", 2, false}, - {1, "napoli.it", 2, false}, - {1, "no.it", 2, false}, - {1, "novara.it", 2, false}, - {1, "nu.it", 2, false}, - {1, "nuoro.it", 2, false}, - {1, "og.it", 2, false}, - {1, "ogliastra.it", 2, false}, - {1, "olbia-tempio.it", 2, false}, - {1, "olbiatempio.it", 2, false}, - {1, "or.it", 2, false}, - {1, "oristano.it", 2, false}, - {1, "ot.it", 2, false}, - {1, "pa.it", 2, false}, - {1, "padova.it", 2, false}, - {1, "padua.it", 2, false}, - {1, "palermo.it", 2, false}, - {1, "parma.it", 2, false}, - {1, "pavia.it", 2, false}, - {1, "pc.it", 2, false}, - {1, "pd.it", 2, false}, - {1, "pe.it", 2, false}, - {1, "perugia.it", 2, false}, - {1, "pesaro-urbino.it", 2, false}, - {1, "pesarourbino.it", 2, false}, - {1, "pescara.it", 2, false}, - {1, "pg.it", 2, false}, - {1, "pi.it", 2, false}, - {1, "piacenza.it", 2, false}, - {1, "pisa.it", 2, false}, - {1, "pistoia.it", 2, false}, - {1, "pn.it", 2, false}, - {1, "po.it", 2, false}, - {1, "pordenone.it", 2, false}, - 
{1, "potenza.it", 2, false}, - {1, "pr.it", 2, false}, - {1, "prato.it", 2, false}, - {1, "pt.it", 2, false}, - {1, "pu.it", 2, false}, - {1, "pv.it", 2, false}, - {1, "pz.it", 2, false}, - {1, "ra.it", 2, false}, - {1, "ragusa.it", 2, false}, - {1, "ravenna.it", 2, false}, - {1, "rc.it", 2, false}, - {1, "re.it", 2, false}, - {1, "reggio-calabria.it", 2, false}, - {1, "reggio-emilia.it", 2, false}, - {1, "reggiocalabria.it", 2, false}, - {1, "reggioemilia.it", 2, false}, - {1, "rg.it", 2, false}, - {1, "ri.it", 2, false}, - {1, "rieti.it", 2, false}, - {1, "rimini.it", 2, false}, - {1, "rm.it", 2, false}, - {1, "rn.it", 2, false}, - {1, "ro.it", 2, false}, - {1, "roma.it", 2, false}, - {1, "rome.it", 2, false}, - {1, "rovigo.it", 2, false}, - {1, "sa.it", 2, false}, - {1, "salerno.it", 2, false}, - {1, "sassari.it", 2, false}, - {1, "savona.it", 2, false}, - {1, "si.it", 2, false}, - {1, "siena.it", 2, false}, - {1, "siracusa.it", 2, false}, - {1, "so.it", 2, false}, - {1, "sondrio.it", 2, false}, - {1, "sp.it", 2, false}, - {1, "sr.it", 2, false}, - {1, "ss.it", 2, false}, - {1, "suedtirol.it", 2, false}, - {1, "xn--sdtirol-n2a.it", 2, false}, - {1, "sv.it", 2, false}, - {1, "ta.it", 2, false}, - {1, "taranto.it", 2, false}, - {1, "te.it", 2, false}, - {1, "tempio-olbia.it", 2, false}, - {1, "tempioolbia.it", 2, false}, - {1, "teramo.it", 2, false}, - {1, "terni.it", 2, false}, - {1, "tn.it", 2, false}, - {1, "to.it", 2, false}, - {1, "torino.it", 2, false}, - {1, "tp.it", 2, false}, - {1, "tr.it", 2, false}, - {1, "trani-andria-barletta.it", 2, false}, - {1, "trani-barletta-andria.it", 2, false}, - {1, "traniandriabarletta.it", 2, false}, - {1, "tranibarlettaandria.it", 2, false}, - {1, "trapani.it", 2, false}, - {1, "trento.it", 2, false}, - {1, "treviso.it", 2, false}, - {1, "trieste.it", 2, false}, - {1, "ts.it", 2, false}, - {1, "turin.it", 2, false}, - {1, "tv.it", 2, false}, - {1, "ud.it", 2, false}, - {1, "udine.it", 2, false}, - {1, "urbino-pesaro.it", 
2, false}, - {1, "urbinopesaro.it", 2, false}, - {1, "va.it", 2, false}, - {1, "varese.it", 2, false}, - {1, "vb.it", 2, false}, - {1, "vc.it", 2, false}, - {1, "ve.it", 2, false}, - {1, "venezia.it", 2, false}, - {1, "venice.it", 2, false}, - {1, "verbania.it", 2, false}, - {1, "vercelli.it", 2, false}, - {1, "verona.it", 2, false}, - {1, "vi.it", 2, false}, - {1, "vibo-valentia.it", 2, false}, - {1, "vibovalentia.it", 2, false}, - {1, "vicenza.it", 2, false}, - {1, "viterbo.it", 2, false}, - {1, "vr.it", 2, false}, - {1, "vs.it", 2, false}, - {1, "vt.it", 2, false}, - {1, "vv.it", 2, false}, - {1, "je", 1, false}, - {1, "co.je", 2, false}, - {1, "net.je", 2, false}, - {1, "org.je", 2, false}, - {2, "jm", 2, false}, - {1, "jo", 1, false}, - {1, "com.jo", 2, false}, - {1, "org.jo", 2, false}, - {1, "net.jo", 2, false}, - {1, "edu.jo", 2, false}, - {1, "sch.jo", 2, false}, - {1, "gov.jo", 2, false}, - {1, "mil.jo", 2, false}, - {1, "name.jo", 2, false}, - {1, "jobs", 1, false}, - {1, "jp", 1, false}, - {1, "ac.jp", 2, false}, - {1, "ad.jp", 2, false}, - {1, "co.jp", 2, false}, - {1, "ed.jp", 2, false}, - {1, "go.jp", 2, false}, - {1, "gr.jp", 2, false}, - {1, "lg.jp", 2, false}, - {1, "ne.jp", 2, false}, - {1, "or.jp", 2, false}, - {1, "aichi.jp", 2, false}, - {1, "akita.jp", 2, false}, - {1, "aomori.jp", 2, false}, - {1, "chiba.jp", 2, false}, - {1, "ehime.jp", 2, false}, - {1, "fukui.jp", 2, false}, - {1, "fukuoka.jp", 2, false}, - {1, "fukushima.jp", 2, false}, - {1, "gifu.jp", 2, false}, - {1, "gunma.jp", 2, false}, - {1, "hiroshima.jp", 2, false}, - {1, "hokkaido.jp", 2, false}, - {1, "hyogo.jp", 2, false}, - {1, "ibaraki.jp", 2, false}, - {1, "ishikawa.jp", 2, false}, - {1, "iwate.jp", 2, false}, - {1, "kagawa.jp", 2, false}, - {1, "kagoshima.jp", 2, false}, - {1, "kanagawa.jp", 2, false}, - {1, "kochi.jp", 2, false}, - {1, "kumamoto.jp", 2, false}, - {1, "kyoto.jp", 2, false}, - {1, "mie.jp", 2, false}, - {1, "miyagi.jp", 2, false}, - {1, "miyazaki.jp", 2, 
false}, - {1, "nagano.jp", 2, false}, - {1, "nagasaki.jp", 2, false}, - {1, "nara.jp", 2, false}, - {1, "niigata.jp", 2, false}, - {1, "oita.jp", 2, false}, - {1, "okayama.jp", 2, false}, - {1, "okinawa.jp", 2, false}, - {1, "osaka.jp", 2, false}, - {1, "saga.jp", 2, false}, - {1, "saitama.jp", 2, false}, - {1, "shiga.jp", 2, false}, - {1, "shimane.jp", 2, false}, - {1, "shizuoka.jp", 2, false}, - {1, "tochigi.jp", 2, false}, - {1, "tokushima.jp", 2, false}, - {1, "tokyo.jp", 2, false}, - {1, "tottori.jp", 2, false}, - {1, "toyama.jp", 2, false}, - {1, "wakayama.jp", 2, false}, - {1, "yamagata.jp", 2, false}, - {1, "yamaguchi.jp", 2, false}, - {1, "yamanashi.jp", 2, false}, - {1, "xn--4pvxs.jp", 2, false}, - {1, "xn--vgu402c.jp", 2, false}, - {1, "xn--c3s14m.jp", 2, false}, - {1, "xn--f6qx53a.jp", 2, false}, - {1, "xn--8pvr4u.jp", 2, false}, - {1, "xn--uist22h.jp", 2, false}, - {1, "xn--djrs72d6uy.jp", 2, false}, - {1, "xn--mkru45i.jp", 2, false}, - {1, "xn--0trq7p7nn.jp", 2, false}, - {1, "xn--8ltr62k.jp", 2, false}, - {1, "xn--2m4a15e.jp", 2, false}, - {1, "xn--efvn9s.jp", 2, false}, - {1, "xn--32vp30h.jp", 2, false}, - {1, "xn--4it797k.jp", 2, false}, - {1, "xn--1lqs71d.jp", 2, false}, - {1, "xn--5rtp49c.jp", 2, false}, - {1, "xn--5js045d.jp", 2, false}, - {1, "xn--ehqz56n.jp", 2, false}, - {1, "xn--1lqs03n.jp", 2, false}, - {1, "xn--qqqt11m.jp", 2, false}, - {1, "xn--kbrq7o.jp", 2, false}, - {1, "xn--pssu33l.jp", 2, false}, - {1, "xn--ntsq17g.jp", 2, false}, - {1, "xn--uisz3g.jp", 2, false}, - {1, "xn--6btw5a.jp", 2, false}, - {1, "xn--1ctwo.jp", 2, false}, - {1, "xn--6orx2r.jp", 2, false}, - {1, "xn--rht61e.jp", 2, false}, - {1, "xn--rht27z.jp", 2, false}, - {1, "xn--djty4k.jp", 2, false}, - {1, "xn--nit225k.jp", 2, false}, - {1, "xn--rht3d.jp", 2, false}, - {1, "xn--klty5x.jp", 2, false}, - {1, "xn--kltx9a.jp", 2, false}, - {1, "xn--kltp7d.jp", 2, false}, - {1, "xn--uuwu58a.jp", 2, false}, - {1, "xn--zbx025d.jp", 2, false}, - {1, "xn--ntso0iqx3a.jp", 2, 
false}, - {1, "xn--elqq16h.jp", 2, false}, - {1, "xn--4it168d.jp", 2, false}, - {1, "xn--klt787d.jp", 2, false}, - {1, "xn--rny31h.jp", 2, false}, - {1, "xn--7t0a264c.jp", 2, false}, - {1, "xn--5rtq34k.jp", 2, false}, - {1, "xn--k7yn95e.jp", 2, false}, - {1, "xn--tor131o.jp", 2, false}, - {1, "xn--d5qv7z876c.jp", 2, false}, - {2, "kawasaki.jp", 3, false}, - {2, "kitakyushu.jp", 3, false}, - {2, "kobe.jp", 3, false}, - {2, "nagoya.jp", 3, false}, - {2, "sapporo.jp", 3, false}, - {2, "sendai.jp", 3, false}, - {2, "yokohama.jp", 3, false}, - {3, "city.kawasaki.jp", 3, false}, - {3, "city.kitakyushu.jp", 3, false}, - {3, "city.kobe.jp", 3, false}, - {3, "city.nagoya.jp", 3, false}, - {3, "city.sapporo.jp", 3, false}, - {3, "city.sendai.jp", 3, false}, - {3, "city.yokohama.jp", 3, false}, - {1, "aisai.aichi.jp", 3, false}, - {1, "ama.aichi.jp", 3, false}, - {1, "anjo.aichi.jp", 3, false}, - {1, "asuke.aichi.jp", 3, false}, - {1, "chiryu.aichi.jp", 3, false}, - {1, "chita.aichi.jp", 3, false}, - {1, "fuso.aichi.jp", 3, false}, - {1, "gamagori.aichi.jp", 3, false}, - {1, "handa.aichi.jp", 3, false}, - {1, "hazu.aichi.jp", 3, false}, - {1, "hekinan.aichi.jp", 3, false}, - {1, "higashiura.aichi.jp", 3, false}, - {1, "ichinomiya.aichi.jp", 3, false}, - {1, "inazawa.aichi.jp", 3, false}, - {1, "inuyama.aichi.jp", 3, false}, - {1, "isshiki.aichi.jp", 3, false}, - {1, "iwakura.aichi.jp", 3, false}, - {1, "kanie.aichi.jp", 3, false}, - {1, "kariya.aichi.jp", 3, false}, - {1, "kasugai.aichi.jp", 3, false}, - {1, "kira.aichi.jp", 3, false}, - {1, "kiyosu.aichi.jp", 3, false}, - {1, "komaki.aichi.jp", 3, false}, - {1, "konan.aichi.jp", 3, false}, - {1, "kota.aichi.jp", 3, false}, - {1, "mihama.aichi.jp", 3, false}, - {1, "miyoshi.aichi.jp", 3, false}, - {1, "nishio.aichi.jp", 3, false}, - {1, "nisshin.aichi.jp", 3, false}, - {1, "obu.aichi.jp", 3, false}, - {1, "oguchi.aichi.jp", 3, false}, - {1, "oharu.aichi.jp", 3, false}, - {1, "okazaki.aichi.jp", 3, false}, - {1, 
"owariasahi.aichi.jp", 3, false}, - {1, "seto.aichi.jp", 3, false}, - {1, "shikatsu.aichi.jp", 3, false}, - {1, "shinshiro.aichi.jp", 3, false}, - {1, "shitara.aichi.jp", 3, false}, - {1, "tahara.aichi.jp", 3, false}, - {1, "takahama.aichi.jp", 3, false}, - {1, "tobishima.aichi.jp", 3, false}, - {1, "toei.aichi.jp", 3, false}, - {1, "togo.aichi.jp", 3, false}, - {1, "tokai.aichi.jp", 3, false}, - {1, "tokoname.aichi.jp", 3, false}, - {1, "toyoake.aichi.jp", 3, false}, - {1, "toyohashi.aichi.jp", 3, false}, - {1, "toyokawa.aichi.jp", 3, false}, - {1, "toyone.aichi.jp", 3, false}, - {1, "toyota.aichi.jp", 3, false}, - {1, "tsushima.aichi.jp", 3, false}, - {1, "yatomi.aichi.jp", 3, false}, - {1, "akita.akita.jp", 3, false}, - {1, "daisen.akita.jp", 3, false}, - {1, "fujisato.akita.jp", 3, false}, - {1, "gojome.akita.jp", 3, false}, - {1, "hachirogata.akita.jp", 3, false}, - {1, "happou.akita.jp", 3, false}, - {1, "higashinaruse.akita.jp", 3, false}, - {1, "honjo.akita.jp", 3, false}, - {1, "honjyo.akita.jp", 3, false}, - {1, "ikawa.akita.jp", 3, false}, - {1, "kamikoani.akita.jp", 3, false}, - {1, "kamioka.akita.jp", 3, false}, - {1, "katagami.akita.jp", 3, false}, - {1, "kazuno.akita.jp", 3, false}, - {1, "kitaakita.akita.jp", 3, false}, - {1, "kosaka.akita.jp", 3, false}, - {1, "kyowa.akita.jp", 3, false}, - {1, "misato.akita.jp", 3, false}, - {1, "mitane.akita.jp", 3, false}, - {1, "moriyoshi.akita.jp", 3, false}, - {1, "nikaho.akita.jp", 3, false}, - {1, "noshiro.akita.jp", 3, false}, - {1, "odate.akita.jp", 3, false}, - {1, "oga.akita.jp", 3, false}, - {1, "ogata.akita.jp", 3, false}, - {1, "semboku.akita.jp", 3, false}, - {1, "yokote.akita.jp", 3, false}, - {1, "yurihonjo.akita.jp", 3, false}, - {1, "aomori.aomori.jp", 3, false}, - {1, "gonohe.aomori.jp", 3, false}, - {1, "hachinohe.aomori.jp", 3, false}, - {1, "hashikami.aomori.jp", 3, false}, - {1, "hiranai.aomori.jp", 3, false}, - {1, "hirosaki.aomori.jp", 3, false}, - {1, "itayanagi.aomori.jp", 3, false}, - 
{1, "kuroishi.aomori.jp", 3, false}, - {1, "misawa.aomori.jp", 3, false}, - {1, "mutsu.aomori.jp", 3, false}, - {1, "nakadomari.aomori.jp", 3, false}, - {1, "noheji.aomori.jp", 3, false}, - {1, "oirase.aomori.jp", 3, false}, - {1, "owani.aomori.jp", 3, false}, - {1, "rokunohe.aomori.jp", 3, false}, - {1, "sannohe.aomori.jp", 3, false}, - {1, "shichinohe.aomori.jp", 3, false}, - {1, "shingo.aomori.jp", 3, false}, - {1, "takko.aomori.jp", 3, false}, - {1, "towada.aomori.jp", 3, false}, - {1, "tsugaru.aomori.jp", 3, false}, - {1, "tsuruta.aomori.jp", 3, false}, - {1, "abiko.chiba.jp", 3, false}, - {1, "asahi.chiba.jp", 3, false}, - {1, "chonan.chiba.jp", 3, false}, - {1, "chosei.chiba.jp", 3, false}, - {1, "choshi.chiba.jp", 3, false}, - {1, "chuo.chiba.jp", 3, false}, - {1, "funabashi.chiba.jp", 3, false}, - {1, "futtsu.chiba.jp", 3, false}, - {1, "hanamigawa.chiba.jp", 3, false}, - {1, "ichihara.chiba.jp", 3, false}, - {1, "ichikawa.chiba.jp", 3, false}, - {1, "ichinomiya.chiba.jp", 3, false}, - {1, "inzai.chiba.jp", 3, false}, - {1, "isumi.chiba.jp", 3, false}, - {1, "kamagaya.chiba.jp", 3, false}, - {1, "kamogawa.chiba.jp", 3, false}, - {1, "kashiwa.chiba.jp", 3, false}, - {1, "katori.chiba.jp", 3, false}, - {1, "katsuura.chiba.jp", 3, false}, - {1, "kimitsu.chiba.jp", 3, false}, - {1, "kisarazu.chiba.jp", 3, false}, - {1, "kozaki.chiba.jp", 3, false}, - {1, "kujukuri.chiba.jp", 3, false}, - {1, "kyonan.chiba.jp", 3, false}, - {1, "matsudo.chiba.jp", 3, false}, - {1, "midori.chiba.jp", 3, false}, - {1, "mihama.chiba.jp", 3, false}, - {1, "minamiboso.chiba.jp", 3, false}, - {1, "mobara.chiba.jp", 3, false}, - {1, "mutsuzawa.chiba.jp", 3, false}, - {1, "nagara.chiba.jp", 3, false}, - {1, "nagareyama.chiba.jp", 3, false}, - {1, "narashino.chiba.jp", 3, false}, - {1, "narita.chiba.jp", 3, false}, - {1, "noda.chiba.jp", 3, false}, - {1, "oamishirasato.chiba.jp", 3, false}, - {1, "omigawa.chiba.jp", 3, false}, - {1, "onjuku.chiba.jp", 3, false}, - {1, "otaki.chiba.jp", 
3, false}, - {1, "sakae.chiba.jp", 3, false}, - {1, "sakura.chiba.jp", 3, false}, - {1, "shimofusa.chiba.jp", 3, false}, - {1, "shirako.chiba.jp", 3, false}, - {1, "shiroi.chiba.jp", 3, false}, - {1, "shisui.chiba.jp", 3, false}, - {1, "sodegaura.chiba.jp", 3, false}, - {1, "sosa.chiba.jp", 3, false}, - {1, "tako.chiba.jp", 3, false}, - {1, "tateyama.chiba.jp", 3, false}, - {1, "togane.chiba.jp", 3, false}, - {1, "tohnosho.chiba.jp", 3, false}, - {1, "tomisato.chiba.jp", 3, false}, - {1, "urayasu.chiba.jp", 3, false}, - {1, "yachimata.chiba.jp", 3, false}, - {1, "yachiyo.chiba.jp", 3, false}, - {1, "yokaichiba.chiba.jp", 3, false}, - {1, "yokoshibahikari.chiba.jp", 3, false}, - {1, "yotsukaido.chiba.jp", 3, false}, - {1, "ainan.ehime.jp", 3, false}, - {1, "honai.ehime.jp", 3, false}, - {1, "ikata.ehime.jp", 3, false}, - {1, "imabari.ehime.jp", 3, false}, - {1, "iyo.ehime.jp", 3, false}, - {1, "kamijima.ehime.jp", 3, false}, - {1, "kihoku.ehime.jp", 3, false}, - {1, "kumakogen.ehime.jp", 3, false}, - {1, "masaki.ehime.jp", 3, false}, - {1, "matsuno.ehime.jp", 3, false}, - {1, "matsuyama.ehime.jp", 3, false}, - {1, "namikata.ehime.jp", 3, false}, - {1, "niihama.ehime.jp", 3, false}, - {1, "ozu.ehime.jp", 3, false}, - {1, "saijo.ehime.jp", 3, false}, - {1, "seiyo.ehime.jp", 3, false}, - {1, "shikokuchuo.ehime.jp", 3, false}, - {1, "tobe.ehime.jp", 3, false}, - {1, "toon.ehime.jp", 3, false}, - {1, "uchiko.ehime.jp", 3, false}, - {1, "uwajima.ehime.jp", 3, false}, - {1, "yawatahama.ehime.jp", 3, false}, - {1, "echizen.fukui.jp", 3, false}, - {1, "eiheiji.fukui.jp", 3, false}, - {1, "fukui.fukui.jp", 3, false}, - {1, "ikeda.fukui.jp", 3, false}, - {1, "katsuyama.fukui.jp", 3, false}, - {1, "mihama.fukui.jp", 3, false}, - {1, "minamiechizen.fukui.jp", 3, false}, - {1, "obama.fukui.jp", 3, false}, - {1, "ohi.fukui.jp", 3, false}, - {1, "ono.fukui.jp", 3, false}, - {1, "sabae.fukui.jp", 3, false}, - {1, "sakai.fukui.jp", 3, false}, - {1, "takahama.fukui.jp", 3, false}, - 
{1, "tsuruga.fukui.jp", 3, false}, - {1, "wakasa.fukui.jp", 3, false}, - {1, "ashiya.fukuoka.jp", 3, false}, - {1, "buzen.fukuoka.jp", 3, false}, - {1, "chikugo.fukuoka.jp", 3, false}, - {1, "chikuho.fukuoka.jp", 3, false}, - {1, "chikujo.fukuoka.jp", 3, false}, - {1, "chikushino.fukuoka.jp", 3, false}, - {1, "chikuzen.fukuoka.jp", 3, false}, - {1, "chuo.fukuoka.jp", 3, false}, - {1, "dazaifu.fukuoka.jp", 3, false}, - {1, "fukuchi.fukuoka.jp", 3, false}, - {1, "hakata.fukuoka.jp", 3, false}, - {1, "higashi.fukuoka.jp", 3, false}, - {1, "hirokawa.fukuoka.jp", 3, false}, - {1, "hisayama.fukuoka.jp", 3, false}, - {1, "iizuka.fukuoka.jp", 3, false}, - {1, "inatsuki.fukuoka.jp", 3, false}, - {1, "kaho.fukuoka.jp", 3, false}, - {1, "kasuga.fukuoka.jp", 3, false}, - {1, "kasuya.fukuoka.jp", 3, false}, - {1, "kawara.fukuoka.jp", 3, false}, - {1, "keisen.fukuoka.jp", 3, false}, - {1, "koga.fukuoka.jp", 3, false}, - {1, "kurate.fukuoka.jp", 3, false}, - {1, "kurogi.fukuoka.jp", 3, false}, - {1, "kurume.fukuoka.jp", 3, false}, - {1, "minami.fukuoka.jp", 3, false}, - {1, "miyako.fukuoka.jp", 3, false}, - {1, "miyama.fukuoka.jp", 3, false}, - {1, "miyawaka.fukuoka.jp", 3, false}, - {1, "mizumaki.fukuoka.jp", 3, false}, - {1, "munakata.fukuoka.jp", 3, false}, - {1, "nakagawa.fukuoka.jp", 3, false}, - {1, "nakama.fukuoka.jp", 3, false}, - {1, "nishi.fukuoka.jp", 3, false}, - {1, "nogata.fukuoka.jp", 3, false}, - {1, "ogori.fukuoka.jp", 3, false}, - {1, "okagaki.fukuoka.jp", 3, false}, - {1, "okawa.fukuoka.jp", 3, false}, - {1, "oki.fukuoka.jp", 3, false}, - {1, "omuta.fukuoka.jp", 3, false}, - {1, "onga.fukuoka.jp", 3, false}, - {1, "onojo.fukuoka.jp", 3, false}, - {1, "oto.fukuoka.jp", 3, false}, - {1, "saigawa.fukuoka.jp", 3, false}, - {1, "sasaguri.fukuoka.jp", 3, false}, - {1, "shingu.fukuoka.jp", 3, false}, - {1, "shinyoshitomi.fukuoka.jp", 3, false}, - {1, "shonai.fukuoka.jp", 3, false}, - {1, "soeda.fukuoka.jp", 3, false}, - {1, "sue.fukuoka.jp", 3, false}, - {1, 
"tachiarai.fukuoka.jp", 3, false}, - {1, "tagawa.fukuoka.jp", 3, false}, - {1, "takata.fukuoka.jp", 3, false}, - {1, "toho.fukuoka.jp", 3, false}, - {1, "toyotsu.fukuoka.jp", 3, false}, - {1, "tsuiki.fukuoka.jp", 3, false}, - {1, "ukiha.fukuoka.jp", 3, false}, - {1, "umi.fukuoka.jp", 3, false}, - {1, "usui.fukuoka.jp", 3, false}, - {1, "yamada.fukuoka.jp", 3, false}, - {1, "yame.fukuoka.jp", 3, false}, - {1, "yanagawa.fukuoka.jp", 3, false}, - {1, "yukuhashi.fukuoka.jp", 3, false}, - {1, "aizubange.fukushima.jp", 3, false}, - {1, "aizumisato.fukushima.jp", 3, false}, - {1, "aizuwakamatsu.fukushima.jp", 3, false}, - {1, "asakawa.fukushima.jp", 3, false}, - {1, "bandai.fukushima.jp", 3, false}, - {1, "date.fukushima.jp", 3, false}, - {1, "fukushima.fukushima.jp", 3, false}, - {1, "furudono.fukushima.jp", 3, false}, - {1, "futaba.fukushima.jp", 3, false}, - {1, "hanawa.fukushima.jp", 3, false}, - {1, "higashi.fukushima.jp", 3, false}, - {1, "hirata.fukushima.jp", 3, false}, - {1, "hirono.fukushima.jp", 3, false}, - {1, "iitate.fukushima.jp", 3, false}, - {1, "inawashiro.fukushima.jp", 3, false}, - {1, "ishikawa.fukushima.jp", 3, false}, - {1, "iwaki.fukushima.jp", 3, false}, - {1, "izumizaki.fukushima.jp", 3, false}, - {1, "kagamiishi.fukushima.jp", 3, false}, - {1, "kaneyama.fukushima.jp", 3, false}, - {1, "kawamata.fukushima.jp", 3, false}, - {1, "kitakata.fukushima.jp", 3, false}, - {1, "kitashiobara.fukushima.jp", 3, false}, - {1, "koori.fukushima.jp", 3, false}, - {1, "koriyama.fukushima.jp", 3, false}, - {1, "kunimi.fukushima.jp", 3, false}, - {1, "miharu.fukushima.jp", 3, false}, - {1, "mishima.fukushima.jp", 3, false}, - {1, "namie.fukushima.jp", 3, false}, - {1, "nango.fukushima.jp", 3, false}, - {1, "nishiaizu.fukushima.jp", 3, false}, - {1, "nishigo.fukushima.jp", 3, false}, - {1, "okuma.fukushima.jp", 3, false}, - {1, "omotego.fukushima.jp", 3, false}, - {1, "ono.fukushima.jp", 3, false}, - {1, "otama.fukushima.jp", 3, false}, - {1, 
"samegawa.fukushima.jp", 3, false}, - {1, "shimogo.fukushima.jp", 3, false}, - {1, "shirakawa.fukushima.jp", 3, false}, - {1, "showa.fukushima.jp", 3, false}, - {1, "soma.fukushima.jp", 3, false}, - {1, "sukagawa.fukushima.jp", 3, false}, - {1, "taishin.fukushima.jp", 3, false}, - {1, "tamakawa.fukushima.jp", 3, false}, - {1, "tanagura.fukushima.jp", 3, false}, - {1, "tenei.fukushima.jp", 3, false}, - {1, "yabuki.fukushima.jp", 3, false}, - {1, "yamato.fukushima.jp", 3, false}, - {1, "yamatsuri.fukushima.jp", 3, false}, - {1, "yanaizu.fukushima.jp", 3, false}, - {1, "yugawa.fukushima.jp", 3, false}, - {1, "anpachi.gifu.jp", 3, false}, - {1, "ena.gifu.jp", 3, false}, - {1, "gifu.gifu.jp", 3, false}, - {1, "ginan.gifu.jp", 3, false}, - {1, "godo.gifu.jp", 3, false}, - {1, "gujo.gifu.jp", 3, false}, - {1, "hashima.gifu.jp", 3, false}, - {1, "hichiso.gifu.jp", 3, false}, - {1, "hida.gifu.jp", 3, false}, - {1, "higashishirakawa.gifu.jp", 3, false}, - {1, "ibigawa.gifu.jp", 3, false}, - {1, "ikeda.gifu.jp", 3, false}, - {1, "kakamigahara.gifu.jp", 3, false}, - {1, "kani.gifu.jp", 3, false}, - {1, "kasahara.gifu.jp", 3, false}, - {1, "kasamatsu.gifu.jp", 3, false}, - {1, "kawaue.gifu.jp", 3, false}, - {1, "kitagata.gifu.jp", 3, false}, - {1, "mino.gifu.jp", 3, false}, - {1, "minokamo.gifu.jp", 3, false}, - {1, "mitake.gifu.jp", 3, false}, - {1, "mizunami.gifu.jp", 3, false}, - {1, "motosu.gifu.jp", 3, false}, - {1, "nakatsugawa.gifu.jp", 3, false}, - {1, "ogaki.gifu.jp", 3, false}, - {1, "sakahogi.gifu.jp", 3, false}, - {1, "seki.gifu.jp", 3, false}, - {1, "sekigahara.gifu.jp", 3, false}, - {1, "shirakawa.gifu.jp", 3, false}, - {1, "tajimi.gifu.jp", 3, false}, - {1, "takayama.gifu.jp", 3, false}, - {1, "tarui.gifu.jp", 3, false}, - {1, "toki.gifu.jp", 3, false}, - {1, "tomika.gifu.jp", 3, false}, - {1, "wanouchi.gifu.jp", 3, false}, - {1, "yamagata.gifu.jp", 3, false}, - {1, "yaotsu.gifu.jp", 3, false}, - {1, "yoro.gifu.jp", 3, false}, - {1, "annaka.gunma.jp", 3, false}, 
- {1, "chiyoda.gunma.jp", 3, false}, - {1, "fujioka.gunma.jp", 3, false}, - {1, "higashiagatsuma.gunma.jp", 3, false}, - {1, "isesaki.gunma.jp", 3, false}, - {1, "itakura.gunma.jp", 3, false}, - {1, "kanna.gunma.jp", 3, false}, - {1, "kanra.gunma.jp", 3, false}, - {1, "katashina.gunma.jp", 3, false}, - {1, "kawaba.gunma.jp", 3, false}, - {1, "kiryu.gunma.jp", 3, false}, - {1, "kusatsu.gunma.jp", 3, false}, - {1, "maebashi.gunma.jp", 3, false}, - {1, "meiwa.gunma.jp", 3, false}, - {1, "midori.gunma.jp", 3, false}, - {1, "minakami.gunma.jp", 3, false}, - {1, "naganohara.gunma.jp", 3, false}, - {1, "nakanojo.gunma.jp", 3, false}, - {1, "nanmoku.gunma.jp", 3, false}, - {1, "numata.gunma.jp", 3, false}, - {1, "oizumi.gunma.jp", 3, false}, - {1, "ora.gunma.jp", 3, false}, - {1, "ota.gunma.jp", 3, false}, - {1, "shibukawa.gunma.jp", 3, false}, - {1, "shimonita.gunma.jp", 3, false}, - {1, "shinto.gunma.jp", 3, false}, - {1, "showa.gunma.jp", 3, false}, - {1, "takasaki.gunma.jp", 3, false}, - {1, "takayama.gunma.jp", 3, false}, - {1, "tamamura.gunma.jp", 3, false}, - {1, "tatebayashi.gunma.jp", 3, false}, - {1, "tomioka.gunma.jp", 3, false}, - {1, "tsukiyono.gunma.jp", 3, false}, - {1, "tsumagoi.gunma.jp", 3, false}, - {1, "ueno.gunma.jp", 3, false}, - {1, "yoshioka.gunma.jp", 3, false}, - {1, "asaminami.hiroshima.jp", 3, false}, - {1, "daiwa.hiroshima.jp", 3, false}, - {1, "etajima.hiroshima.jp", 3, false}, - {1, "fuchu.hiroshima.jp", 3, false}, - {1, "fukuyama.hiroshima.jp", 3, false}, - {1, "hatsukaichi.hiroshima.jp", 3, false}, - {1, "higashihiroshima.hiroshima.jp", 3, false}, - {1, "hongo.hiroshima.jp", 3, false}, - {1, "jinsekikogen.hiroshima.jp", 3, false}, - {1, "kaita.hiroshima.jp", 3, false}, - {1, "kui.hiroshima.jp", 3, false}, - {1, "kumano.hiroshima.jp", 3, false}, - {1, "kure.hiroshima.jp", 3, false}, - {1, "mihara.hiroshima.jp", 3, false}, - {1, "miyoshi.hiroshima.jp", 3, false}, - {1, "naka.hiroshima.jp", 3, false}, - {1, "onomichi.hiroshima.jp", 3, false}, 
- {1, "osakikamijima.hiroshima.jp", 3, false}, - {1, "otake.hiroshima.jp", 3, false}, - {1, "saka.hiroshima.jp", 3, false}, - {1, "sera.hiroshima.jp", 3, false}, - {1, "seranishi.hiroshima.jp", 3, false}, - {1, "shinichi.hiroshima.jp", 3, false}, - {1, "shobara.hiroshima.jp", 3, false}, - {1, "takehara.hiroshima.jp", 3, false}, - {1, "abashiri.hokkaido.jp", 3, false}, - {1, "abira.hokkaido.jp", 3, false}, - {1, "aibetsu.hokkaido.jp", 3, false}, - {1, "akabira.hokkaido.jp", 3, false}, - {1, "akkeshi.hokkaido.jp", 3, false}, - {1, "asahikawa.hokkaido.jp", 3, false}, - {1, "ashibetsu.hokkaido.jp", 3, false}, - {1, "ashoro.hokkaido.jp", 3, false}, - {1, "assabu.hokkaido.jp", 3, false}, - {1, "atsuma.hokkaido.jp", 3, false}, - {1, "bibai.hokkaido.jp", 3, false}, - {1, "biei.hokkaido.jp", 3, false}, - {1, "bifuka.hokkaido.jp", 3, false}, - {1, "bihoro.hokkaido.jp", 3, false}, - {1, "biratori.hokkaido.jp", 3, false}, - {1, "chippubetsu.hokkaido.jp", 3, false}, - {1, "chitose.hokkaido.jp", 3, false}, - {1, "date.hokkaido.jp", 3, false}, - {1, "ebetsu.hokkaido.jp", 3, false}, - {1, "embetsu.hokkaido.jp", 3, false}, - {1, "eniwa.hokkaido.jp", 3, false}, - {1, "erimo.hokkaido.jp", 3, false}, - {1, "esan.hokkaido.jp", 3, false}, - {1, "esashi.hokkaido.jp", 3, false}, - {1, "fukagawa.hokkaido.jp", 3, false}, - {1, "fukushima.hokkaido.jp", 3, false}, - {1, "furano.hokkaido.jp", 3, false}, - {1, "furubira.hokkaido.jp", 3, false}, - {1, "haboro.hokkaido.jp", 3, false}, - {1, "hakodate.hokkaido.jp", 3, false}, - {1, "hamatonbetsu.hokkaido.jp", 3, false}, - {1, "hidaka.hokkaido.jp", 3, false}, - {1, "higashikagura.hokkaido.jp", 3, false}, - {1, "higashikawa.hokkaido.jp", 3, false}, - {1, "hiroo.hokkaido.jp", 3, false}, - {1, "hokuryu.hokkaido.jp", 3, false}, - {1, "hokuto.hokkaido.jp", 3, false}, - {1, "honbetsu.hokkaido.jp", 3, false}, - {1, "horokanai.hokkaido.jp", 3, false}, - {1, "horonobe.hokkaido.jp", 3, false}, - {1, "ikeda.hokkaido.jp", 3, false}, - {1, 
"imakane.hokkaido.jp", 3, false}, - {1, "ishikari.hokkaido.jp", 3, false}, - {1, "iwamizawa.hokkaido.jp", 3, false}, - {1, "iwanai.hokkaido.jp", 3, false}, - {1, "kamifurano.hokkaido.jp", 3, false}, - {1, "kamikawa.hokkaido.jp", 3, false}, - {1, "kamishihoro.hokkaido.jp", 3, false}, - {1, "kamisunagawa.hokkaido.jp", 3, false}, - {1, "kamoenai.hokkaido.jp", 3, false}, - {1, "kayabe.hokkaido.jp", 3, false}, - {1, "kembuchi.hokkaido.jp", 3, false}, - {1, "kikonai.hokkaido.jp", 3, false}, - {1, "kimobetsu.hokkaido.jp", 3, false}, - {1, "kitahiroshima.hokkaido.jp", 3, false}, - {1, "kitami.hokkaido.jp", 3, false}, - {1, "kiyosato.hokkaido.jp", 3, false}, - {1, "koshimizu.hokkaido.jp", 3, false}, - {1, "kunneppu.hokkaido.jp", 3, false}, - {1, "kuriyama.hokkaido.jp", 3, false}, - {1, "kuromatsunai.hokkaido.jp", 3, false}, - {1, "kushiro.hokkaido.jp", 3, false}, - {1, "kutchan.hokkaido.jp", 3, false}, - {1, "kyowa.hokkaido.jp", 3, false}, - {1, "mashike.hokkaido.jp", 3, false}, - {1, "matsumae.hokkaido.jp", 3, false}, - {1, "mikasa.hokkaido.jp", 3, false}, - {1, "minamifurano.hokkaido.jp", 3, false}, - {1, "mombetsu.hokkaido.jp", 3, false}, - {1, "moseushi.hokkaido.jp", 3, false}, - {1, "mukawa.hokkaido.jp", 3, false}, - {1, "muroran.hokkaido.jp", 3, false}, - {1, "naie.hokkaido.jp", 3, false}, - {1, "nakagawa.hokkaido.jp", 3, false}, - {1, "nakasatsunai.hokkaido.jp", 3, false}, - {1, "nakatombetsu.hokkaido.jp", 3, false}, - {1, "nanae.hokkaido.jp", 3, false}, - {1, "nanporo.hokkaido.jp", 3, false}, - {1, "nayoro.hokkaido.jp", 3, false}, - {1, "nemuro.hokkaido.jp", 3, false}, - {1, "niikappu.hokkaido.jp", 3, false}, - {1, "niki.hokkaido.jp", 3, false}, - {1, "nishiokoppe.hokkaido.jp", 3, false}, - {1, "noboribetsu.hokkaido.jp", 3, false}, - {1, "numata.hokkaido.jp", 3, false}, - {1, "obihiro.hokkaido.jp", 3, false}, - {1, "obira.hokkaido.jp", 3, false}, - {1, "oketo.hokkaido.jp", 3, false}, - {1, "okoppe.hokkaido.jp", 3, false}, - {1, "otaru.hokkaido.jp", 3, false}, - {1, 
"otobe.hokkaido.jp", 3, false}, - {1, "otofuke.hokkaido.jp", 3, false}, - {1, "otoineppu.hokkaido.jp", 3, false}, - {1, "oumu.hokkaido.jp", 3, false}, - {1, "ozora.hokkaido.jp", 3, false}, - {1, "pippu.hokkaido.jp", 3, false}, - {1, "rankoshi.hokkaido.jp", 3, false}, - {1, "rebun.hokkaido.jp", 3, false}, - {1, "rikubetsu.hokkaido.jp", 3, false}, - {1, "rishiri.hokkaido.jp", 3, false}, - {1, "rishirifuji.hokkaido.jp", 3, false}, - {1, "saroma.hokkaido.jp", 3, false}, - {1, "sarufutsu.hokkaido.jp", 3, false}, - {1, "shakotan.hokkaido.jp", 3, false}, - {1, "shari.hokkaido.jp", 3, false}, - {1, "shibecha.hokkaido.jp", 3, false}, - {1, "shibetsu.hokkaido.jp", 3, false}, - {1, "shikabe.hokkaido.jp", 3, false}, - {1, "shikaoi.hokkaido.jp", 3, false}, - {1, "shimamaki.hokkaido.jp", 3, false}, - {1, "shimizu.hokkaido.jp", 3, false}, - {1, "shimokawa.hokkaido.jp", 3, false}, - {1, "shinshinotsu.hokkaido.jp", 3, false}, - {1, "shintoku.hokkaido.jp", 3, false}, - {1, "shiranuka.hokkaido.jp", 3, false}, - {1, "shiraoi.hokkaido.jp", 3, false}, - {1, "shiriuchi.hokkaido.jp", 3, false}, - {1, "sobetsu.hokkaido.jp", 3, false}, - {1, "sunagawa.hokkaido.jp", 3, false}, - {1, "taiki.hokkaido.jp", 3, false}, - {1, "takasu.hokkaido.jp", 3, false}, - {1, "takikawa.hokkaido.jp", 3, false}, - {1, "takinoue.hokkaido.jp", 3, false}, - {1, "teshikaga.hokkaido.jp", 3, false}, - {1, "tobetsu.hokkaido.jp", 3, false}, - {1, "tohma.hokkaido.jp", 3, false}, - {1, "tomakomai.hokkaido.jp", 3, false}, - {1, "tomari.hokkaido.jp", 3, false}, - {1, "toya.hokkaido.jp", 3, false}, - {1, "toyako.hokkaido.jp", 3, false}, - {1, "toyotomi.hokkaido.jp", 3, false}, - {1, "toyoura.hokkaido.jp", 3, false}, - {1, "tsubetsu.hokkaido.jp", 3, false}, - {1, "tsukigata.hokkaido.jp", 3, false}, - {1, "urakawa.hokkaido.jp", 3, false}, - {1, "urausu.hokkaido.jp", 3, false}, - {1, "uryu.hokkaido.jp", 3, false}, - {1, "utashinai.hokkaido.jp", 3, false}, - {1, "wakkanai.hokkaido.jp", 3, false}, - {1, "wassamu.hokkaido.jp", 3, 
false}, - {1, "yakumo.hokkaido.jp", 3, false}, - {1, "yoichi.hokkaido.jp", 3, false}, - {1, "aioi.hyogo.jp", 3, false}, - {1, "akashi.hyogo.jp", 3, false}, - {1, "ako.hyogo.jp", 3, false}, - {1, "amagasaki.hyogo.jp", 3, false}, - {1, "aogaki.hyogo.jp", 3, false}, - {1, "asago.hyogo.jp", 3, false}, - {1, "ashiya.hyogo.jp", 3, false}, - {1, "awaji.hyogo.jp", 3, false}, - {1, "fukusaki.hyogo.jp", 3, false}, - {1, "goshiki.hyogo.jp", 3, false}, - {1, "harima.hyogo.jp", 3, false}, - {1, "himeji.hyogo.jp", 3, false}, - {1, "ichikawa.hyogo.jp", 3, false}, - {1, "inagawa.hyogo.jp", 3, false}, - {1, "itami.hyogo.jp", 3, false}, - {1, "kakogawa.hyogo.jp", 3, false}, - {1, "kamigori.hyogo.jp", 3, false}, - {1, "kamikawa.hyogo.jp", 3, false}, - {1, "kasai.hyogo.jp", 3, false}, - {1, "kasuga.hyogo.jp", 3, false}, - {1, "kawanishi.hyogo.jp", 3, false}, - {1, "miki.hyogo.jp", 3, false}, - {1, "minamiawaji.hyogo.jp", 3, false}, - {1, "nishinomiya.hyogo.jp", 3, false}, - {1, "nishiwaki.hyogo.jp", 3, false}, - {1, "ono.hyogo.jp", 3, false}, - {1, "sanda.hyogo.jp", 3, false}, - {1, "sannan.hyogo.jp", 3, false}, - {1, "sasayama.hyogo.jp", 3, false}, - {1, "sayo.hyogo.jp", 3, false}, - {1, "shingu.hyogo.jp", 3, false}, - {1, "shinonsen.hyogo.jp", 3, false}, - {1, "shiso.hyogo.jp", 3, false}, - {1, "sumoto.hyogo.jp", 3, false}, - {1, "taishi.hyogo.jp", 3, false}, - {1, "taka.hyogo.jp", 3, false}, - {1, "takarazuka.hyogo.jp", 3, false}, - {1, "takasago.hyogo.jp", 3, false}, - {1, "takino.hyogo.jp", 3, false}, - {1, "tamba.hyogo.jp", 3, false}, - {1, "tatsuno.hyogo.jp", 3, false}, - {1, "toyooka.hyogo.jp", 3, false}, - {1, "yabu.hyogo.jp", 3, false}, - {1, "yashiro.hyogo.jp", 3, false}, - {1, "yoka.hyogo.jp", 3, false}, - {1, "yokawa.hyogo.jp", 3, false}, - {1, "ami.ibaraki.jp", 3, false}, - {1, "asahi.ibaraki.jp", 3, false}, - {1, "bando.ibaraki.jp", 3, false}, - {1, "chikusei.ibaraki.jp", 3, false}, - {1, "daigo.ibaraki.jp", 3, false}, - {1, "fujishiro.ibaraki.jp", 3, false}, - {1, 
"hitachi.ibaraki.jp", 3, false}, - {1, "hitachinaka.ibaraki.jp", 3, false}, - {1, "hitachiomiya.ibaraki.jp", 3, false}, - {1, "hitachiota.ibaraki.jp", 3, false}, - {1, "ibaraki.ibaraki.jp", 3, false}, - {1, "ina.ibaraki.jp", 3, false}, - {1, "inashiki.ibaraki.jp", 3, false}, - {1, "itako.ibaraki.jp", 3, false}, - {1, "iwama.ibaraki.jp", 3, false}, - {1, "joso.ibaraki.jp", 3, false}, - {1, "kamisu.ibaraki.jp", 3, false}, - {1, "kasama.ibaraki.jp", 3, false}, - {1, "kashima.ibaraki.jp", 3, false}, - {1, "kasumigaura.ibaraki.jp", 3, false}, - {1, "koga.ibaraki.jp", 3, false}, - {1, "miho.ibaraki.jp", 3, false}, - {1, "mito.ibaraki.jp", 3, false}, - {1, "moriya.ibaraki.jp", 3, false}, - {1, "naka.ibaraki.jp", 3, false}, - {1, "namegata.ibaraki.jp", 3, false}, - {1, "oarai.ibaraki.jp", 3, false}, - {1, "ogawa.ibaraki.jp", 3, false}, - {1, "omitama.ibaraki.jp", 3, false}, - {1, "ryugasaki.ibaraki.jp", 3, false}, - {1, "sakai.ibaraki.jp", 3, false}, - {1, "sakuragawa.ibaraki.jp", 3, false}, - {1, "shimodate.ibaraki.jp", 3, false}, - {1, "shimotsuma.ibaraki.jp", 3, false}, - {1, "shirosato.ibaraki.jp", 3, false}, - {1, "sowa.ibaraki.jp", 3, false}, - {1, "suifu.ibaraki.jp", 3, false}, - {1, "takahagi.ibaraki.jp", 3, false}, - {1, "tamatsukuri.ibaraki.jp", 3, false}, - {1, "tokai.ibaraki.jp", 3, false}, - {1, "tomobe.ibaraki.jp", 3, false}, - {1, "tone.ibaraki.jp", 3, false}, - {1, "toride.ibaraki.jp", 3, false}, - {1, "tsuchiura.ibaraki.jp", 3, false}, - {1, "tsukuba.ibaraki.jp", 3, false}, - {1, "uchihara.ibaraki.jp", 3, false}, - {1, "ushiku.ibaraki.jp", 3, false}, - {1, "yachiyo.ibaraki.jp", 3, false}, - {1, "yamagata.ibaraki.jp", 3, false}, - {1, "yawara.ibaraki.jp", 3, false}, - {1, "yuki.ibaraki.jp", 3, false}, - {1, "anamizu.ishikawa.jp", 3, false}, - {1, "hakui.ishikawa.jp", 3, false}, - {1, "hakusan.ishikawa.jp", 3, false}, - {1, "kaga.ishikawa.jp", 3, false}, - {1, "kahoku.ishikawa.jp", 3, false}, - {1, "kanazawa.ishikawa.jp", 3, false}, - {1, 
"kawakita.ishikawa.jp", 3, false}, - {1, "komatsu.ishikawa.jp", 3, false}, - {1, "nakanoto.ishikawa.jp", 3, false}, - {1, "nanao.ishikawa.jp", 3, false}, - {1, "nomi.ishikawa.jp", 3, false}, - {1, "nonoichi.ishikawa.jp", 3, false}, - {1, "noto.ishikawa.jp", 3, false}, - {1, "shika.ishikawa.jp", 3, false}, - {1, "suzu.ishikawa.jp", 3, false}, - {1, "tsubata.ishikawa.jp", 3, false}, - {1, "tsurugi.ishikawa.jp", 3, false}, - {1, "uchinada.ishikawa.jp", 3, false}, - {1, "wajima.ishikawa.jp", 3, false}, - {1, "fudai.iwate.jp", 3, false}, - {1, "fujisawa.iwate.jp", 3, false}, - {1, "hanamaki.iwate.jp", 3, false}, - {1, "hiraizumi.iwate.jp", 3, false}, - {1, "hirono.iwate.jp", 3, false}, - {1, "ichinohe.iwate.jp", 3, false}, - {1, "ichinoseki.iwate.jp", 3, false}, - {1, "iwaizumi.iwate.jp", 3, false}, - {1, "iwate.iwate.jp", 3, false}, - {1, "joboji.iwate.jp", 3, false}, - {1, "kamaishi.iwate.jp", 3, false}, - {1, "kanegasaki.iwate.jp", 3, false}, - {1, "karumai.iwate.jp", 3, false}, - {1, "kawai.iwate.jp", 3, false}, - {1, "kitakami.iwate.jp", 3, false}, - {1, "kuji.iwate.jp", 3, false}, - {1, "kunohe.iwate.jp", 3, false}, - {1, "kuzumaki.iwate.jp", 3, false}, - {1, "miyako.iwate.jp", 3, false}, - {1, "mizusawa.iwate.jp", 3, false}, - {1, "morioka.iwate.jp", 3, false}, - {1, "ninohe.iwate.jp", 3, false}, - {1, "noda.iwate.jp", 3, false}, - {1, "ofunato.iwate.jp", 3, false}, - {1, "oshu.iwate.jp", 3, false}, - {1, "otsuchi.iwate.jp", 3, false}, - {1, "rikuzentakata.iwate.jp", 3, false}, - {1, "shiwa.iwate.jp", 3, false}, - {1, "shizukuishi.iwate.jp", 3, false}, - {1, "sumita.iwate.jp", 3, false}, - {1, "tanohata.iwate.jp", 3, false}, - {1, "tono.iwate.jp", 3, false}, - {1, "yahaba.iwate.jp", 3, false}, - {1, "yamada.iwate.jp", 3, false}, - {1, "ayagawa.kagawa.jp", 3, false}, - {1, "higashikagawa.kagawa.jp", 3, false}, - {1, "kanonji.kagawa.jp", 3, false}, - {1, "kotohira.kagawa.jp", 3, false}, - {1, "manno.kagawa.jp", 3, false}, - {1, "marugame.kagawa.jp", 3, false}, - 
{1, "mitoyo.kagawa.jp", 3, false}, - {1, "naoshima.kagawa.jp", 3, false}, - {1, "sanuki.kagawa.jp", 3, false}, - {1, "tadotsu.kagawa.jp", 3, false}, - {1, "takamatsu.kagawa.jp", 3, false}, - {1, "tonosho.kagawa.jp", 3, false}, - {1, "uchinomi.kagawa.jp", 3, false}, - {1, "utazu.kagawa.jp", 3, false}, - {1, "zentsuji.kagawa.jp", 3, false}, - {1, "akune.kagoshima.jp", 3, false}, - {1, "amami.kagoshima.jp", 3, false}, - {1, "hioki.kagoshima.jp", 3, false}, - {1, "isa.kagoshima.jp", 3, false}, - {1, "isen.kagoshima.jp", 3, false}, - {1, "izumi.kagoshima.jp", 3, false}, - {1, "kagoshima.kagoshima.jp", 3, false}, - {1, "kanoya.kagoshima.jp", 3, false}, - {1, "kawanabe.kagoshima.jp", 3, false}, - {1, "kinko.kagoshima.jp", 3, false}, - {1, "kouyama.kagoshima.jp", 3, false}, - {1, "makurazaki.kagoshima.jp", 3, false}, - {1, "matsumoto.kagoshima.jp", 3, false}, - {1, "minamitane.kagoshima.jp", 3, false}, - {1, "nakatane.kagoshima.jp", 3, false}, - {1, "nishinoomote.kagoshima.jp", 3, false}, - {1, "satsumasendai.kagoshima.jp", 3, false}, - {1, "soo.kagoshima.jp", 3, false}, - {1, "tarumizu.kagoshima.jp", 3, false}, - {1, "yusui.kagoshima.jp", 3, false}, - {1, "aikawa.kanagawa.jp", 3, false}, - {1, "atsugi.kanagawa.jp", 3, false}, - {1, "ayase.kanagawa.jp", 3, false}, - {1, "chigasaki.kanagawa.jp", 3, false}, - {1, "ebina.kanagawa.jp", 3, false}, - {1, "fujisawa.kanagawa.jp", 3, false}, - {1, "hadano.kanagawa.jp", 3, false}, - {1, "hakone.kanagawa.jp", 3, false}, - {1, "hiratsuka.kanagawa.jp", 3, false}, - {1, "isehara.kanagawa.jp", 3, false}, - {1, "kaisei.kanagawa.jp", 3, false}, - {1, "kamakura.kanagawa.jp", 3, false}, - {1, "kiyokawa.kanagawa.jp", 3, false}, - {1, "matsuda.kanagawa.jp", 3, false}, - {1, "minamiashigara.kanagawa.jp", 3, false}, - {1, "miura.kanagawa.jp", 3, false}, - {1, "nakai.kanagawa.jp", 3, false}, - {1, "ninomiya.kanagawa.jp", 3, false}, - {1, "odawara.kanagawa.jp", 3, false}, - {1, "oi.kanagawa.jp", 3, false}, - {1, "oiso.kanagawa.jp", 3, false}, - 
{1, "sagamihara.kanagawa.jp", 3, false}, - {1, "samukawa.kanagawa.jp", 3, false}, - {1, "tsukui.kanagawa.jp", 3, false}, - {1, "yamakita.kanagawa.jp", 3, false}, - {1, "yamato.kanagawa.jp", 3, false}, - {1, "yokosuka.kanagawa.jp", 3, false}, - {1, "yugawara.kanagawa.jp", 3, false}, - {1, "zama.kanagawa.jp", 3, false}, - {1, "zushi.kanagawa.jp", 3, false}, - {1, "aki.kochi.jp", 3, false}, - {1, "geisei.kochi.jp", 3, false}, - {1, "hidaka.kochi.jp", 3, false}, - {1, "higashitsuno.kochi.jp", 3, false}, - {1, "ino.kochi.jp", 3, false}, - {1, "kagami.kochi.jp", 3, false}, - {1, "kami.kochi.jp", 3, false}, - {1, "kitagawa.kochi.jp", 3, false}, - {1, "kochi.kochi.jp", 3, false}, - {1, "mihara.kochi.jp", 3, false}, - {1, "motoyama.kochi.jp", 3, false}, - {1, "muroto.kochi.jp", 3, false}, - {1, "nahari.kochi.jp", 3, false}, - {1, "nakamura.kochi.jp", 3, false}, - {1, "nankoku.kochi.jp", 3, false}, - {1, "nishitosa.kochi.jp", 3, false}, - {1, "niyodogawa.kochi.jp", 3, false}, - {1, "ochi.kochi.jp", 3, false}, - {1, "okawa.kochi.jp", 3, false}, - {1, "otoyo.kochi.jp", 3, false}, - {1, "otsuki.kochi.jp", 3, false}, - {1, "sakawa.kochi.jp", 3, false}, - {1, "sukumo.kochi.jp", 3, false}, - {1, "susaki.kochi.jp", 3, false}, - {1, "tosa.kochi.jp", 3, false}, - {1, "tosashimizu.kochi.jp", 3, false}, - {1, "toyo.kochi.jp", 3, false}, - {1, "tsuno.kochi.jp", 3, false}, - {1, "umaji.kochi.jp", 3, false}, - {1, "yasuda.kochi.jp", 3, false}, - {1, "yusuhara.kochi.jp", 3, false}, - {1, "amakusa.kumamoto.jp", 3, false}, - {1, "arao.kumamoto.jp", 3, false}, - {1, "aso.kumamoto.jp", 3, false}, - {1, "choyo.kumamoto.jp", 3, false}, - {1, "gyokuto.kumamoto.jp", 3, false}, - {1, "kamiamakusa.kumamoto.jp", 3, false}, - {1, "kikuchi.kumamoto.jp", 3, false}, - {1, "kumamoto.kumamoto.jp", 3, false}, - {1, "mashiki.kumamoto.jp", 3, false}, - {1, "mifune.kumamoto.jp", 3, false}, - {1, "minamata.kumamoto.jp", 3, false}, - {1, "minamioguni.kumamoto.jp", 3, false}, - {1, "nagasu.kumamoto.jp", 3, 
false}, - {1, "nishihara.kumamoto.jp", 3, false}, - {1, "oguni.kumamoto.jp", 3, false}, - {1, "ozu.kumamoto.jp", 3, false}, - {1, "sumoto.kumamoto.jp", 3, false}, - {1, "takamori.kumamoto.jp", 3, false}, - {1, "uki.kumamoto.jp", 3, false}, - {1, "uto.kumamoto.jp", 3, false}, - {1, "yamaga.kumamoto.jp", 3, false}, - {1, "yamato.kumamoto.jp", 3, false}, - {1, "yatsushiro.kumamoto.jp", 3, false}, - {1, "ayabe.kyoto.jp", 3, false}, - {1, "fukuchiyama.kyoto.jp", 3, false}, - {1, "higashiyama.kyoto.jp", 3, false}, - {1, "ide.kyoto.jp", 3, false}, - {1, "ine.kyoto.jp", 3, false}, - {1, "joyo.kyoto.jp", 3, false}, - {1, "kameoka.kyoto.jp", 3, false}, - {1, "kamo.kyoto.jp", 3, false}, - {1, "kita.kyoto.jp", 3, false}, - {1, "kizu.kyoto.jp", 3, false}, - {1, "kumiyama.kyoto.jp", 3, false}, - {1, "kyotamba.kyoto.jp", 3, false}, - {1, "kyotanabe.kyoto.jp", 3, false}, - {1, "kyotango.kyoto.jp", 3, false}, - {1, "maizuru.kyoto.jp", 3, false}, - {1, "minami.kyoto.jp", 3, false}, - {1, "minamiyamashiro.kyoto.jp", 3, false}, - {1, "miyazu.kyoto.jp", 3, false}, - {1, "muko.kyoto.jp", 3, false}, - {1, "nagaokakyo.kyoto.jp", 3, false}, - {1, "nakagyo.kyoto.jp", 3, false}, - {1, "nantan.kyoto.jp", 3, false}, - {1, "oyamazaki.kyoto.jp", 3, false}, - {1, "sakyo.kyoto.jp", 3, false}, - {1, "seika.kyoto.jp", 3, false}, - {1, "tanabe.kyoto.jp", 3, false}, - {1, "uji.kyoto.jp", 3, false}, - {1, "ujitawara.kyoto.jp", 3, false}, - {1, "wazuka.kyoto.jp", 3, false}, - {1, "yamashina.kyoto.jp", 3, false}, - {1, "yawata.kyoto.jp", 3, false}, - {1, "asahi.mie.jp", 3, false}, - {1, "inabe.mie.jp", 3, false}, - {1, "ise.mie.jp", 3, false}, - {1, "kameyama.mie.jp", 3, false}, - {1, "kawagoe.mie.jp", 3, false}, - {1, "kiho.mie.jp", 3, false}, - {1, "kisosaki.mie.jp", 3, false}, - {1, "kiwa.mie.jp", 3, false}, - {1, "komono.mie.jp", 3, false}, - {1, "kumano.mie.jp", 3, false}, - {1, "kuwana.mie.jp", 3, false}, - {1, "matsusaka.mie.jp", 3, false}, - {1, "meiwa.mie.jp", 3, false}, - {1, "mihama.mie.jp", 
3, false}, - {1, "minamiise.mie.jp", 3, false}, - {1, "misugi.mie.jp", 3, false}, - {1, "miyama.mie.jp", 3, false}, - {1, "nabari.mie.jp", 3, false}, - {1, "shima.mie.jp", 3, false}, - {1, "suzuka.mie.jp", 3, false}, - {1, "tado.mie.jp", 3, false}, - {1, "taiki.mie.jp", 3, false}, - {1, "taki.mie.jp", 3, false}, - {1, "tamaki.mie.jp", 3, false}, - {1, "toba.mie.jp", 3, false}, - {1, "tsu.mie.jp", 3, false}, - {1, "udono.mie.jp", 3, false}, - {1, "ureshino.mie.jp", 3, false}, - {1, "watarai.mie.jp", 3, false}, - {1, "yokkaichi.mie.jp", 3, false}, - {1, "furukawa.miyagi.jp", 3, false}, - {1, "higashimatsushima.miyagi.jp", 3, false}, - {1, "ishinomaki.miyagi.jp", 3, false}, - {1, "iwanuma.miyagi.jp", 3, false}, - {1, "kakuda.miyagi.jp", 3, false}, - {1, "kami.miyagi.jp", 3, false}, - {1, "kawasaki.miyagi.jp", 3, false}, - {1, "marumori.miyagi.jp", 3, false}, - {1, "matsushima.miyagi.jp", 3, false}, - {1, "minamisanriku.miyagi.jp", 3, false}, - {1, "misato.miyagi.jp", 3, false}, - {1, "murata.miyagi.jp", 3, false}, - {1, "natori.miyagi.jp", 3, false}, - {1, "ogawara.miyagi.jp", 3, false}, - {1, "ohira.miyagi.jp", 3, false}, - {1, "onagawa.miyagi.jp", 3, false}, - {1, "osaki.miyagi.jp", 3, false}, - {1, "rifu.miyagi.jp", 3, false}, - {1, "semine.miyagi.jp", 3, false}, - {1, "shibata.miyagi.jp", 3, false}, - {1, "shichikashuku.miyagi.jp", 3, false}, - {1, "shikama.miyagi.jp", 3, false}, - {1, "shiogama.miyagi.jp", 3, false}, - {1, "shiroishi.miyagi.jp", 3, false}, - {1, "tagajo.miyagi.jp", 3, false}, - {1, "taiwa.miyagi.jp", 3, false}, - {1, "tome.miyagi.jp", 3, false}, - {1, "tomiya.miyagi.jp", 3, false}, - {1, "wakuya.miyagi.jp", 3, false}, - {1, "watari.miyagi.jp", 3, false}, - {1, "yamamoto.miyagi.jp", 3, false}, - {1, "zao.miyagi.jp", 3, false}, - {1, "aya.miyazaki.jp", 3, false}, - {1, "ebino.miyazaki.jp", 3, false}, - {1, "gokase.miyazaki.jp", 3, false}, - {1, "hyuga.miyazaki.jp", 3, false}, - {1, "kadogawa.miyazaki.jp", 3, false}, - {1, "kawaminami.miyazaki.jp", 
3, false}, - {1, "kijo.miyazaki.jp", 3, false}, - {1, "kitagawa.miyazaki.jp", 3, false}, - {1, "kitakata.miyazaki.jp", 3, false}, - {1, "kitaura.miyazaki.jp", 3, false}, - {1, "kobayashi.miyazaki.jp", 3, false}, - {1, "kunitomi.miyazaki.jp", 3, false}, - {1, "kushima.miyazaki.jp", 3, false}, - {1, "mimata.miyazaki.jp", 3, false}, - {1, "miyakonojo.miyazaki.jp", 3, false}, - {1, "miyazaki.miyazaki.jp", 3, false}, - {1, "morotsuka.miyazaki.jp", 3, false}, - {1, "nichinan.miyazaki.jp", 3, false}, - {1, "nishimera.miyazaki.jp", 3, false}, - {1, "nobeoka.miyazaki.jp", 3, false}, - {1, "saito.miyazaki.jp", 3, false}, - {1, "shiiba.miyazaki.jp", 3, false}, - {1, "shintomi.miyazaki.jp", 3, false}, - {1, "takaharu.miyazaki.jp", 3, false}, - {1, "takanabe.miyazaki.jp", 3, false}, - {1, "takazaki.miyazaki.jp", 3, false}, - {1, "tsuno.miyazaki.jp", 3, false}, - {1, "achi.nagano.jp", 3, false}, - {1, "agematsu.nagano.jp", 3, false}, - {1, "anan.nagano.jp", 3, false}, - {1, "aoki.nagano.jp", 3, false}, - {1, "asahi.nagano.jp", 3, false}, - {1, "azumino.nagano.jp", 3, false}, - {1, "chikuhoku.nagano.jp", 3, false}, - {1, "chikuma.nagano.jp", 3, false}, - {1, "chino.nagano.jp", 3, false}, - {1, "fujimi.nagano.jp", 3, false}, - {1, "hakuba.nagano.jp", 3, false}, - {1, "hara.nagano.jp", 3, false}, - {1, "hiraya.nagano.jp", 3, false}, - {1, "iida.nagano.jp", 3, false}, - {1, "iijima.nagano.jp", 3, false}, - {1, "iiyama.nagano.jp", 3, false}, - {1, "iizuna.nagano.jp", 3, false}, - {1, "ikeda.nagano.jp", 3, false}, - {1, "ikusaka.nagano.jp", 3, false}, - {1, "ina.nagano.jp", 3, false}, - {1, "karuizawa.nagano.jp", 3, false}, - {1, "kawakami.nagano.jp", 3, false}, - {1, "kiso.nagano.jp", 3, false}, - {1, "kisofukushima.nagano.jp", 3, false}, - {1, "kitaaiki.nagano.jp", 3, false}, - {1, "komagane.nagano.jp", 3, false}, - {1, "komoro.nagano.jp", 3, false}, - {1, "matsukawa.nagano.jp", 3, false}, - {1, "matsumoto.nagano.jp", 3, false}, - {1, "miasa.nagano.jp", 3, false}, - {1, 
"minamiaiki.nagano.jp", 3, false}, - {1, "minamimaki.nagano.jp", 3, false}, - {1, "minamiminowa.nagano.jp", 3, false}, - {1, "minowa.nagano.jp", 3, false}, - {1, "miyada.nagano.jp", 3, false}, - {1, "miyota.nagano.jp", 3, false}, - {1, "mochizuki.nagano.jp", 3, false}, - {1, "nagano.nagano.jp", 3, false}, - {1, "nagawa.nagano.jp", 3, false}, - {1, "nagiso.nagano.jp", 3, false}, - {1, "nakagawa.nagano.jp", 3, false}, - {1, "nakano.nagano.jp", 3, false}, - {1, "nozawaonsen.nagano.jp", 3, false}, - {1, "obuse.nagano.jp", 3, false}, - {1, "ogawa.nagano.jp", 3, false}, - {1, "okaya.nagano.jp", 3, false}, - {1, "omachi.nagano.jp", 3, false}, - {1, "omi.nagano.jp", 3, false}, - {1, "ookuwa.nagano.jp", 3, false}, - {1, "ooshika.nagano.jp", 3, false}, - {1, "otaki.nagano.jp", 3, false}, - {1, "otari.nagano.jp", 3, false}, - {1, "sakae.nagano.jp", 3, false}, - {1, "sakaki.nagano.jp", 3, false}, - {1, "saku.nagano.jp", 3, false}, - {1, "sakuho.nagano.jp", 3, false}, - {1, "shimosuwa.nagano.jp", 3, false}, - {1, "shinanomachi.nagano.jp", 3, false}, - {1, "shiojiri.nagano.jp", 3, false}, - {1, "suwa.nagano.jp", 3, false}, - {1, "suzaka.nagano.jp", 3, false}, - {1, "takagi.nagano.jp", 3, false}, - {1, "takamori.nagano.jp", 3, false}, - {1, "takayama.nagano.jp", 3, false}, - {1, "tateshina.nagano.jp", 3, false}, - {1, "tatsuno.nagano.jp", 3, false}, - {1, "togakushi.nagano.jp", 3, false}, - {1, "togura.nagano.jp", 3, false}, - {1, "tomi.nagano.jp", 3, false}, - {1, "ueda.nagano.jp", 3, false}, - {1, "wada.nagano.jp", 3, false}, - {1, "yamagata.nagano.jp", 3, false}, - {1, "yamanouchi.nagano.jp", 3, false}, - {1, "yasaka.nagano.jp", 3, false}, - {1, "yasuoka.nagano.jp", 3, false}, - {1, "chijiwa.nagasaki.jp", 3, false}, - {1, "futsu.nagasaki.jp", 3, false}, - {1, "goto.nagasaki.jp", 3, false}, - {1, "hasami.nagasaki.jp", 3, false}, - {1, "hirado.nagasaki.jp", 3, false}, - {1, "iki.nagasaki.jp", 3, false}, - {1, "isahaya.nagasaki.jp", 3, false}, - {1, "kawatana.nagasaki.jp", 3, 
false}, - {1, "kuchinotsu.nagasaki.jp", 3, false}, - {1, "matsuura.nagasaki.jp", 3, false}, - {1, "nagasaki.nagasaki.jp", 3, false}, - {1, "obama.nagasaki.jp", 3, false}, - {1, "omura.nagasaki.jp", 3, false}, - {1, "oseto.nagasaki.jp", 3, false}, - {1, "saikai.nagasaki.jp", 3, false}, - {1, "sasebo.nagasaki.jp", 3, false}, - {1, "seihi.nagasaki.jp", 3, false}, - {1, "shimabara.nagasaki.jp", 3, false}, - {1, "shinkamigoto.nagasaki.jp", 3, false}, - {1, "togitsu.nagasaki.jp", 3, false}, - {1, "tsushima.nagasaki.jp", 3, false}, - {1, "unzen.nagasaki.jp", 3, false}, - {1, "ando.nara.jp", 3, false}, - {1, "gose.nara.jp", 3, false}, - {1, "heguri.nara.jp", 3, false}, - {1, "higashiyoshino.nara.jp", 3, false}, - {1, "ikaruga.nara.jp", 3, false}, - {1, "ikoma.nara.jp", 3, false}, - {1, "kamikitayama.nara.jp", 3, false}, - {1, "kanmaki.nara.jp", 3, false}, - {1, "kashiba.nara.jp", 3, false}, - {1, "kashihara.nara.jp", 3, false}, - {1, "katsuragi.nara.jp", 3, false}, - {1, "kawai.nara.jp", 3, false}, - {1, "kawakami.nara.jp", 3, false}, - {1, "kawanishi.nara.jp", 3, false}, - {1, "koryo.nara.jp", 3, false}, - {1, "kurotaki.nara.jp", 3, false}, - {1, "mitsue.nara.jp", 3, false}, - {1, "miyake.nara.jp", 3, false}, - {1, "nara.nara.jp", 3, false}, - {1, "nosegawa.nara.jp", 3, false}, - {1, "oji.nara.jp", 3, false}, - {1, "ouda.nara.jp", 3, false}, - {1, "oyodo.nara.jp", 3, false}, - {1, "sakurai.nara.jp", 3, false}, - {1, "sango.nara.jp", 3, false}, - {1, "shimoichi.nara.jp", 3, false}, - {1, "shimokitayama.nara.jp", 3, false}, - {1, "shinjo.nara.jp", 3, false}, - {1, "soni.nara.jp", 3, false}, - {1, "takatori.nara.jp", 3, false}, - {1, "tawaramoto.nara.jp", 3, false}, - {1, "tenkawa.nara.jp", 3, false}, - {1, "tenri.nara.jp", 3, false}, - {1, "uda.nara.jp", 3, false}, - {1, "yamatokoriyama.nara.jp", 3, false}, - {1, "yamatotakada.nara.jp", 3, false}, - {1, "yamazoe.nara.jp", 3, false}, - {1, "yoshino.nara.jp", 3, false}, - {1, "aga.niigata.jp", 3, false}, - {1, 
"agano.niigata.jp", 3, false}, - {1, "gosen.niigata.jp", 3, false}, - {1, "itoigawa.niigata.jp", 3, false}, - {1, "izumozaki.niigata.jp", 3, false}, - {1, "joetsu.niigata.jp", 3, false}, - {1, "kamo.niigata.jp", 3, false}, - {1, "kariwa.niigata.jp", 3, false}, - {1, "kashiwazaki.niigata.jp", 3, false}, - {1, "minamiuonuma.niigata.jp", 3, false}, - {1, "mitsuke.niigata.jp", 3, false}, - {1, "muika.niigata.jp", 3, false}, - {1, "murakami.niigata.jp", 3, false}, - {1, "myoko.niigata.jp", 3, false}, - {1, "nagaoka.niigata.jp", 3, false}, - {1, "niigata.niigata.jp", 3, false}, - {1, "ojiya.niigata.jp", 3, false}, - {1, "omi.niigata.jp", 3, false}, - {1, "sado.niigata.jp", 3, false}, - {1, "sanjo.niigata.jp", 3, false}, - {1, "seiro.niigata.jp", 3, false}, - {1, "seirou.niigata.jp", 3, false}, - {1, "sekikawa.niigata.jp", 3, false}, - {1, "shibata.niigata.jp", 3, false}, - {1, "tagami.niigata.jp", 3, false}, - {1, "tainai.niigata.jp", 3, false}, - {1, "tochio.niigata.jp", 3, false}, - {1, "tokamachi.niigata.jp", 3, false}, - {1, "tsubame.niigata.jp", 3, false}, - {1, "tsunan.niigata.jp", 3, false}, - {1, "uonuma.niigata.jp", 3, false}, - {1, "yahiko.niigata.jp", 3, false}, - {1, "yoita.niigata.jp", 3, false}, - {1, "yuzawa.niigata.jp", 3, false}, - {1, "beppu.oita.jp", 3, false}, - {1, "bungoono.oita.jp", 3, false}, - {1, "bungotakada.oita.jp", 3, false}, - {1, "hasama.oita.jp", 3, false}, - {1, "hiji.oita.jp", 3, false}, - {1, "himeshima.oita.jp", 3, false}, - {1, "hita.oita.jp", 3, false}, - {1, "kamitsue.oita.jp", 3, false}, - {1, "kokonoe.oita.jp", 3, false}, - {1, "kuju.oita.jp", 3, false}, - {1, "kunisaki.oita.jp", 3, false}, - {1, "kusu.oita.jp", 3, false}, - {1, "oita.oita.jp", 3, false}, - {1, "saiki.oita.jp", 3, false}, - {1, "taketa.oita.jp", 3, false}, - {1, "tsukumi.oita.jp", 3, false}, - {1, "usa.oita.jp", 3, false}, - {1, "usuki.oita.jp", 3, false}, - {1, "yufu.oita.jp", 3, false}, - {1, "akaiwa.okayama.jp", 3, false}, - {1, "asakuchi.okayama.jp", 3, 
false}, - {1, "bizen.okayama.jp", 3, false}, - {1, "hayashima.okayama.jp", 3, false}, - {1, "ibara.okayama.jp", 3, false}, - {1, "kagamino.okayama.jp", 3, false}, - {1, "kasaoka.okayama.jp", 3, false}, - {1, "kibichuo.okayama.jp", 3, false}, - {1, "kumenan.okayama.jp", 3, false}, - {1, "kurashiki.okayama.jp", 3, false}, - {1, "maniwa.okayama.jp", 3, false}, - {1, "misaki.okayama.jp", 3, false}, - {1, "nagi.okayama.jp", 3, false}, - {1, "niimi.okayama.jp", 3, false}, - {1, "nishiawakura.okayama.jp", 3, false}, - {1, "okayama.okayama.jp", 3, false}, - {1, "satosho.okayama.jp", 3, false}, - {1, "setouchi.okayama.jp", 3, false}, - {1, "shinjo.okayama.jp", 3, false}, - {1, "shoo.okayama.jp", 3, false}, - {1, "soja.okayama.jp", 3, false}, - {1, "takahashi.okayama.jp", 3, false}, - {1, "tamano.okayama.jp", 3, false}, - {1, "tsuyama.okayama.jp", 3, false}, - {1, "wake.okayama.jp", 3, false}, - {1, "yakage.okayama.jp", 3, false}, - {1, "aguni.okinawa.jp", 3, false}, - {1, "ginowan.okinawa.jp", 3, false}, - {1, "ginoza.okinawa.jp", 3, false}, - {1, "gushikami.okinawa.jp", 3, false}, - {1, "haebaru.okinawa.jp", 3, false}, - {1, "higashi.okinawa.jp", 3, false}, - {1, "hirara.okinawa.jp", 3, false}, - {1, "iheya.okinawa.jp", 3, false}, - {1, "ishigaki.okinawa.jp", 3, false}, - {1, "ishikawa.okinawa.jp", 3, false}, - {1, "itoman.okinawa.jp", 3, false}, - {1, "izena.okinawa.jp", 3, false}, - {1, "kadena.okinawa.jp", 3, false}, - {1, "kin.okinawa.jp", 3, false}, - {1, "kitadaito.okinawa.jp", 3, false}, - {1, "kitanakagusuku.okinawa.jp", 3, false}, - {1, "kumejima.okinawa.jp", 3, false}, - {1, "kunigami.okinawa.jp", 3, false}, - {1, "minamidaito.okinawa.jp", 3, false}, - {1, "motobu.okinawa.jp", 3, false}, - {1, "nago.okinawa.jp", 3, false}, - {1, "naha.okinawa.jp", 3, false}, - {1, "nakagusuku.okinawa.jp", 3, false}, - {1, "nakijin.okinawa.jp", 3, false}, - {1, "nanjo.okinawa.jp", 3, false}, - {1, "nishihara.okinawa.jp", 3, false}, - {1, "ogimi.okinawa.jp", 3, false}, - {1, 
"okinawa.okinawa.jp", 3, false}, - {1, "onna.okinawa.jp", 3, false}, - {1, "shimoji.okinawa.jp", 3, false}, - {1, "taketomi.okinawa.jp", 3, false}, - {1, "tarama.okinawa.jp", 3, false}, - {1, "tokashiki.okinawa.jp", 3, false}, - {1, "tomigusuku.okinawa.jp", 3, false}, - {1, "tonaki.okinawa.jp", 3, false}, - {1, "urasoe.okinawa.jp", 3, false}, - {1, "uruma.okinawa.jp", 3, false}, - {1, "yaese.okinawa.jp", 3, false}, - {1, "yomitan.okinawa.jp", 3, false}, - {1, "yonabaru.okinawa.jp", 3, false}, - {1, "yonaguni.okinawa.jp", 3, false}, - {1, "zamami.okinawa.jp", 3, false}, - {1, "abeno.osaka.jp", 3, false}, - {1, "chihayaakasaka.osaka.jp", 3, false}, - {1, "chuo.osaka.jp", 3, false}, - {1, "daito.osaka.jp", 3, false}, - {1, "fujiidera.osaka.jp", 3, false}, - {1, "habikino.osaka.jp", 3, false}, - {1, "hannan.osaka.jp", 3, false}, - {1, "higashiosaka.osaka.jp", 3, false}, - {1, "higashisumiyoshi.osaka.jp", 3, false}, - {1, "higashiyodogawa.osaka.jp", 3, false}, - {1, "hirakata.osaka.jp", 3, false}, - {1, "ibaraki.osaka.jp", 3, false}, - {1, "ikeda.osaka.jp", 3, false}, - {1, "izumi.osaka.jp", 3, false}, - {1, "izumiotsu.osaka.jp", 3, false}, - {1, "izumisano.osaka.jp", 3, false}, - {1, "kadoma.osaka.jp", 3, false}, - {1, "kaizuka.osaka.jp", 3, false}, - {1, "kanan.osaka.jp", 3, false}, - {1, "kashiwara.osaka.jp", 3, false}, - {1, "katano.osaka.jp", 3, false}, - {1, "kawachinagano.osaka.jp", 3, false}, - {1, "kishiwada.osaka.jp", 3, false}, - {1, "kita.osaka.jp", 3, false}, - {1, "kumatori.osaka.jp", 3, false}, - {1, "matsubara.osaka.jp", 3, false}, - {1, "minato.osaka.jp", 3, false}, - {1, "minoh.osaka.jp", 3, false}, - {1, "misaki.osaka.jp", 3, false}, - {1, "moriguchi.osaka.jp", 3, false}, - {1, "neyagawa.osaka.jp", 3, false}, - {1, "nishi.osaka.jp", 3, false}, - {1, "nose.osaka.jp", 3, false}, - {1, "osakasayama.osaka.jp", 3, false}, - {1, "sakai.osaka.jp", 3, false}, - {1, "sayama.osaka.jp", 3, false}, - {1, "sennan.osaka.jp", 3, false}, - {1, "settsu.osaka.jp", 3, 
false}, - {1, "shijonawate.osaka.jp", 3, false}, - {1, "shimamoto.osaka.jp", 3, false}, - {1, "suita.osaka.jp", 3, false}, - {1, "tadaoka.osaka.jp", 3, false}, - {1, "taishi.osaka.jp", 3, false}, - {1, "tajiri.osaka.jp", 3, false}, - {1, "takaishi.osaka.jp", 3, false}, - {1, "takatsuki.osaka.jp", 3, false}, - {1, "tondabayashi.osaka.jp", 3, false}, - {1, "toyonaka.osaka.jp", 3, false}, - {1, "toyono.osaka.jp", 3, false}, - {1, "yao.osaka.jp", 3, false}, - {1, "ariake.saga.jp", 3, false}, - {1, "arita.saga.jp", 3, false}, - {1, "fukudomi.saga.jp", 3, false}, - {1, "genkai.saga.jp", 3, false}, - {1, "hamatama.saga.jp", 3, false}, - {1, "hizen.saga.jp", 3, false}, - {1, "imari.saga.jp", 3, false}, - {1, "kamimine.saga.jp", 3, false}, - {1, "kanzaki.saga.jp", 3, false}, - {1, "karatsu.saga.jp", 3, false}, - {1, "kashima.saga.jp", 3, false}, - {1, "kitagata.saga.jp", 3, false}, - {1, "kitahata.saga.jp", 3, false}, - {1, "kiyama.saga.jp", 3, false}, - {1, "kouhoku.saga.jp", 3, false}, - {1, "kyuragi.saga.jp", 3, false}, - {1, "nishiarita.saga.jp", 3, false}, - {1, "ogi.saga.jp", 3, false}, - {1, "omachi.saga.jp", 3, false}, - {1, "ouchi.saga.jp", 3, false}, - {1, "saga.saga.jp", 3, false}, - {1, "shiroishi.saga.jp", 3, false}, - {1, "taku.saga.jp", 3, false}, - {1, "tara.saga.jp", 3, false}, - {1, "tosu.saga.jp", 3, false}, - {1, "yoshinogari.saga.jp", 3, false}, - {1, "arakawa.saitama.jp", 3, false}, - {1, "asaka.saitama.jp", 3, false}, - {1, "chichibu.saitama.jp", 3, false}, - {1, "fujimi.saitama.jp", 3, false}, - {1, "fujimino.saitama.jp", 3, false}, - {1, "fukaya.saitama.jp", 3, false}, - {1, "hanno.saitama.jp", 3, false}, - {1, "hanyu.saitama.jp", 3, false}, - {1, "hasuda.saitama.jp", 3, false}, - {1, "hatogaya.saitama.jp", 3, false}, - {1, "hatoyama.saitama.jp", 3, false}, - {1, "hidaka.saitama.jp", 3, false}, - {1, "higashichichibu.saitama.jp", 3, false}, - {1, "higashimatsuyama.saitama.jp", 3, false}, - {1, "honjo.saitama.jp", 3, false}, - {1, "ina.saitama.jp", 
3, false}, - {1, "iruma.saitama.jp", 3, false}, - {1, "iwatsuki.saitama.jp", 3, false}, - {1, "kamiizumi.saitama.jp", 3, false}, - {1, "kamikawa.saitama.jp", 3, false}, - {1, "kamisato.saitama.jp", 3, false}, - {1, "kasukabe.saitama.jp", 3, false}, - {1, "kawagoe.saitama.jp", 3, false}, - {1, "kawaguchi.saitama.jp", 3, false}, - {1, "kawajima.saitama.jp", 3, false}, - {1, "kazo.saitama.jp", 3, false}, - {1, "kitamoto.saitama.jp", 3, false}, - {1, "koshigaya.saitama.jp", 3, false}, - {1, "kounosu.saitama.jp", 3, false}, - {1, "kuki.saitama.jp", 3, false}, - {1, "kumagaya.saitama.jp", 3, false}, - {1, "matsubushi.saitama.jp", 3, false}, - {1, "minano.saitama.jp", 3, false}, - {1, "misato.saitama.jp", 3, false}, - {1, "miyashiro.saitama.jp", 3, false}, - {1, "miyoshi.saitama.jp", 3, false}, - {1, "moroyama.saitama.jp", 3, false}, - {1, "nagatoro.saitama.jp", 3, false}, - {1, "namegawa.saitama.jp", 3, false}, - {1, "niiza.saitama.jp", 3, false}, - {1, "ogano.saitama.jp", 3, false}, - {1, "ogawa.saitama.jp", 3, false}, - {1, "ogose.saitama.jp", 3, false}, - {1, "okegawa.saitama.jp", 3, false}, - {1, "omiya.saitama.jp", 3, false}, - {1, "otaki.saitama.jp", 3, false}, - {1, "ranzan.saitama.jp", 3, false}, - {1, "ryokami.saitama.jp", 3, false}, - {1, "saitama.saitama.jp", 3, false}, - {1, "sakado.saitama.jp", 3, false}, - {1, "satte.saitama.jp", 3, false}, - {1, "sayama.saitama.jp", 3, false}, - {1, "shiki.saitama.jp", 3, false}, - {1, "shiraoka.saitama.jp", 3, false}, - {1, "soka.saitama.jp", 3, false}, - {1, "sugito.saitama.jp", 3, false}, - {1, "toda.saitama.jp", 3, false}, - {1, "tokigawa.saitama.jp", 3, false}, - {1, "tokorozawa.saitama.jp", 3, false}, - {1, "tsurugashima.saitama.jp", 3, false}, - {1, "urawa.saitama.jp", 3, false}, - {1, "warabi.saitama.jp", 3, false}, - {1, "yashio.saitama.jp", 3, false}, - {1, "yokoze.saitama.jp", 3, false}, - {1, "yono.saitama.jp", 3, false}, - {1, "yorii.saitama.jp", 3, false}, - {1, "yoshida.saitama.jp", 3, false}, - {1, 
"yoshikawa.saitama.jp", 3, false}, - {1, "yoshimi.saitama.jp", 3, false}, - {1, "aisho.shiga.jp", 3, false}, - {1, "gamo.shiga.jp", 3, false}, - {1, "higashiomi.shiga.jp", 3, false}, - {1, "hikone.shiga.jp", 3, false}, - {1, "koka.shiga.jp", 3, false}, - {1, "konan.shiga.jp", 3, false}, - {1, "kosei.shiga.jp", 3, false}, - {1, "koto.shiga.jp", 3, false}, - {1, "kusatsu.shiga.jp", 3, false}, - {1, "maibara.shiga.jp", 3, false}, - {1, "moriyama.shiga.jp", 3, false}, - {1, "nagahama.shiga.jp", 3, false}, - {1, "nishiazai.shiga.jp", 3, false}, - {1, "notogawa.shiga.jp", 3, false}, - {1, "omihachiman.shiga.jp", 3, false}, - {1, "otsu.shiga.jp", 3, false}, - {1, "ritto.shiga.jp", 3, false}, - {1, "ryuoh.shiga.jp", 3, false}, - {1, "takashima.shiga.jp", 3, false}, - {1, "takatsuki.shiga.jp", 3, false}, - {1, "torahime.shiga.jp", 3, false}, - {1, "toyosato.shiga.jp", 3, false}, - {1, "yasu.shiga.jp", 3, false}, - {1, "akagi.shimane.jp", 3, false}, - {1, "ama.shimane.jp", 3, false}, - {1, "gotsu.shimane.jp", 3, false}, - {1, "hamada.shimane.jp", 3, false}, - {1, "higashiizumo.shimane.jp", 3, false}, - {1, "hikawa.shimane.jp", 3, false}, - {1, "hikimi.shimane.jp", 3, false}, - {1, "izumo.shimane.jp", 3, false}, - {1, "kakinoki.shimane.jp", 3, false}, - {1, "masuda.shimane.jp", 3, false}, - {1, "matsue.shimane.jp", 3, false}, - {1, "misato.shimane.jp", 3, false}, - {1, "nishinoshima.shimane.jp", 3, false}, - {1, "ohda.shimane.jp", 3, false}, - {1, "okinoshima.shimane.jp", 3, false}, - {1, "okuizumo.shimane.jp", 3, false}, - {1, "shimane.shimane.jp", 3, false}, - {1, "tamayu.shimane.jp", 3, false}, - {1, "tsuwano.shimane.jp", 3, false}, - {1, "unnan.shimane.jp", 3, false}, - {1, "yakumo.shimane.jp", 3, false}, - {1, "yasugi.shimane.jp", 3, false}, - {1, "yatsuka.shimane.jp", 3, false}, - {1, "arai.shizuoka.jp", 3, false}, - {1, "atami.shizuoka.jp", 3, false}, - {1, "fuji.shizuoka.jp", 3, false}, - {1, "fujieda.shizuoka.jp", 3, false}, - {1, "fujikawa.shizuoka.jp", 3, false}, - 
{1, "fujinomiya.shizuoka.jp", 3, false}, - {1, "fukuroi.shizuoka.jp", 3, false}, - {1, "gotemba.shizuoka.jp", 3, false}, - {1, "haibara.shizuoka.jp", 3, false}, - {1, "hamamatsu.shizuoka.jp", 3, false}, - {1, "higashiizu.shizuoka.jp", 3, false}, - {1, "ito.shizuoka.jp", 3, false}, - {1, "iwata.shizuoka.jp", 3, false}, - {1, "izu.shizuoka.jp", 3, false}, - {1, "izunokuni.shizuoka.jp", 3, false}, - {1, "kakegawa.shizuoka.jp", 3, false}, - {1, "kannami.shizuoka.jp", 3, false}, - {1, "kawanehon.shizuoka.jp", 3, false}, - {1, "kawazu.shizuoka.jp", 3, false}, - {1, "kikugawa.shizuoka.jp", 3, false}, - {1, "kosai.shizuoka.jp", 3, false}, - {1, "makinohara.shizuoka.jp", 3, false}, - {1, "matsuzaki.shizuoka.jp", 3, false}, - {1, "minamiizu.shizuoka.jp", 3, false}, - {1, "mishima.shizuoka.jp", 3, false}, - {1, "morimachi.shizuoka.jp", 3, false}, - {1, "nishiizu.shizuoka.jp", 3, false}, - {1, "numazu.shizuoka.jp", 3, false}, - {1, "omaezaki.shizuoka.jp", 3, false}, - {1, "shimada.shizuoka.jp", 3, false}, - {1, "shimizu.shizuoka.jp", 3, false}, - {1, "shimoda.shizuoka.jp", 3, false}, - {1, "shizuoka.shizuoka.jp", 3, false}, - {1, "susono.shizuoka.jp", 3, false}, - {1, "yaizu.shizuoka.jp", 3, false}, - {1, "yoshida.shizuoka.jp", 3, false}, - {1, "ashikaga.tochigi.jp", 3, false}, - {1, "bato.tochigi.jp", 3, false}, - {1, "haga.tochigi.jp", 3, false}, - {1, "ichikai.tochigi.jp", 3, false}, - {1, "iwafune.tochigi.jp", 3, false}, - {1, "kaminokawa.tochigi.jp", 3, false}, - {1, "kanuma.tochigi.jp", 3, false}, - {1, "karasuyama.tochigi.jp", 3, false}, - {1, "kuroiso.tochigi.jp", 3, false}, - {1, "mashiko.tochigi.jp", 3, false}, - {1, "mibu.tochigi.jp", 3, false}, - {1, "moka.tochigi.jp", 3, false}, - {1, "motegi.tochigi.jp", 3, false}, - {1, "nasu.tochigi.jp", 3, false}, - {1, "nasushiobara.tochigi.jp", 3, false}, - {1, "nikko.tochigi.jp", 3, false}, - {1, "nishikata.tochigi.jp", 3, false}, - {1, "nogi.tochigi.jp", 3, false}, - {1, "ohira.tochigi.jp", 3, false}, - {1, 
"ohtawara.tochigi.jp", 3, false}, - {1, "oyama.tochigi.jp", 3, false}, - {1, "sakura.tochigi.jp", 3, false}, - {1, "sano.tochigi.jp", 3, false}, - {1, "shimotsuke.tochigi.jp", 3, false}, - {1, "shioya.tochigi.jp", 3, false}, - {1, "takanezawa.tochigi.jp", 3, false}, - {1, "tochigi.tochigi.jp", 3, false}, - {1, "tsuga.tochigi.jp", 3, false}, - {1, "ujiie.tochigi.jp", 3, false}, - {1, "utsunomiya.tochigi.jp", 3, false}, - {1, "yaita.tochigi.jp", 3, false}, - {1, "aizumi.tokushima.jp", 3, false}, - {1, "anan.tokushima.jp", 3, false}, - {1, "ichiba.tokushima.jp", 3, false}, - {1, "itano.tokushima.jp", 3, false}, - {1, "kainan.tokushima.jp", 3, false}, - {1, "komatsushima.tokushima.jp", 3, false}, - {1, "matsushige.tokushima.jp", 3, false}, - {1, "mima.tokushima.jp", 3, false}, - {1, "minami.tokushima.jp", 3, false}, - {1, "miyoshi.tokushima.jp", 3, false}, - {1, "mugi.tokushima.jp", 3, false}, - {1, "nakagawa.tokushima.jp", 3, false}, - {1, "naruto.tokushima.jp", 3, false}, - {1, "sanagochi.tokushima.jp", 3, false}, - {1, "shishikui.tokushima.jp", 3, false}, - {1, "tokushima.tokushima.jp", 3, false}, - {1, "wajiki.tokushima.jp", 3, false}, - {1, "adachi.tokyo.jp", 3, false}, - {1, "akiruno.tokyo.jp", 3, false}, - {1, "akishima.tokyo.jp", 3, false}, - {1, "aogashima.tokyo.jp", 3, false}, - {1, "arakawa.tokyo.jp", 3, false}, - {1, "bunkyo.tokyo.jp", 3, false}, - {1, "chiyoda.tokyo.jp", 3, false}, - {1, "chofu.tokyo.jp", 3, false}, - {1, "chuo.tokyo.jp", 3, false}, - {1, "edogawa.tokyo.jp", 3, false}, - {1, "fuchu.tokyo.jp", 3, false}, - {1, "fussa.tokyo.jp", 3, false}, - {1, "hachijo.tokyo.jp", 3, false}, - {1, "hachioji.tokyo.jp", 3, false}, - {1, "hamura.tokyo.jp", 3, false}, - {1, "higashikurume.tokyo.jp", 3, false}, - {1, "higashimurayama.tokyo.jp", 3, false}, - {1, "higashiyamato.tokyo.jp", 3, false}, - {1, "hino.tokyo.jp", 3, false}, - {1, "hinode.tokyo.jp", 3, false}, - {1, "hinohara.tokyo.jp", 3, false}, - {1, "inagi.tokyo.jp", 3, false}, - {1, 
"itabashi.tokyo.jp", 3, false}, - {1, "katsushika.tokyo.jp", 3, false}, - {1, "kita.tokyo.jp", 3, false}, - {1, "kiyose.tokyo.jp", 3, false}, - {1, "kodaira.tokyo.jp", 3, false}, - {1, "koganei.tokyo.jp", 3, false}, - {1, "kokubunji.tokyo.jp", 3, false}, - {1, "komae.tokyo.jp", 3, false}, - {1, "koto.tokyo.jp", 3, false}, - {1, "kouzushima.tokyo.jp", 3, false}, - {1, "kunitachi.tokyo.jp", 3, false}, - {1, "machida.tokyo.jp", 3, false}, - {1, "meguro.tokyo.jp", 3, false}, - {1, "minato.tokyo.jp", 3, false}, - {1, "mitaka.tokyo.jp", 3, false}, - {1, "mizuho.tokyo.jp", 3, false}, - {1, "musashimurayama.tokyo.jp", 3, false}, - {1, "musashino.tokyo.jp", 3, false}, - {1, "nakano.tokyo.jp", 3, false}, - {1, "nerima.tokyo.jp", 3, false}, - {1, "ogasawara.tokyo.jp", 3, false}, - {1, "okutama.tokyo.jp", 3, false}, - {1, "ome.tokyo.jp", 3, false}, - {1, "oshima.tokyo.jp", 3, false}, - {1, "ota.tokyo.jp", 3, false}, - {1, "setagaya.tokyo.jp", 3, false}, - {1, "shibuya.tokyo.jp", 3, false}, - {1, "shinagawa.tokyo.jp", 3, false}, - {1, "shinjuku.tokyo.jp", 3, false}, - {1, "suginami.tokyo.jp", 3, false}, - {1, "sumida.tokyo.jp", 3, false}, - {1, "tachikawa.tokyo.jp", 3, false}, - {1, "taito.tokyo.jp", 3, false}, - {1, "tama.tokyo.jp", 3, false}, - {1, "toshima.tokyo.jp", 3, false}, - {1, "chizu.tottori.jp", 3, false}, - {1, "hino.tottori.jp", 3, false}, - {1, "kawahara.tottori.jp", 3, false}, - {1, "koge.tottori.jp", 3, false}, - {1, "kotoura.tottori.jp", 3, false}, - {1, "misasa.tottori.jp", 3, false}, - {1, "nanbu.tottori.jp", 3, false}, - {1, "nichinan.tottori.jp", 3, false}, - {1, "sakaiminato.tottori.jp", 3, false}, - {1, "tottori.tottori.jp", 3, false}, - {1, "wakasa.tottori.jp", 3, false}, - {1, "yazu.tottori.jp", 3, false}, - {1, "yonago.tottori.jp", 3, false}, - {1, "asahi.toyama.jp", 3, false}, - {1, "fuchu.toyama.jp", 3, false}, - {1, "fukumitsu.toyama.jp", 3, false}, - {1, "funahashi.toyama.jp", 3, false}, - {1, "himi.toyama.jp", 3, false}, - {1, "imizu.toyama.jp", 
3, false}, - {1, "inami.toyama.jp", 3, false}, - {1, "johana.toyama.jp", 3, false}, - {1, "kamiichi.toyama.jp", 3, false}, - {1, "kurobe.toyama.jp", 3, false}, - {1, "nakaniikawa.toyama.jp", 3, false}, - {1, "namerikawa.toyama.jp", 3, false}, - {1, "nanto.toyama.jp", 3, false}, - {1, "nyuzen.toyama.jp", 3, false}, - {1, "oyabe.toyama.jp", 3, false}, - {1, "taira.toyama.jp", 3, false}, - {1, "takaoka.toyama.jp", 3, false}, - {1, "tateyama.toyama.jp", 3, false}, - {1, "toga.toyama.jp", 3, false}, - {1, "tonami.toyama.jp", 3, false}, - {1, "toyama.toyama.jp", 3, false}, - {1, "unazuki.toyama.jp", 3, false}, - {1, "uozu.toyama.jp", 3, false}, - {1, "yamada.toyama.jp", 3, false}, - {1, "arida.wakayama.jp", 3, false}, - {1, "aridagawa.wakayama.jp", 3, false}, - {1, "gobo.wakayama.jp", 3, false}, - {1, "hashimoto.wakayama.jp", 3, false}, - {1, "hidaka.wakayama.jp", 3, false}, - {1, "hirogawa.wakayama.jp", 3, false}, - {1, "inami.wakayama.jp", 3, false}, - {1, "iwade.wakayama.jp", 3, false}, - {1, "kainan.wakayama.jp", 3, false}, - {1, "kamitonda.wakayama.jp", 3, false}, - {1, "katsuragi.wakayama.jp", 3, false}, - {1, "kimino.wakayama.jp", 3, false}, - {1, "kinokawa.wakayama.jp", 3, false}, - {1, "kitayama.wakayama.jp", 3, false}, - {1, "koya.wakayama.jp", 3, false}, - {1, "koza.wakayama.jp", 3, false}, - {1, "kozagawa.wakayama.jp", 3, false}, - {1, "kudoyama.wakayama.jp", 3, false}, - {1, "kushimoto.wakayama.jp", 3, false}, - {1, "mihama.wakayama.jp", 3, false}, - {1, "misato.wakayama.jp", 3, false}, - {1, "nachikatsuura.wakayama.jp", 3, false}, - {1, "shingu.wakayama.jp", 3, false}, - {1, "shirahama.wakayama.jp", 3, false}, - {1, "taiji.wakayama.jp", 3, false}, - {1, "tanabe.wakayama.jp", 3, false}, - {1, "wakayama.wakayama.jp", 3, false}, - {1, "yuasa.wakayama.jp", 3, false}, - {1, "yura.wakayama.jp", 3, false}, - {1, "asahi.yamagata.jp", 3, false}, - {1, "funagata.yamagata.jp", 3, false}, - {1, "higashine.yamagata.jp", 3, false}, - {1, "iide.yamagata.jp", 3, false}, - 
{1, "kahoku.yamagata.jp", 3, false}, - {1, "kaminoyama.yamagata.jp", 3, false}, - {1, "kaneyama.yamagata.jp", 3, false}, - {1, "kawanishi.yamagata.jp", 3, false}, - {1, "mamurogawa.yamagata.jp", 3, false}, - {1, "mikawa.yamagata.jp", 3, false}, - {1, "murayama.yamagata.jp", 3, false}, - {1, "nagai.yamagata.jp", 3, false}, - {1, "nakayama.yamagata.jp", 3, false}, - {1, "nanyo.yamagata.jp", 3, false}, - {1, "nishikawa.yamagata.jp", 3, false}, - {1, "obanazawa.yamagata.jp", 3, false}, - {1, "oe.yamagata.jp", 3, false}, - {1, "oguni.yamagata.jp", 3, false}, - {1, "ohkura.yamagata.jp", 3, false}, - {1, "oishida.yamagata.jp", 3, false}, - {1, "sagae.yamagata.jp", 3, false}, - {1, "sakata.yamagata.jp", 3, false}, - {1, "sakegawa.yamagata.jp", 3, false}, - {1, "shinjo.yamagata.jp", 3, false}, - {1, "shirataka.yamagata.jp", 3, false}, - {1, "shonai.yamagata.jp", 3, false}, - {1, "takahata.yamagata.jp", 3, false}, - {1, "tendo.yamagata.jp", 3, false}, - {1, "tozawa.yamagata.jp", 3, false}, - {1, "tsuruoka.yamagata.jp", 3, false}, - {1, "yamagata.yamagata.jp", 3, false}, - {1, "yamanobe.yamagata.jp", 3, false}, - {1, "yonezawa.yamagata.jp", 3, false}, - {1, "yuza.yamagata.jp", 3, false}, - {1, "abu.yamaguchi.jp", 3, false}, - {1, "hagi.yamaguchi.jp", 3, false}, - {1, "hikari.yamaguchi.jp", 3, false}, - {1, "hofu.yamaguchi.jp", 3, false}, - {1, "iwakuni.yamaguchi.jp", 3, false}, - {1, "kudamatsu.yamaguchi.jp", 3, false}, - {1, "mitou.yamaguchi.jp", 3, false}, - {1, "nagato.yamaguchi.jp", 3, false}, - {1, "oshima.yamaguchi.jp", 3, false}, - {1, "shimonoseki.yamaguchi.jp", 3, false}, - {1, "shunan.yamaguchi.jp", 3, false}, - {1, "tabuse.yamaguchi.jp", 3, false}, - {1, "tokuyama.yamaguchi.jp", 3, false}, - {1, "toyota.yamaguchi.jp", 3, false}, - {1, "ube.yamaguchi.jp", 3, false}, - {1, "yuu.yamaguchi.jp", 3, false}, - {1, "chuo.yamanashi.jp", 3, false}, - {1, "doshi.yamanashi.jp", 3, false}, - {1, "fuefuki.yamanashi.jp", 3, false}, - {1, "fujikawa.yamanashi.jp", 3, false}, - {1, 
"fujikawaguchiko.yamanashi.jp", 3, false}, - {1, "fujiyoshida.yamanashi.jp", 3, false}, - {1, "hayakawa.yamanashi.jp", 3, false}, - {1, "hokuto.yamanashi.jp", 3, false}, - {1, "ichikawamisato.yamanashi.jp", 3, false}, - {1, "kai.yamanashi.jp", 3, false}, - {1, "kofu.yamanashi.jp", 3, false}, - {1, "koshu.yamanashi.jp", 3, false}, - {1, "kosuge.yamanashi.jp", 3, false}, - {1, "minami-alps.yamanashi.jp", 3, false}, - {1, "minobu.yamanashi.jp", 3, false}, - {1, "nakamichi.yamanashi.jp", 3, false}, - {1, "nanbu.yamanashi.jp", 3, false}, - {1, "narusawa.yamanashi.jp", 3, false}, - {1, "nirasaki.yamanashi.jp", 3, false}, - {1, "nishikatsura.yamanashi.jp", 3, false}, - {1, "oshino.yamanashi.jp", 3, false}, - {1, "otsuki.yamanashi.jp", 3, false}, - {1, "showa.yamanashi.jp", 3, false}, - {1, "tabayama.yamanashi.jp", 3, false}, - {1, "tsuru.yamanashi.jp", 3, false}, - {1, "uenohara.yamanashi.jp", 3, false}, - {1, "yamanakako.yamanashi.jp", 3, false}, - {1, "yamanashi.yamanashi.jp", 3, false}, - {1, "ke", 1, false}, - {1, "ac.ke", 2, false}, - {1, "co.ke", 2, false}, - {1, "go.ke", 2, false}, - {1, "info.ke", 2, false}, - {1, "me.ke", 2, false}, - {1, "mobi.ke", 2, false}, - {1, "ne.ke", 2, false}, - {1, "or.ke", 2, false}, - {1, "sc.ke", 2, false}, - {1, "kg", 1, false}, - {1, "org.kg", 2, false}, - {1, "net.kg", 2, false}, - {1, "com.kg", 2, false}, - {1, "edu.kg", 2, false}, - {1, "gov.kg", 2, false}, - {1, "mil.kg", 2, false}, - {2, "kh", 2, false}, - {1, "ki", 1, false}, - {1, "edu.ki", 2, false}, - {1, "biz.ki", 2, false}, - {1, "net.ki", 2, false}, - {1, "org.ki", 2, false}, - {1, "gov.ki", 2, false}, - {1, "info.ki", 2, false}, - {1, "com.ki", 2, false}, - {1, "km", 1, false}, - {1, "org.km", 2, false}, - {1, "nom.km", 2, false}, - {1, "gov.km", 2, false}, - {1, "prd.km", 2, false}, - {1, "tm.km", 2, false}, - {1, "edu.km", 2, false}, - {1, "mil.km", 2, false}, - {1, "ass.km", 2, false}, - {1, "com.km", 2, false}, - {1, "coop.km", 2, false}, - {1, "asso.km", 2, 
false}, - {1, "presse.km", 2, false}, - {1, "medecin.km", 2, false}, - {1, "notaires.km", 2, false}, - {1, "pharmaciens.km", 2, false}, - {1, "veterinaire.km", 2, false}, - {1, "gouv.km", 2, false}, - {1, "kn", 1, false}, - {1, "net.kn", 2, false}, - {1, "org.kn", 2, false}, - {1, "edu.kn", 2, false}, - {1, "gov.kn", 2, false}, - {1, "kp", 1, false}, - {1, "com.kp", 2, false}, - {1, "edu.kp", 2, false}, - {1, "gov.kp", 2, false}, - {1, "org.kp", 2, false}, - {1, "rep.kp", 2, false}, - {1, "tra.kp", 2, false}, - {1, "kr", 1, false}, - {1, "ac.kr", 2, false}, - {1, "co.kr", 2, false}, - {1, "es.kr", 2, false}, - {1, "go.kr", 2, false}, - {1, "hs.kr", 2, false}, - {1, "kg.kr", 2, false}, - {1, "mil.kr", 2, false}, - {1, "ms.kr", 2, false}, - {1, "ne.kr", 2, false}, - {1, "or.kr", 2, false}, - {1, "pe.kr", 2, false}, - {1, "re.kr", 2, false}, - {1, "sc.kr", 2, false}, - {1, "busan.kr", 2, false}, - {1, "chungbuk.kr", 2, false}, - {1, "chungnam.kr", 2, false}, - {1, "daegu.kr", 2, false}, - {1, "daejeon.kr", 2, false}, - {1, "gangwon.kr", 2, false}, - {1, "gwangju.kr", 2, false}, - {1, "gyeongbuk.kr", 2, false}, - {1, "gyeonggi.kr", 2, false}, - {1, "gyeongnam.kr", 2, false}, - {1, "incheon.kr", 2, false}, - {1, "jeju.kr", 2, false}, - {1, "jeonbuk.kr", 2, false}, - {1, "jeonnam.kr", 2, false}, - {1, "seoul.kr", 2, false}, - {1, "ulsan.kr", 2, false}, - {1, "kw", 1, false}, - {1, "com.kw", 2, false}, - {1, "edu.kw", 2, false}, - {1, "emb.kw", 2, false}, - {1, "gov.kw", 2, false}, - {1, "ind.kw", 2, false}, - {1, "net.kw", 2, false}, - {1, "org.kw", 2, false}, - {1, "ky", 1, false}, - {1, "edu.ky", 2, false}, - {1, "gov.ky", 2, false}, - {1, "com.ky", 2, false}, - {1, "org.ky", 2, false}, - {1, "net.ky", 2, false}, - {1, "kz", 1, false}, - {1, "org.kz", 2, false}, - {1, "edu.kz", 2, false}, - {1, "net.kz", 2, false}, - {1, "gov.kz", 2, false}, - {1, "mil.kz", 2, false}, - {1, "com.kz", 2, false}, - {1, "la", 1, false}, - {1, "int.la", 2, false}, - {1, "net.la", 2, 
false}, - {1, "info.la", 2, false}, - {1, "edu.la", 2, false}, - {1, "gov.la", 2, false}, - {1, "per.la", 2, false}, - {1, "com.la", 2, false}, - {1, "org.la", 2, false}, - {1, "lb", 1, false}, - {1, "com.lb", 2, false}, - {1, "edu.lb", 2, false}, - {1, "gov.lb", 2, false}, - {1, "net.lb", 2, false}, - {1, "org.lb", 2, false}, - {1, "lc", 1, false}, - {1, "com.lc", 2, false}, - {1, "net.lc", 2, false}, - {1, "co.lc", 2, false}, - {1, "org.lc", 2, false}, - {1, "edu.lc", 2, false}, - {1, "gov.lc", 2, false}, - {1, "li", 1, false}, - {1, "lk", 1, false}, - {1, "gov.lk", 2, false}, - {1, "sch.lk", 2, false}, - {1, "net.lk", 2, false}, - {1, "int.lk", 2, false}, - {1, "com.lk", 2, false}, - {1, "org.lk", 2, false}, - {1, "edu.lk", 2, false}, - {1, "ngo.lk", 2, false}, - {1, "soc.lk", 2, false}, - {1, "web.lk", 2, false}, - {1, "ltd.lk", 2, false}, - {1, "assn.lk", 2, false}, - {1, "grp.lk", 2, false}, - {1, "hotel.lk", 2, false}, - {1, "ac.lk", 2, false}, - {1, "lr", 1, false}, - {1, "com.lr", 2, false}, - {1, "edu.lr", 2, false}, - {1, "gov.lr", 2, false}, - {1, "org.lr", 2, false}, - {1, "net.lr", 2, false}, - {1, "ls", 1, false}, - {1, "ac.ls", 2, false}, - {1, "biz.ls", 2, false}, - {1, "co.ls", 2, false}, - {1, "edu.ls", 2, false}, - {1, "gov.ls", 2, false}, - {1, "info.ls", 2, false}, - {1, "net.ls", 2, false}, - {1, "org.ls", 2, false}, - {1, "sc.ls", 2, false}, - {1, "lt", 1, false}, - {1, "gov.lt", 2, false}, - {1, "lu", 1, false}, - {1, "lv", 1, false}, - {1, "com.lv", 2, false}, - {1, "edu.lv", 2, false}, - {1, "gov.lv", 2, false}, - {1, "org.lv", 2, false}, - {1, "mil.lv", 2, false}, - {1, "id.lv", 2, false}, - {1, "net.lv", 2, false}, - {1, "asn.lv", 2, false}, - {1, "conf.lv", 2, false}, - {1, "ly", 1, false}, - {1, "com.ly", 2, false}, - {1, "net.ly", 2, false}, - {1, "gov.ly", 2, false}, - {1, "plc.ly", 2, false}, - {1, "edu.ly", 2, false}, - {1, "sch.ly", 2, false}, - {1, "med.ly", 2, false}, - {1, "org.ly", 2, false}, - {1, "id.ly", 2, false}, - {1, 
"ma", 1, false}, - {1, "co.ma", 2, false}, - {1, "net.ma", 2, false}, - {1, "gov.ma", 2, false}, - {1, "org.ma", 2, false}, - {1, "ac.ma", 2, false}, - {1, "press.ma", 2, false}, - {1, "mc", 1, false}, - {1, "tm.mc", 2, false}, - {1, "asso.mc", 2, false}, - {1, "md", 1, false}, - {1, "me", 1, false}, - {1, "co.me", 2, false}, - {1, "net.me", 2, false}, - {1, "org.me", 2, false}, - {1, "edu.me", 2, false}, - {1, "ac.me", 2, false}, - {1, "gov.me", 2, false}, - {1, "its.me", 2, false}, - {1, "priv.me", 2, false}, - {1, "mg", 1, false}, - {1, "org.mg", 2, false}, - {1, "nom.mg", 2, false}, - {1, "gov.mg", 2, false}, - {1, "prd.mg", 2, false}, - {1, "tm.mg", 2, false}, - {1, "edu.mg", 2, false}, - {1, "mil.mg", 2, false}, - {1, "com.mg", 2, false}, - {1, "co.mg", 2, false}, - {1, "mh", 1, false}, - {1, "mil", 1, false}, - {1, "mk", 1, false}, - {1, "com.mk", 2, false}, - {1, "org.mk", 2, false}, - {1, "net.mk", 2, false}, - {1, "edu.mk", 2, false}, - {1, "gov.mk", 2, false}, - {1, "inf.mk", 2, false}, - {1, "name.mk", 2, false}, - {1, "ml", 1, false}, - {1, "com.ml", 2, false}, - {1, "edu.ml", 2, false}, - {1, "gouv.ml", 2, false}, - {1, "gov.ml", 2, false}, - {1, "net.ml", 2, false}, - {1, "org.ml", 2, false}, - {1, "presse.ml", 2, false}, - {2, "mm", 2, false}, - {1, "mn", 1, false}, - {1, "gov.mn", 2, false}, - {1, "edu.mn", 2, false}, - {1, "org.mn", 2, false}, - {1, "mo", 1, false}, - {1, "com.mo", 2, false}, - {1, "net.mo", 2, false}, - {1, "org.mo", 2, false}, - {1, "edu.mo", 2, false}, - {1, "gov.mo", 2, false}, - {1, "mobi", 1, false}, - {1, "mp", 1, false}, - {1, "mq", 1, false}, - {1, "mr", 1, false}, - {1, "gov.mr", 2, false}, - {1, "ms", 1, false}, - {1, "com.ms", 2, false}, - {1, "edu.ms", 2, false}, - {1, "gov.ms", 2, false}, - {1, "net.ms", 2, false}, - {1, "org.ms", 2, false}, - {1, "mt", 1, false}, - {1, "com.mt", 2, false}, - {1, "edu.mt", 2, false}, - {1, "net.mt", 2, false}, - {1, "org.mt", 2, false}, - {1, "mu", 1, false}, - {1, "com.mu", 2, 
false}, - {1, "net.mu", 2, false}, - {1, "org.mu", 2, false}, - {1, "gov.mu", 2, false}, - {1, "ac.mu", 2, false}, - {1, "co.mu", 2, false}, - {1, "or.mu", 2, false}, - {1, "museum", 1, false}, - {1, "academy.museum", 2, false}, - {1, "agriculture.museum", 2, false}, - {1, "air.museum", 2, false}, - {1, "airguard.museum", 2, false}, - {1, "alabama.museum", 2, false}, - {1, "alaska.museum", 2, false}, - {1, "amber.museum", 2, false}, - {1, "ambulance.museum", 2, false}, - {1, "american.museum", 2, false}, - {1, "americana.museum", 2, false}, - {1, "americanantiques.museum", 2, false}, - {1, "americanart.museum", 2, false}, - {1, "amsterdam.museum", 2, false}, - {1, "and.museum", 2, false}, - {1, "annefrank.museum", 2, false}, - {1, "anthro.museum", 2, false}, - {1, "anthropology.museum", 2, false}, - {1, "antiques.museum", 2, false}, - {1, "aquarium.museum", 2, false}, - {1, "arboretum.museum", 2, false}, - {1, "archaeological.museum", 2, false}, - {1, "archaeology.museum", 2, false}, - {1, "architecture.museum", 2, false}, - {1, "art.museum", 2, false}, - {1, "artanddesign.museum", 2, false}, - {1, "artcenter.museum", 2, false}, - {1, "artdeco.museum", 2, false}, - {1, "arteducation.museum", 2, false}, - {1, "artgallery.museum", 2, false}, - {1, "arts.museum", 2, false}, - {1, "artsandcrafts.museum", 2, false}, - {1, "asmatart.museum", 2, false}, - {1, "assassination.museum", 2, false}, - {1, "assisi.museum", 2, false}, - {1, "association.museum", 2, false}, - {1, "astronomy.museum", 2, false}, - {1, "atlanta.museum", 2, false}, - {1, "austin.museum", 2, false}, - {1, "australia.museum", 2, false}, - {1, "automotive.museum", 2, false}, - {1, "aviation.museum", 2, false}, - {1, "axis.museum", 2, false}, - {1, "badajoz.museum", 2, false}, - {1, "baghdad.museum", 2, false}, - {1, "bahn.museum", 2, false}, - {1, "bale.museum", 2, false}, - {1, "baltimore.museum", 2, false}, - {1, "barcelona.museum", 2, false}, - {1, "baseball.museum", 2, false}, - {1, "basel.museum", 
2, false}, - {1, "baths.museum", 2, false}, - {1, "bauern.museum", 2, false}, - {1, "beauxarts.museum", 2, false}, - {1, "beeldengeluid.museum", 2, false}, - {1, "bellevue.museum", 2, false}, - {1, "bergbau.museum", 2, false}, - {1, "berkeley.museum", 2, false}, - {1, "berlin.museum", 2, false}, - {1, "bern.museum", 2, false}, - {1, "bible.museum", 2, false}, - {1, "bilbao.museum", 2, false}, - {1, "bill.museum", 2, false}, - {1, "birdart.museum", 2, false}, - {1, "birthplace.museum", 2, false}, - {1, "bonn.museum", 2, false}, - {1, "boston.museum", 2, false}, - {1, "botanical.museum", 2, false}, - {1, "botanicalgarden.museum", 2, false}, - {1, "botanicgarden.museum", 2, false}, - {1, "botany.museum", 2, false}, - {1, "brandywinevalley.museum", 2, false}, - {1, "brasil.museum", 2, false}, - {1, "bristol.museum", 2, false}, - {1, "british.museum", 2, false}, - {1, "britishcolumbia.museum", 2, false}, - {1, "broadcast.museum", 2, false}, - {1, "brunel.museum", 2, false}, - {1, "brussel.museum", 2, false}, - {1, "brussels.museum", 2, false}, - {1, "bruxelles.museum", 2, false}, - {1, "building.museum", 2, false}, - {1, "burghof.museum", 2, false}, - {1, "bus.museum", 2, false}, - {1, "bushey.museum", 2, false}, - {1, "cadaques.museum", 2, false}, - {1, "california.museum", 2, false}, - {1, "cambridge.museum", 2, false}, - {1, "can.museum", 2, false}, - {1, "canada.museum", 2, false}, - {1, "capebreton.museum", 2, false}, - {1, "carrier.museum", 2, false}, - {1, "cartoonart.museum", 2, false}, - {1, "casadelamoneda.museum", 2, false}, - {1, "castle.museum", 2, false}, - {1, "castres.museum", 2, false}, - {1, "celtic.museum", 2, false}, - {1, "center.museum", 2, false}, - {1, "chattanooga.museum", 2, false}, - {1, "cheltenham.museum", 2, false}, - {1, "chesapeakebay.museum", 2, false}, - {1, "chicago.museum", 2, false}, - {1, "children.museum", 2, false}, - {1, "childrens.museum", 2, false}, - {1, "childrensgarden.museum", 2, false}, - {1, "chiropractic.museum", 2, 
false}, - {1, "chocolate.museum", 2, false}, - {1, "christiansburg.museum", 2, false}, - {1, "cincinnati.museum", 2, false}, - {1, "cinema.museum", 2, false}, - {1, "circus.museum", 2, false}, - {1, "civilisation.museum", 2, false}, - {1, "civilization.museum", 2, false}, - {1, "civilwar.museum", 2, false}, - {1, "clinton.museum", 2, false}, - {1, "clock.museum", 2, false}, - {1, "coal.museum", 2, false}, - {1, "coastaldefence.museum", 2, false}, - {1, "cody.museum", 2, false}, - {1, "coldwar.museum", 2, false}, - {1, "collection.museum", 2, false}, - {1, "colonialwilliamsburg.museum", 2, false}, - {1, "coloradoplateau.museum", 2, false}, - {1, "columbia.museum", 2, false}, - {1, "columbus.museum", 2, false}, - {1, "communication.museum", 2, false}, - {1, "communications.museum", 2, false}, - {1, "community.museum", 2, false}, - {1, "computer.museum", 2, false}, - {1, "computerhistory.museum", 2, false}, - {1, "xn--comunicaes-v6a2o.museum", 2, false}, - {1, "contemporary.museum", 2, false}, - {1, "contemporaryart.museum", 2, false}, - {1, "convent.museum", 2, false}, - {1, "copenhagen.museum", 2, false}, - {1, "corporation.museum", 2, false}, - {1, "xn--correios-e-telecomunicaes-ghc29a.museum", 2, false}, - {1, "corvette.museum", 2, false}, - {1, "costume.museum", 2, false}, - {1, "countryestate.museum", 2, false}, - {1, "county.museum", 2, false}, - {1, "crafts.museum", 2, false}, - {1, "cranbrook.museum", 2, false}, - {1, "creation.museum", 2, false}, - {1, "cultural.museum", 2, false}, - {1, "culturalcenter.museum", 2, false}, - {1, "culture.museum", 2, false}, - {1, "cyber.museum", 2, false}, - {1, "cymru.museum", 2, false}, - {1, "dali.museum", 2, false}, - {1, "dallas.museum", 2, false}, - {1, "database.museum", 2, false}, - {1, "ddr.museum", 2, false}, - {1, "decorativearts.museum", 2, false}, - {1, "delaware.museum", 2, false}, - {1, "delmenhorst.museum", 2, false}, - {1, "denmark.museum", 2, false}, - {1, "depot.museum", 2, false}, - {1, "design.museum", 
2, false}, - {1, "detroit.museum", 2, false}, - {1, "dinosaur.museum", 2, false}, - {1, "discovery.museum", 2, false}, - {1, "dolls.museum", 2, false}, - {1, "donostia.museum", 2, false}, - {1, "durham.museum", 2, false}, - {1, "eastafrica.museum", 2, false}, - {1, "eastcoast.museum", 2, false}, - {1, "education.museum", 2, false}, - {1, "educational.museum", 2, false}, - {1, "egyptian.museum", 2, false}, - {1, "eisenbahn.museum", 2, false}, - {1, "elburg.museum", 2, false}, - {1, "elvendrell.museum", 2, false}, - {1, "embroidery.museum", 2, false}, - {1, "encyclopedic.museum", 2, false}, - {1, "england.museum", 2, false}, - {1, "entomology.museum", 2, false}, - {1, "environment.museum", 2, false}, - {1, "environmentalconservation.museum", 2, false}, - {1, "epilepsy.museum", 2, false}, - {1, "essex.museum", 2, false}, - {1, "estate.museum", 2, false}, - {1, "ethnology.museum", 2, false}, - {1, "exeter.museum", 2, false}, - {1, "exhibition.museum", 2, false}, - {1, "family.museum", 2, false}, - {1, "farm.museum", 2, false}, - {1, "farmequipment.museum", 2, false}, - {1, "farmers.museum", 2, false}, - {1, "farmstead.museum", 2, false}, - {1, "field.museum", 2, false}, - {1, "figueres.museum", 2, false}, - {1, "filatelia.museum", 2, false}, - {1, "film.museum", 2, false}, - {1, "fineart.museum", 2, false}, - {1, "finearts.museum", 2, false}, - {1, "finland.museum", 2, false}, - {1, "flanders.museum", 2, false}, - {1, "florida.museum", 2, false}, - {1, "force.museum", 2, false}, - {1, "fortmissoula.museum", 2, false}, - {1, "fortworth.museum", 2, false}, - {1, "foundation.museum", 2, false}, - {1, "francaise.museum", 2, false}, - {1, "frankfurt.museum", 2, false}, - {1, "franziskaner.museum", 2, false}, - {1, "freemasonry.museum", 2, false}, - {1, "freiburg.museum", 2, false}, - {1, "fribourg.museum", 2, false}, - {1, "frog.museum", 2, false}, - {1, "fundacio.museum", 2, false}, - {1, "furniture.museum", 2, false}, - {1, "gallery.museum", 2, false}, - {1, 
"garden.museum", 2, false}, - {1, "gateway.museum", 2, false}, - {1, "geelvinck.museum", 2, false}, - {1, "gemological.museum", 2, false}, - {1, "geology.museum", 2, false}, - {1, "georgia.museum", 2, false}, - {1, "giessen.museum", 2, false}, - {1, "glas.museum", 2, false}, - {1, "glass.museum", 2, false}, - {1, "gorge.museum", 2, false}, - {1, "grandrapids.museum", 2, false}, - {1, "graz.museum", 2, false}, - {1, "guernsey.museum", 2, false}, - {1, "halloffame.museum", 2, false}, - {1, "hamburg.museum", 2, false}, - {1, "handson.museum", 2, false}, - {1, "harvestcelebration.museum", 2, false}, - {1, "hawaii.museum", 2, false}, - {1, "health.museum", 2, false}, - {1, "heimatunduhren.museum", 2, false}, - {1, "hellas.museum", 2, false}, - {1, "helsinki.museum", 2, false}, - {1, "hembygdsforbund.museum", 2, false}, - {1, "heritage.museum", 2, false}, - {1, "histoire.museum", 2, false}, - {1, "historical.museum", 2, false}, - {1, "historicalsociety.museum", 2, false}, - {1, "historichouses.museum", 2, false}, - {1, "historisch.museum", 2, false}, - {1, "historisches.museum", 2, false}, - {1, "history.museum", 2, false}, - {1, "historyofscience.museum", 2, false}, - {1, "horology.museum", 2, false}, - {1, "house.museum", 2, false}, - {1, "humanities.museum", 2, false}, - {1, "illustration.museum", 2, false}, - {1, "imageandsound.museum", 2, false}, - {1, "indian.museum", 2, false}, - {1, "indiana.museum", 2, false}, - {1, "indianapolis.museum", 2, false}, - {1, "indianmarket.museum", 2, false}, - {1, "intelligence.museum", 2, false}, - {1, "interactive.museum", 2, false}, - {1, "iraq.museum", 2, false}, - {1, "iron.museum", 2, false}, - {1, "isleofman.museum", 2, false}, - {1, "jamison.museum", 2, false}, - {1, "jefferson.museum", 2, false}, - {1, "jerusalem.museum", 2, false}, - {1, "jewelry.museum", 2, false}, - {1, "jewish.museum", 2, false}, - {1, "jewishart.museum", 2, false}, - {1, "jfk.museum", 2, false}, - {1, "journalism.museum", 2, false}, - {1, 
"judaica.museum", 2, false}, - {1, "judygarland.museum", 2, false}, - {1, "juedisches.museum", 2, false}, - {1, "juif.museum", 2, false}, - {1, "karate.museum", 2, false}, - {1, "karikatur.museum", 2, false}, - {1, "kids.museum", 2, false}, - {1, "koebenhavn.museum", 2, false}, - {1, "koeln.museum", 2, false}, - {1, "kunst.museum", 2, false}, - {1, "kunstsammlung.museum", 2, false}, - {1, "kunstunddesign.museum", 2, false}, - {1, "labor.museum", 2, false}, - {1, "labour.museum", 2, false}, - {1, "lajolla.museum", 2, false}, - {1, "lancashire.museum", 2, false}, - {1, "landes.museum", 2, false}, - {1, "lans.museum", 2, false}, - {1, "xn--lns-qla.museum", 2, false}, - {1, "larsson.museum", 2, false}, - {1, "lewismiller.museum", 2, false}, - {1, "lincoln.museum", 2, false}, - {1, "linz.museum", 2, false}, - {1, "living.museum", 2, false}, - {1, "livinghistory.museum", 2, false}, - {1, "localhistory.museum", 2, false}, - {1, "london.museum", 2, false}, - {1, "losangeles.museum", 2, false}, - {1, "louvre.museum", 2, false}, - {1, "loyalist.museum", 2, false}, - {1, "lucerne.museum", 2, false}, - {1, "luxembourg.museum", 2, false}, - {1, "luzern.museum", 2, false}, - {1, "mad.museum", 2, false}, - {1, "madrid.museum", 2, false}, - {1, "mallorca.museum", 2, false}, - {1, "manchester.museum", 2, false}, - {1, "mansion.museum", 2, false}, - {1, "mansions.museum", 2, false}, - {1, "manx.museum", 2, false}, - {1, "marburg.museum", 2, false}, - {1, "maritime.museum", 2, false}, - {1, "maritimo.museum", 2, false}, - {1, "maryland.museum", 2, false}, - {1, "marylhurst.museum", 2, false}, - {1, "media.museum", 2, false}, - {1, "medical.museum", 2, false}, - {1, "medizinhistorisches.museum", 2, false}, - {1, "meeres.museum", 2, false}, - {1, "memorial.museum", 2, false}, - {1, "mesaverde.museum", 2, false}, - {1, "michigan.museum", 2, false}, - {1, "midatlantic.museum", 2, false}, - {1, "military.museum", 2, false}, - {1, "mill.museum", 2, false}, - {1, "miners.museum", 2, false}, 
- {1, "mining.museum", 2, false}, - {1, "minnesota.museum", 2, false}, - {1, "missile.museum", 2, false}, - {1, "missoula.museum", 2, false}, - {1, "modern.museum", 2, false}, - {1, "moma.museum", 2, false}, - {1, "money.museum", 2, false}, - {1, "monmouth.museum", 2, false}, - {1, "monticello.museum", 2, false}, - {1, "montreal.museum", 2, false}, - {1, "moscow.museum", 2, false}, - {1, "motorcycle.museum", 2, false}, - {1, "muenchen.museum", 2, false}, - {1, "muenster.museum", 2, false}, - {1, "mulhouse.museum", 2, false}, - {1, "muncie.museum", 2, false}, - {1, "museet.museum", 2, false}, - {1, "museumcenter.museum", 2, false}, - {1, "museumvereniging.museum", 2, false}, - {1, "music.museum", 2, false}, - {1, "national.museum", 2, false}, - {1, "nationalfirearms.museum", 2, false}, - {1, "nationalheritage.museum", 2, false}, - {1, "nativeamerican.museum", 2, false}, - {1, "naturalhistory.museum", 2, false}, - {1, "naturalhistorymuseum.museum", 2, false}, - {1, "naturalsciences.museum", 2, false}, - {1, "nature.museum", 2, false}, - {1, "naturhistorisches.museum", 2, false}, - {1, "natuurwetenschappen.museum", 2, false}, - {1, "naumburg.museum", 2, false}, - {1, "naval.museum", 2, false}, - {1, "nebraska.museum", 2, false}, - {1, "neues.museum", 2, false}, - {1, "newhampshire.museum", 2, false}, - {1, "newjersey.museum", 2, false}, - {1, "newmexico.museum", 2, false}, - {1, "newport.museum", 2, false}, - {1, "newspaper.museum", 2, false}, - {1, "newyork.museum", 2, false}, - {1, "niepce.museum", 2, false}, - {1, "norfolk.museum", 2, false}, - {1, "north.museum", 2, false}, - {1, "nrw.museum", 2, false}, - {1, "nyc.museum", 2, false}, - {1, "nyny.museum", 2, false}, - {1, "oceanographic.museum", 2, false}, - {1, "oceanographique.museum", 2, false}, - {1, "omaha.museum", 2, false}, - {1, "online.museum", 2, false}, - {1, "ontario.museum", 2, false}, - {1, "openair.museum", 2, false}, - {1, "oregon.museum", 2, false}, - {1, "oregontrail.museum", 2, false}, - {1, 
"otago.museum", 2, false}, - {1, "oxford.museum", 2, false}, - {1, "pacific.museum", 2, false}, - {1, "paderborn.museum", 2, false}, - {1, "palace.museum", 2, false}, - {1, "paleo.museum", 2, false}, - {1, "palmsprings.museum", 2, false}, - {1, "panama.museum", 2, false}, - {1, "paris.museum", 2, false}, - {1, "pasadena.museum", 2, false}, - {1, "pharmacy.museum", 2, false}, - {1, "philadelphia.museum", 2, false}, - {1, "philadelphiaarea.museum", 2, false}, - {1, "philately.museum", 2, false}, - {1, "phoenix.museum", 2, false}, - {1, "photography.museum", 2, false}, - {1, "pilots.museum", 2, false}, - {1, "pittsburgh.museum", 2, false}, - {1, "planetarium.museum", 2, false}, - {1, "plantation.museum", 2, false}, - {1, "plants.museum", 2, false}, - {1, "plaza.museum", 2, false}, - {1, "portal.museum", 2, false}, - {1, "portland.museum", 2, false}, - {1, "portlligat.museum", 2, false}, - {1, "posts-and-telecommunications.museum", 2, false}, - {1, "preservation.museum", 2, false}, - {1, "presidio.museum", 2, false}, - {1, "press.museum", 2, false}, - {1, "project.museum", 2, false}, - {1, "public.museum", 2, false}, - {1, "pubol.museum", 2, false}, - {1, "quebec.museum", 2, false}, - {1, "railroad.museum", 2, false}, - {1, "railway.museum", 2, false}, - {1, "research.museum", 2, false}, - {1, "resistance.museum", 2, false}, - {1, "riodejaneiro.museum", 2, false}, - {1, "rochester.museum", 2, false}, - {1, "rockart.museum", 2, false}, - {1, "roma.museum", 2, false}, - {1, "russia.museum", 2, false}, - {1, "saintlouis.museum", 2, false}, - {1, "salem.museum", 2, false}, - {1, "salvadordali.museum", 2, false}, - {1, "salzburg.museum", 2, false}, - {1, "sandiego.museum", 2, false}, - {1, "sanfrancisco.museum", 2, false}, - {1, "santabarbara.museum", 2, false}, - {1, "santacruz.museum", 2, false}, - {1, "santafe.museum", 2, false}, - {1, "saskatchewan.museum", 2, false}, - {1, "satx.museum", 2, false}, - {1, "savannahga.museum", 2, false}, - {1, "schlesisches.museum", 2, 
false}, - {1, "schoenbrunn.museum", 2, false}, - {1, "schokoladen.museum", 2, false}, - {1, "school.museum", 2, false}, - {1, "schweiz.museum", 2, false}, - {1, "science.museum", 2, false}, - {1, "scienceandhistory.museum", 2, false}, - {1, "scienceandindustry.museum", 2, false}, - {1, "sciencecenter.museum", 2, false}, - {1, "sciencecenters.museum", 2, false}, - {1, "science-fiction.museum", 2, false}, - {1, "sciencehistory.museum", 2, false}, - {1, "sciences.museum", 2, false}, - {1, "sciencesnaturelles.museum", 2, false}, - {1, "scotland.museum", 2, false}, - {1, "seaport.museum", 2, false}, - {1, "settlement.museum", 2, false}, - {1, "settlers.museum", 2, false}, - {1, "shell.museum", 2, false}, - {1, "sherbrooke.museum", 2, false}, - {1, "sibenik.museum", 2, false}, - {1, "silk.museum", 2, false}, - {1, "ski.museum", 2, false}, - {1, "skole.museum", 2, false}, - {1, "society.museum", 2, false}, - {1, "sologne.museum", 2, false}, - {1, "soundandvision.museum", 2, false}, - {1, "southcarolina.museum", 2, false}, - {1, "southwest.museum", 2, false}, - {1, "space.museum", 2, false}, - {1, "spy.museum", 2, false}, - {1, "square.museum", 2, false}, - {1, "stadt.museum", 2, false}, - {1, "stalbans.museum", 2, false}, - {1, "starnberg.museum", 2, false}, - {1, "state.museum", 2, false}, - {1, "stateofdelaware.museum", 2, false}, - {1, "station.museum", 2, false}, - {1, "steam.museum", 2, false}, - {1, "steiermark.museum", 2, false}, - {1, "stjohn.museum", 2, false}, - {1, "stockholm.museum", 2, false}, - {1, "stpetersburg.museum", 2, false}, - {1, "stuttgart.museum", 2, false}, - {1, "suisse.museum", 2, false}, - {1, "surgeonshall.museum", 2, false}, - {1, "surrey.museum", 2, false}, - {1, "svizzera.museum", 2, false}, - {1, "sweden.museum", 2, false}, - {1, "sydney.museum", 2, false}, - {1, "tank.museum", 2, false}, - {1, "tcm.museum", 2, false}, - {1, "technology.museum", 2, false}, - {1, "telekommunikation.museum", 2, false}, - {1, "television.museum", 2, false}, - 
{1, "texas.museum", 2, false}, - {1, "textile.museum", 2, false}, - {1, "theater.museum", 2, false}, - {1, "time.museum", 2, false}, - {1, "timekeeping.museum", 2, false}, - {1, "topology.museum", 2, false}, - {1, "torino.museum", 2, false}, - {1, "touch.museum", 2, false}, - {1, "town.museum", 2, false}, - {1, "transport.museum", 2, false}, - {1, "tree.museum", 2, false}, - {1, "trolley.museum", 2, false}, - {1, "trust.museum", 2, false}, - {1, "trustee.museum", 2, false}, - {1, "uhren.museum", 2, false}, - {1, "ulm.museum", 2, false}, - {1, "undersea.museum", 2, false}, - {1, "university.museum", 2, false}, - {1, "usa.museum", 2, false}, - {1, "usantiques.museum", 2, false}, - {1, "usarts.museum", 2, false}, - {1, "uscountryestate.museum", 2, false}, - {1, "usculture.museum", 2, false}, - {1, "usdecorativearts.museum", 2, false}, - {1, "usgarden.museum", 2, false}, - {1, "ushistory.museum", 2, false}, - {1, "ushuaia.museum", 2, false}, - {1, "uslivinghistory.museum", 2, false}, - {1, "utah.museum", 2, false}, - {1, "uvic.museum", 2, false}, - {1, "valley.museum", 2, false}, - {1, "vantaa.museum", 2, false}, - {1, "versailles.museum", 2, false}, - {1, "viking.museum", 2, false}, - {1, "village.museum", 2, false}, - {1, "virginia.museum", 2, false}, - {1, "virtual.museum", 2, false}, - {1, "virtuel.museum", 2, false}, - {1, "vlaanderen.museum", 2, false}, - {1, "volkenkunde.museum", 2, false}, - {1, "wales.museum", 2, false}, - {1, "wallonie.museum", 2, false}, - {1, "war.museum", 2, false}, - {1, "washingtondc.museum", 2, false}, - {1, "watchandclock.museum", 2, false}, - {1, "watch-and-clock.museum", 2, false}, - {1, "western.museum", 2, false}, - {1, "westfalen.museum", 2, false}, - {1, "whaling.museum", 2, false}, - {1, "wildlife.museum", 2, false}, - {1, "williamsburg.museum", 2, false}, - {1, "windmill.museum", 2, false}, - {1, "workshop.museum", 2, false}, - {1, "york.museum", 2, false}, - {1, "yorkshire.museum", 2, false}, - {1, "yosemite.museum", 2, 
false}, - {1, "youth.museum", 2, false}, - {1, "zoological.museum", 2, false}, - {1, "zoology.museum", 2, false}, - {1, "xn--9dbhblg6di.museum", 2, false}, - {1, "xn--h1aegh.museum", 2, false}, - {1, "mv", 1, false}, - {1, "aero.mv", 2, false}, - {1, "biz.mv", 2, false}, - {1, "com.mv", 2, false}, - {1, "coop.mv", 2, false}, - {1, "edu.mv", 2, false}, - {1, "gov.mv", 2, false}, - {1, "info.mv", 2, false}, - {1, "int.mv", 2, false}, - {1, "mil.mv", 2, false}, - {1, "museum.mv", 2, false}, - {1, "name.mv", 2, false}, - {1, "net.mv", 2, false}, - {1, "org.mv", 2, false}, - {1, "pro.mv", 2, false}, - {1, "mw", 1, false}, - {1, "ac.mw", 2, false}, - {1, "biz.mw", 2, false}, - {1, "co.mw", 2, false}, - {1, "com.mw", 2, false}, - {1, "coop.mw", 2, false}, - {1, "edu.mw", 2, false}, - {1, "gov.mw", 2, false}, - {1, "int.mw", 2, false}, - {1, "museum.mw", 2, false}, - {1, "net.mw", 2, false}, - {1, "org.mw", 2, false}, - {1, "mx", 1, false}, - {1, "com.mx", 2, false}, - {1, "org.mx", 2, false}, - {1, "gob.mx", 2, false}, - {1, "edu.mx", 2, false}, - {1, "net.mx", 2, false}, - {1, "my", 1, false}, - {1, "com.my", 2, false}, - {1, "net.my", 2, false}, - {1, "org.my", 2, false}, - {1, "gov.my", 2, false}, - {1, "edu.my", 2, false}, - {1, "mil.my", 2, false}, - {1, "name.my", 2, false}, - {1, "mz", 1, false}, - {1, "ac.mz", 2, false}, - {1, "adv.mz", 2, false}, - {1, "co.mz", 2, false}, - {1, "edu.mz", 2, false}, - {1, "gov.mz", 2, false}, - {1, "mil.mz", 2, false}, - {1, "net.mz", 2, false}, - {1, "org.mz", 2, false}, - {1, "na", 1, false}, - {1, "info.na", 2, false}, - {1, "pro.na", 2, false}, - {1, "name.na", 2, false}, - {1, "school.na", 2, false}, - {1, "or.na", 2, false}, - {1, "dr.na", 2, false}, - {1, "us.na", 2, false}, - {1, "mx.na", 2, false}, - {1, "ca.na", 2, false}, - {1, "in.na", 2, false}, - {1, "cc.na", 2, false}, - {1, "tv.na", 2, false}, - {1, "ws.na", 2, false}, - {1, "mobi.na", 2, false}, - {1, "co.na", 2, false}, - {1, "com.na", 2, false}, - {1, "org.na", 
2, false}, - {1, "name", 1, false}, - {1, "nc", 1, false}, - {1, "asso.nc", 2, false}, - {1, "nom.nc", 2, false}, - {1, "ne", 1, false}, - {1, "net", 1, false}, - {1, "nf", 1, false}, - {1, "com.nf", 2, false}, - {1, "net.nf", 2, false}, - {1, "per.nf", 2, false}, - {1, "rec.nf", 2, false}, - {1, "web.nf", 2, false}, - {1, "arts.nf", 2, false}, - {1, "firm.nf", 2, false}, - {1, "info.nf", 2, false}, - {1, "other.nf", 2, false}, - {1, "store.nf", 2, false}, - {1, "ng", 1, false}, - {1, "com.ng", 2, false}, - {1, "edu.ng", 2, false}, - {1, "gov.ng", 2, false}, - {1, "i.ng", 2, false}, - {1, "mil.ng", 2, false}, - {1, "mobi.ng", 2, false}, - {1, "name.ng", 2, false}, - {1, "net.ng", 2, false}, - {1, "org.ng", 2, false}, - {1, "sch.ng", 2, false}, - {1, "ni", 1, false}, - {1, "ac.ni", 2, false}, - {1, "biz.ni", 2, false}, - {1, "co.ni", 2, false}, - {1, "com.ni", 2, false}, - {1, "edu.ni", 2, false}, - {1, "gob.ni", 2, false}, - {1, "in.ni", 2, false}, - {1, "info.ni", 2, false}, - {1, "int.ni", 2, false}, - {1, "mil.ni", 2, false}, - {1, "net.ni", 2, false}, - {1, "nom.ni", 2, false}, - {1, "org.ni", 2, false}, - {1, "web.ni", 2, false}, - {1, "nl", 1, false}, - {1, "no", 1, false}, - {1, "fhs.no", 2, false}, - {1, "vgs.no", 2, false}, - {1, "fylkesbibl.no", 2, false}, - {1, "folkebibl.no", 2, false}, - {1, "museum.no", 2, false}, - {1, "idrett.no", 2, false}, - {1, "priv.no", 2, false}, - {1, "mil.no", 2, false}, - {1, "stat.no", 2, false}, - {1, "dep.no", 2, false}, - {1, "kommune.no", 2, false}, - {1, "herad.no", 2, false}, - {1, "aa.no", 2, false}, - {1, "ah.no", 2, false}, - {1, "bu.no", 2, false}, - {1, "fm.no", 2, false}, - {1, "hl.no", 2, false}, - {1, "hm.no", 2, false}, - {1, "jan-mayen.no", 2, false}, - {1, "mr.no", 2, false}, - {1, "nl.no", 2, false}, - {1, "nt.no", 2, false}, - {1, "of.no", 2, false}, - {1, "ol.no", 2, false}, - {1, "oslo.no", 2, false}, - {1, "rl.no", 2, false}, - {1, "sf.no", 2, false}, - {1, "st.no", 2, false}, - {1, "svalbard.no", 2, 
false}, - {1, "tm.no", 2, false}, - {1, "tr.no", 2, false}, - {1, "va.no", 2, false}, - {1, "vf.no", 2, false}, - {1, "gs.aa.no", 3, false}, - {1, "gs.ah.no", 3, false}, - {1, "gs.bu.no", 3, false}, - {1, "gs.fm.no", 3, false}, - {1, "gs.hl.no", 3, false}, - {1, "gs.hm.no", 3, false}, - {1, "gs.jan-mayen.no", 3, false}, - {1, "gs.mr.no", 3, false}, - {1, "gs.nl.no", 3, false}, - {1, "gs.nt.no", 3, false}, - {1, "gs.of.no", 3, false}, - {1, "gs.ol.no", 3, false}, - {1, "gs.oslo.no", 3, false}, - {1, "gs.rl.no", 3, false}, - {1, "gs.sf.no", 3, false}, - {1, "gs.st.no", 3, false}, - {1, "gs.svalbard.no", 3, false}, - {1, "gs.tm.no", 3, false}, - {1, "gs.tr.no", 3, false}, - {1, "gs.va.no", 3, false}, - {1, "gs.vf.no", 3, false}, - {1, "akrehamn.no", 2, false}, - {1, "xn--krehamn-dxa.no", 2, false}, - {1, "algard.no", 2, false}, - {1, "xn--lgrd-poac.no", 2, false}, - {1, "arna.no", 2, false}, - {1, "brumunddal.no", 2, false}, - {1, "bryne.no", 2, false}, - {1, "bronnoysund.no", 2, false}, - {1, "xn--brnnysund-m8ac.no", 2, false}, - {1, "drobak.no", 2, false}, - {1, "xn--drbak-wua.no", 2, false}, - {1, "egersund.no", 2, false}, - {1, "fetsund.no", 2, false}, - {1, "floro.no", 2, false}, - {1, "xn--flor-jra.no", 2, false}, - {1, "fredrikstad.no", 2, false}, - {1, "hokksund.no", 2, false}, - {1, "honefoss.no", 2, false}, - {1, "xn--hnefoss-q1a.no", 2, false}, - {1, "jessheim.no", 2, false}, - {1, "jorpeland.no", 2, false}, - {1, "xn--jrpeland-54a.no", 2, false}, - {1, "kirkenes.no", 2, false}, - {1, "kopervik.no", 2, false}, - {1, "krokstadelva.no", 2, false}, - {1, "langevag.no", 2, false}, - {1, "xn--langevg-jxa.no", 2, false}, - {1, "leirvik.no", 2, false}, - {1, "mjondalen.no", 2, false}, - {1, "xn--mjndalen-64a.no", 2, false}, - {1, "mo-i-rana.no", 2, false}, - {1, "mosjoen.no", 2, false}, - {1, "xn--mosjen-eya.no", 2, false}, - {1, "nesoddtangen.no", 2, false}, - {1, "orkanger.no", 2, false}, - {1, "osoyro.no", 2, false}, - {1, "xn--osyro-wua.no", 2, false}, - {1, 
"raholt.no", 2, false}, - {1, "xn--rholt-mra.no", 2, false}, - {1, "sandnessjoen.no", 2, false}, - {1, "xn--sandnessjen-ogb.no", 2, false}, - {1, "skedsmokorset.no", 2, false}, - {1, "slattum.no", 2, false}, - {1, "spjelkavik.no", 2, false}, - {1, "stathelle.no", 2, false}, - {1, "stavern.no", 2, false}, - {1, "stjordalshalsen.no", 2, false}, - {1, "xn--stjrdalshalsen-sqb.no", 2, false}, - {1, "tananger.no", 2, false}, - {1, "tranby.no", 2, false}, - {1, "vossevangen.no", 2, false}, - {1, "afjord.no", 2, false}, - {1, "xn--fjord-lra.no", 2, false}, - {1, "agdenes.no", 2, false}, - {1, "al.no", 2, false}, - {1, "xn--l-1fa.no", 2, false}, - {1, "alesund.no", 2, false}, - {1, "xn--lesund-hua.no", 2, false}, - {1, "alstahaug.no", 2, false}, - {1, "alta.no", 2, false}, - {1, "xn--lt-liac.no", 2, false}, - {1, "alaheadju.no", 2, false}, - {1, "xn--laheadju-7ya.no", 2, false}, - {1, "alvdal.no", 2, false}, - {1, "amli.no", 2, false}, - {1, "xn--mli-tla.no", 2, false}, - {1, "amot.no", 2, false}, - {1, "xn--mot-tla.no", 2, false}, - {1, "andebu.no", 2, false}, - {1, "andoy.no", 2, false}, - {1, "xn--andy-ira.no", 2, false}, - {1, "andasuolo.no", 2, false}, - {1, "ardal.no", 2, false}, - {1, "xn--rdal-poa.no", 2, false}, - {1, "aremark.no", 2, false}, - {1, "arendal.no", 2, false}, - {1, "xn--s-1fa.no", 2, false}, - {1, "aseral.no", 2, false}, - {1, "xn--seral-lra.no", 2, false}, - {1, "asker.no", 2, false}, - {1, "askim.no", 2, false}, - {1, "askvoll.no", 2, false}, - {1, "askoy.no", 2, false}, - {1, "xn--asky-ira.no", 2, false}, - {1, "asnes.no", 2, false}, - {1, "xn--snes-poa.no", 2, false}, - {1, "audnedaln.no", 2, false}, - {1, "aukra.no", 2, false}, - {1, "aure.no", 2, false}, - {1, "aurland.no", 2, false}, - {1, "aurskog-holand.no", 2, false}, - {1, "xn--aurskog-hland-jnb.no", 2, false}, - {1, "austevoll.no", 2, false}, - {1, "austrheim.no", 2, false}, - {1, "averoy.no", 2, false}, - {1, "xn--avery-yua.no", 2, false}, - {1, "balestrand.no", 2, false}, - {1, 
"ballangen.no", 2, false}, - {1, "balat.no", 2, false}, - {1, "xn--blt-elab.no", 2, false}, - {1, "balsfjord.no", 2, false}, - {1, "bahccavuotna.no", 2, false}, - {1, "xn--bhccavuotna-k7a.no", 2, false}, - {1, "bamble.no", 2, false}, - {1, "bardu.no", 2, false}, - {1, "beardu.no", 2, false}, - {1, "beiarn.no", 2, false}, - {1, "bajddar.no", 2, false}, - {1, "xn--bjddar-pta.no", 2, false}, - {1, "baidar.no", 2, false}, - {1, "xn--bidr-5nac.no", 2, false}, - {1, "berg.no", 2, false}, - {1, "bergen.no", 2, false}, - {1, "berlevag.no", 2, false}, - {1, "xn--berlevg-jxa.no", 2, false}, - {1, "bearalvahki.no", 2, false}, - {1, "xn--bearalvhki-y4a.no", 2, false}, - {1, "bindal.no", 2, false}, - {1, "birkenes.no", 2, false}, - {1, "bjarkoy.no", 2, false}, - {1, "xn--bjarky-fya.no", 2, false}, - {1, "bjerkreim.no", 2, false}, - {1, "bjugn.no", 2, false}, - {1, "bodo.no", 2, false}, - {1, "xn--bod-2na.no", 2, false}, - {1, "badaddja.no", 2, false}, - {1, "xn--bdddj-mrabd.no", 2, false}, - {1, "budejju.no", 2, false}, - {1, "bokn.no", 2, false}, - {1, "bremanger.no", 2, false}, - {1, "bronnoy.no", 2, false}, - {1, "xn--brnny-wuac.no", 2, false}, - {1, "bygland.no", 2, false}, - {1, "bykle.no", 2, false}, - {1, "barum.no", 2, false}, - {1, "xn--brum-voa.no", 2, false}, - {1, "bo.telemark.no", 3, false}, - {1, "xn--b-5ga.telemark.no", 3, false}, - {1, "bo.nordland.no", 3, false}, - {1, "xn--b-5ga.nordland.no", 3, false}, - {1, "bievat.no", 2, false}, - {1, "xn--bievt-0qa.no", 2, false}, - {1, "bomlo.no", 2, false}, - {1, "xn--bmlo-gra.no", 2, false}, - {1, "batsfjord.no", 2, false}, - {1, "xn--btsfjord-9za.no", 2, false}, - {1, "bahcavuotna.no", 2, false}, - {1, "xn--bhcavuotna-s4a.no", 2, false}, - {1, "dovre.no", 2, false}, - {1, "drammen.no", 2, false}, - {1, "drangedal.no", 2, false}, - {1, "dyroy.no", 2, false}, - {1, "xn--dyry-ira.no", 2, false}, - {1, "donna.no", 2, false}, - {1, "xn--dnna-gra.no", 2, false}, - {1, "eid.no", 2, false}, - {1, "eidfjord.no", 2, false}, - 
{1, "eidsberg.no", 2, false}, - {1, "eidskog.no", 2, false}, - {1, "eidsvoll.no", 2, false}, - {1, "eigersund.no", 2, false}, - {1, "elverum.no", 2, false}, - {1, "enebakk.no", 2, false}, - {1, "engerdal.no", 2, false}, - {1, "etne.no", 2, false}, - {1, "etnedal.no", 2, false}, - {1, "evenes.no", 2, false}, - {1, "evenassi.no", 2, false}, - {1, "xn--eveni-0qa01ga.no", 2, false}, - {1, "evje-og-hornnes.no", 2, false}, - {1, "farsund.no", 2, false}, - {1, "fauske.no", 2, false}, - {1, "fuossko.no", 2, false}, - {1, "fuoisku.no", 2, false}, - {1, "fedje.no", 2, false}, - {1, "fet.no", 2, false}, - {1, "finnoy.no", 2, false}, - {1, "xn--finny-yua.no", 2, false}, - {1, "fitjar.no", 2, false}, - {1, "fjaler.no", 2, false}, - {1, "fjell.no", 2, false}, - {1, "flakstad.no", 2, false}, - {1, "flatanger.no", 2, false}, - {1, "flekkefjord.no", 2, false}, - {1, "flesberg.no", 2, false}, - {1, "flora.no", 2, false}, - {1, "fla.no", 2, false}, - {1, "xn--fl-zia.no", 2, false}, - {1, "folldal.no", 2, false}, - {1, "forsand.no", 2, false}, - {1, "fosnes.no", 2, false}, - {1, "frei.no", 2, false}, - {1, "frogn.no", 2, false}, - {1, "froland.no", 2, false}, - {1, "frosta.no", 2, false}, - {1, "frana.no", 2, false}, - {1, "xn--frna-woa.no", 2, false}, - {1, "froya.no", 2, false}, - {1, "xn--frya-hra.no", 2, false}, - {1, "fusa.no", 2, false}, - {1, "fyresdal.no", 2, false}, - {1, "forde.no", 2, false}, - {1, "xn--frde-gra.no", 2, false}, - {1, "gamvik.no", 2, false}, - {1, "gangaviika.no", 2, false}, - {1, "xn--ggaviika-8ya47h.no", 2, false}, - {1, "gaular.no", 2, false}, - {1, "gausdal.no", 2, false}, - {1, "gildeskal.no", 2, false}, - {1, "xn--gildeskl-g0a.no", 2, false}, - {1, "giske.no", 2, false}, - {1, "gjemnes.no", 2, false}, - {1, "gjerdrum.no", 2, false}, - {1, "gjerstad.no", 2, false}, - {1, "gjesdal.no", 2, false}, - {1, "gjovik.no", 2, false}, - {1, "xn--gjvik-wua.no", 2, false}, - {1, "gloppen.no", 2, false}, - {1, "gol.no", 2, false}, - {1, "gran.no", 2, false}, - {1, 
"grane.no", 2, false}, - {1, "granvin.no", 2, false}, - {1, "gratangen.no", 2, false}, - {1, "grimstad.no", 2, false}, - {1, "grong.no", 2, false}, - {1, "kraanghke.no", 2, false}, - {1, "xn--kranghke-b0a.no", 2, false}, - {1, "grue.no", 2, false}, - {1, "gulen.no", 2, false}, - {1, "hadsel.no", 2, false}, - {1, "halden.no", 2, false}, - {1, "halsa.no", 2, false}, - {1, "hamar.no", 2, false}, - {1, "hamaroy.no", 2, false}, - {1, "habmer.no", 2, false}, - {1, "xn--hbmer-xqa.no", 2, false}, - {1, "hapmir.no", 2, false}, - {1, "xn--hpmir-xqa.no", 2, false}, - {1, "hammerfest.no", 2, false}, - {1, "hammarfeasta.no", 2, false}, - {1, "xn--hmmrfeasta-s4ac.no", 2, false}, - {1, "haram.no", 2, false}, - {1, "hareid.no", 2, false}, - {1, "harstad.no", 2, false}, - {1, "hasvik.no", 2, false}, - {1, "aknoluokta.no", 2, false}, - {1, "xn--koluokta-7ya57h.no", 2, false}, - {1, "hattfjelldal.no", 2, false}, - {1, "aarborte.no", 2, false}, - {1, "haugesund.no", 2, false}, - {1, "hemne.no", 2, false}, - {1, "hemnes.no", 2, false}, - {1, "hemsedal.no", 2, false}, - {1, "heroy.more-og-romsdal.no", 3, false}, - {1, "xn--hery-ira.xn--mre-og-romsdal-qqb.no", 3, false}, - {1, "heroy.nordland.no", 3, false}, - {1, "xn--hery-ira.nordland.no", 3, false}, - {1, "hitra.no", 2, false}, - {1, "hjartdal.no", 2, false}, - {1, "hjelmeland.no", 2, false}, - {1, "hobol.no", 2, false}, - {1, "xn--hobl-ira.no", 2, false}, - {1, "hof.no", 2, false}, - {1, "hol.no", 2, false}, - {1, "hole.no", 2, false}, - {1, "holmestrand.no", 2, false}, - {1, "holtalen.no", 2, false}, - {1, "xn--holtlen-hxa.no", 2, false}, - {1, "hornindal.no", 2, false}, - {1, "horten.no", 2, false}, - {1, "hurdal.no", 2, false}, - {1, "hurum.no", 2, false}, - {1, "hvaler.no", 2, false}, - {1, "hyllestad.no", 2, false}, - {1, "hagebostad.no", 2, false}, - {1, "xn--hgebostad-g3a.no", 2, false}, - {1, "hoyanger.no", 2, false}, - {1, "xn--hyanger-q1a.no", 2, false}, - {1, "hoylandet.no", 2, false}, - {1, "xn--hylandet-54a.no", 2, 
false}, - {1, "ha.no", 2, false}, - {1, "xn--h-2fa.no", 2, false}, - {1, "ibestad.no", 2, false}, - {1, "inderoy.no", 2, false}, - {1, "xn--indery-fya.no", 2, false}, - {1, "iveland.no", 2, false}, - {1, "jevnaker.no", 2, false}, - {1, "jondal.no", 2, false}, - {1, "jolster.no", 2, false}, - {1, "xn--jlster-bya.no", 2, false}, - {1, "karasjok.no", 2, false}, - {1, "karasjohka.no", 2, false}, - {1, "xn--krjohka-hwab49j.no", 2, false}, - {1, "karlsoy.no", 2, false}, - {1, "galsa.no", 2, false}, - {1, "xn--gls-elac.no", 2, false}, - {1, "karmoy.no", 2, false}, - {1, "xn--karmy-yua.no", 2, false}, - {1, "kautokeino.no", 2, false}, - {1, "guovdageaidnu.no", 2, false}, - {1, "klepp.no", 2, false}, - {1, "klabu.no", 2, false}, - {1, "xn--klbu-woa.no", 2, false}, - {1, "kongsberg.no", 2, false}, - {1, "kongsvinger.no", 2, false}, - {1, "kragero.no", 2, false}, - {1, "xn--krager-gya.no", 2, false}, - {1, "kristiansand.no", 2, false}, - {1, "kristiansund.no", 2, false}, - {1, "krodsherad.no", 2, false}, - {1, "xn--krdsherad-m8a.no", 2, false}, - {1, "kvalsund.no", 2, false}, - {1, "rahkkeravju.no", 2, false}, - {1, "xn--rhkkervju-01af.no", 2, false}, - {1, "kvam.no", 2, false}, - {1, "kvinesdal.no", 2, false}, - {1, "kvinnherad.no", 2, false}, - {1, "kviteseid.no", 2, false}, - {1, "kvitsoy.no", 2, false}, - {1, "xn--kvitsy-fya.no", 2, false}, - {1, "kvafjord.no", 2, false}, - {1, "xn--kvfjord-nxa.no", 2, false}, - {1, "giehtavuoatna.no", 2, false}, - {1, "kvanangen.no", 2, false}, - {1, "xn--kvnangen-k0a.no", 2, false}, - {1, "navuotna.no", 2, false}, - {1, "xn--nvuotna-hwa.no", 2, false}, - {1, "kafjord.no", 2, false}, - {1, "xn--kfjord-iua.no", 2, false}, - {1, "gaivuotna.no", 2, false}, - {1, "xn--givuotna-8ya.no", 2, false}, - {1, "larvik.no", 2, false}, - {1, "lavangen.no", 2, false}, - {1, "lavagis.no", 2, false}, - {1, "loabat.no", 2, false}, - {1, "xn--loabt-0qa.no", 2, false}, - {1, "lebesby.no", 2, false}, - {1, "davvesiida.no", 2, false}, - {1, "leikanger.no", 2, 
false}, - {1, "leirfjord.no", 2, false}, - {1, "leka.no", 2, false}, - {1, "leksvik.no", 2, false}, - {1, "lenvik.no", 2, false}, - {1, "leangaviika.no", 2, false}, - {1, "xn--leagaviika-52b.no", 2, false}, - {1, "lesja.no", 2, false}, - {1, "levanger.no", 2, false}, - {1, "lier.no", 2, false}, - {1, "lierne.no", 2, false}, - {1, "lillehammer.no", 2, false}, - {1, "lillesand.no", 2, false}, - {1, "lindesnes.no", 2, false}, - {1, "lindas.no", 2, false}, - {1, "xn--linds-pra.no", 2, false}, - {1, "lom.no", 2, false}, - {1, "loppa.no", 2, false}, - {1, "lahppi.no", 2, false}, - {1, "xn--lhppi-xqa.no", 2, false}, - {1, "lund.no", 2, false}, - {1, "lunner.no", 2, false}, - {1, "luroy.no", 2, false}, - {1, "xn--lury-ira.no", 2, false}, - {1, "luster.no", 2, false}, - {1, "lyngdal.no", 2, false}, - {1, "lyngen.no", 2, false}, - {1, "ivgu.no", 2, false}, - {1, "lardal.no", 2, false}, - {1, "lerdal.no", 2, false}, - {1, "xn--lrdal-sra.no", 2, false}, - {1, "lodingen.no", 2, false}, - {1, "xn--ldingen-q1a.no", 2, false}, - {1, "lorenskog.no", 2, false}, - {1, "xn--lrenskog-54a.no", 2, false}, - {1, "loten.no", 2, false}, - {1, "xn--lten-gra.no", 2, false}, - {1, "malvik.no", 2, false}, - {1, "masoy.no", 2, false}, - {1, "xn--msy-ula0h.no", 2, false}, - {1, "muosat.no", 2, false}, - {1, "xn--muost-0qa.no", 2, false}, - {1, "mandal.no", 2, false}, - {1, "marker.no", 2, false}, - {1, "marnardal.no", 2, false}, - {1, "masfjorden.no", 2, false}, - {1, "meland.no", 2, false}, - {1, "meldal.no", 2, false}, - {1, "melhus.no", 2, false}, - {1, "meloy.no", 2, false}, - {1, "xn--mely-ira.no", 2, false}, - {1, "meraker.no", 2, false}, - {1, "xn--merker-kua.no", 2, false}, - {1, "moareke.no", 2, false}, - {1, "xn--moreke-jua.no", 2, false}, - {1, "midsund.no", 2, false}, - {1, "midtre-gauldal.no", 2, false}, - {1, "modalen.no", 2, false}, - {1, "modum.no", 2, false}, - {1, "molde.no", 2, false}, - {1, "moskenes.no", 2, false}, - {1, "moss.no", 2, false}, - {1, "mosvik.no", 2, false}, - 
{1, "malselv.no", 2, false}, - {1, "xn--mlselv-iua.no", 2, false}, - {1, "malatvuopmi.no", 2, false}, - {1, "xn--mlatvuopmi-s4a.no", 2, false}, - {1, "namdalseid.no", 2, false}, - {1, "aejrie.no", 2, false}, - {1, "namsos.no", 2, false}, - {1, "namsskogan.no", 2, false}, - {1, "naamesjevuemie.no", 2, false}, - {1, "xn--nmesjevuemie-tcba.no", 2, false}, - {1, "laakesvuemie.no", 2, false}, - {1, "nannestad.no", 2, false}, - {1, "narvik.no", 2, false}, - {1, "narviika.no", 2, false}, - {1, "naustdal.no", 2, false}, - {1, "nedre-eiker.no", 2, false}, - {1, "nes.akershus.no", 3, false}, - {1, "nes.buskerud.no", 3, false}, - {1, "nesna.no", 2, false}, - {1, "nesodden.no", 2, false}, - {1, "nesseby.no", 2, false}, - {1, "unjarga.no", 2, false}, - {1, "xn--unjrga-rta.no", 2, false}, - {1, "nesset.no", 2, false}, - {1, "nissedal.no", 2, false}, - {1, "nittedal.no", 2, false}, - {1, "nord-aurdal.no", 2, false}, - {1, "nord-fron.no", 2, false}, - {1, "nord-odal.no", 2, false}, - {1, "norddal.no", 2, false}, - {1, "nordkapp.no", 2, false}, - {1, "davvenjarga.no", 2, false}, - {1, "xn--davvenjrga-y4a.no", 2, false}, - {1, "nordre-land.no", 2, false}, - {1, "nordreisa.no", 2, false}, - {1, "raisa.no", 2, false}, - {1, "xn--risa-5na.no", 2, false}, - {1, "nore-og-uvdal.no", 2, false}, - {1, "notodden.no", 2, false}, - {1, "naroy.no", 2, false}, - {1, "xn--nry-yla5g.no", 2, false}, - {1, "notteroy.no", 2, false}, - {1, "xn--nttery-byae.no", 2, false}, - {1, "odda.no", 2, false}, - {1, "oksnes.no", 2, false}, - {1, "xn--ksnes-uua.no", 2, false}, - {1, "oppdal.no", 2, false}, - {1, "oppegard.no", 2, false}, - {1, "xn--oppegrd-ixa.no", 2, false}, - {1, "orkdal.no", 2, false}, - {1, "orland.no", 2, false}, - {1, "xn--rland-uua.no", 2, false}, - {1, "orskog.no", 2, false}, - {1, "xn--rskog-uua.no", 2, false}, - {1, "orsta.no", 2, false}, - {1, "xn--rsta-fra.no", 2, false}, - {1, "os.hedmark.no", 3, false}, - {1, "os.hordaland.no", 3, false}, - {1, "osen.no", 2, false}, - {1, 
"osteroy.no", 2, false}, - {1, "xn--ostery-fya.no", 2, false}, - {1, "ostre-toten.no", 2, false}, - {1, "xn--stre-toten-zcb.no", 2, false}, - {1, "overhalla.no", 2, false}, - {1, "ovre-eiker.no", 2, false}, - {1, "xn--vre-eiker-k8a.no", 2, false}, - {1, "oyer.no", 2, false}, - {1, "xn--yer-zna.no", 2, false}, - {1, "oygarden.no", 2, false}, - {1, "xn--ygarden-p1a.no", 2, false}, - {1, "oystre-slidre.no", 2, false}, - {1, "xn--ystre-slidre-ujb.no", 2, false}, - {1, "porsanger.no", 2, false}, - {1, "porsangu.no", 2, false}, - {1, "xn--porsgu-sta26f.no", 2, false}, - {1, "porsgrunn.no", 2, false}, - {1, "radoy.no", 2, false}, - {1, "xn--rady-ira.no", 2, false}, - {1, "rakkestad.no", 2, false}, - {1, "rana.no", 2, false}, - {1, "ruovat.no", 2, false}, - {1, "randaberg.no", 2, false}, - {1, "rauma.no", 2, false}, - {1, "rendalen.no", 2, false}, - {1, "rennebu.no", 2, false}, - {1, "rennesoy.no", 2, false}, - {1, "xn--rennesy-v1a.no", 2, false}, - {1, "rindal.no", 2, false}, - {1, "ringebu.no", 2, false}, - {1, "ringerike.no", 2, false}, - {1, "ringsaker.no", 2, false}, - {1, "rissa.no", 2, false}, - {1, "risor.no", 2, false}, - {1, "xn--risr-ira.no", 2, false}, - {1, "roan.no", 2, false}, - {1, "rollag.no", 2, false}, - {1, "rygge.no", 2, false}, - {1, "ralingen.no", 2, false}, - {1, "xn--rlingen-mxa.no", 2, false}, - {1, "rodoy.no", 2, false}, - {1, "xn--rdy-0nab.no", 2, false}, - {1, "romskog.no", 2, false}, - {1, "xn--rmskog-bya.no", 2, false}, - {1, "roros.no", 2, false}, - {1, "xn--rros-gra.no", 2, false}, - {1, "rost.no", 2, false}, - {1, "xn--rst-0na.no", 2, false}, - {1, "royken.no", 2, false}, - {1, "xn--ryken-vua.no", 2, false}, - {1, "royrvik.no", 2, false}, - {1, "xn--ryrvik-bya.no", 2, false}, - {1, "rade.no", 2, false}, - {1, "xn--rde-ula.no", 2, false}, - {1, "salangen.no", 2, false}, - {1, "siellak.no", 2, false}, - {1, "saltdal.no", 2, false}, - {1, "salat.no", 2, false}, - {1, "xn--slt-elab.no", 2, false}, - {1, "xn--slat-5na.no", 2, false}, - {1, 
"samnanger.no", 2, false}, - {1, "sande.more-og-romsdal.no", 3, false}, - {1, "sande.xn--mre-og-romsdal-qqb.no", 3, false}, - {1, "sande.vestfold.no", 3, false}, - {1, "sandefjord.no", 2, false}, - {1, "sandnes.no", 2, false}, - {1, "sandoy.no", 2, false}, - {1, "xn--sandy-yua.no", 2, false}, - {1, "sarpsborg.no", 2, false}, - {1, "sauda.no", 2, false}, - {1, "sauherad.no", 2, false}, - {1, "sel.no", 2, false}, - {1, "selbu.no", 2, false}, - {1, "selje.no", 2, false}, - {1, "seljord.no", 2, false}, - {1, "sigdal.no", 2, false}, - {1, "siljan.no", 2, false}, - {1, "sirdal.no", 2, false}, - {1, "skaun.no", 2, false}, - {1, "skedsmo.no", 2, false}, - {1, "ski.no", 2, false}, - {1, "skien.no", 2, false}, - {1, "skiptvet.no", 2, false}, - {1, "skjervoy.no", 2, false}, - {1, "xn--skjervy-v1a.no", 2, false}, - {1, "skierva.no", 2, false}, - {1, "xn--skierv-uta.no", 2, false}, - {1, "skjak.no", 2, false}, - {1, "xn--skjk-soa.no", 2, false}, - {1, "skodje.no", 2, false}, - {1, "skanland.no", 2, false}, - {1, "xn--sknland-fxa.no", 2, false}, - {1, "skanit.no", 2, false}, - {1, "xn--sknit-yqa.no", 2, false}, - {1, "smola.no", 2, false}, - {1, "xn--smla-hra.no", 2, false}, - {1, "snillfjord.no", 2, false}, - {1, "snasa.no", 2, false}, - {1, "xn--snsa-roa.no", 2, false}, - {1, "snoasa.no", 2, false}, - {1, "snaase.no", 2, false}, - {1, "xn--snase-nra.no", 2, false}, - {1, "sogndal.no", 2, false}, - {1, "sokndal.no", 2, false}, - {1, "sola.no", 2, false}, - {1, "solund.no", 2, false}, - {1, "songdalen.no", 2, false}, - {1, "sortland.no", 2, false}, - {1, "spydeberg.no", 2, false}, - {1, "stange.no", 2, false}, - {1, "stavanger.no", 2, false}, - {1, "steigen.no", 2, false}, - {1, "steinkjer.no", 2, false}, - {1, "stjordal.no", 2, false}, - {1, "xn--stjrdal-s1a.no", 2, false}, - {1, "stokke.no", 2, false}, - {1, "stor-elvdal.no", 2, false}, - {1, "stord.no", 2, false}, - {1, "stordal.no", 2, false}, - {1, "storfjord.no", 2, false}, - {1, "omasvuotna.no", 2, false}, - {1, 
"strand.no", 2, false}, - {1, "stranda.no", 2, false}, - {1, "stryn.no", 2, false}, - {1, "sula.no", 2, false}, - {1, "suldal.no", 2, false}, - {1, "sund.no", 2, false}, - {1, "sunndal.no", 2, false}, - {1, "surnadal.no", 2, false}, - {1, "sveio.no", 2, false}, - {1, "svelvik.no", 2, false}, - {1, "sykkylven.no", 2, false}, - {1, "sogne.no", 2, false}, - {1, "xn--sgne-gra.no", 2, false}, - {1, "somna.no", 2, false}, - {1, "xn--smna-gra.no", 2, false}, - {1, "sondre-land.no", 2, false}, - {1, "xn--sndre-land-0cb.no", 2, false}, - {1, "sor-aurdal.no", 2, false}, - {1, "xn--sr-aurdal-l8a.no", 2, false}, - {1, "sor-fron.no", 2, false}, - {1, "xn--sr-fron-q1a.no", 2, false}, - {1, "sor-odal.no", 2, false}, - {1, "xn--sr-odal-q1a.no", 2, false}, - {1, "sor-varanger.no", 2, false}, - {1, "xn--sr-varanger-ggb.no", 2, false}, - {1, "matta-varjjat.no", 2, false}, - {1, "xn--mtta-vrjjat-k7af.no", 2, false}, - {1, "sorfold.no", 2, false}, - {1, "xn--srfold-bya.no", 2, false}, - {1, "sorreisa.no", 2, false}, - {1, "xn--srreisa-q1a.no", 2, false}, - {1, "sorum.no", 2, false}, - {1, "xn--srum-gra.no", 2, false}, - {1, "tana.no", 2, false}, - {1, "deatnu.no", 2, false}, - {1, "time.no", 2, false}, - {1, "tingvoll.no", 2, false}, - {1, "tinn.no", 2, false}, - {1, "tjeldsund.no", 2, false}, - {1, "dielddanuorri.no", 2, false}, - {1, "tjome.no", 2, false}, - {1, "xn--tjme-hra.no", 2, false}, - {1, "tokke.no", 2, false}, - {1, "tolga.no", 2, false}, - {1, "torsken.no", 2, false}, - {1, "tranoy.no", 2, false}, - {1, "xn--trany-yua.no", 2, false}, - {1, "tromso.no", 2, false}, - {1, "xn--troms-zua.no", 2, false}, - {1, "tromsa.no", 2, false}, - {1, "romsa.no", 2, false}, - {1, "trondheim.no", 2, false}, - {1, "troandin.no", 2, false}, - {1, "trysil.no", 2, false}, - {1, "trana.no", 2, false}, - {1, "xn--trna-woa.no", 2, false}, - {1, "trogstad.no", 2, false}, - {1, "xn--trgstad-r1a.no", 2, false}, - {1, "tvedestrand.no", 2, false}, - {1, "tydal.no", 2, false}, - {1, "tynset.no", 2, 
false}, - {1, "tysfjord.no", 2, false}, - {1, "divtasvuodna.no", 2, false}, - {1, "divttasvuotna.no", 2, false}, - {1, "tysnes.no", 2, false}, - {1, "tysvar.no", 2, false}, - {1, "xn--tysvr-vra.no", 2, false}, - {1, "tonsberg.no", 2, false}, - {1, "xn--tnsberg-q1a.no", 2, false}, - {1, "ullensaker.no", 2, false}, - {1, "ullensvang.no", 2, false}, - {1, "ulvik.no", 2, false}, - {1, "utsira.no", 2, false}, - {1, "vadso.no", 2, false}, - {1, "xn--vads-jra.no", 2, false}, - {1, "cahcesuolo.no", 2, false}, - {1, "xn--hcesuolo-7ya35b.no", 2, false}, - {1, "vaksdal.no", 2, false}, - {1, "valle.no", 2, false}, - {1, "vang.no", 2, false}, - {1, "vanylven.no", 2, false}, - {1, "vardo.no", 2, false}, - {1, "xn--vard-jra.no", 2, false}, - {1, "varggat.no", 2, false}, - {1, "xn--vrggt-xqad.no", 2, false}, - {1, "vefsn.no", 2, false}, - {1, "vaapste.no", 2, false}, - {1, "vega.no", 2, false}, - {1, "vegarshei.no", 2, false}, - {1, "xn--vegrshei-c0a.no", 2, false}, - {1, "vennesla.no", 2, false}, - {1, "verdal.no", 2, false}, - {1, "verran.no", 2, false}, - {1, "vestby.no", 2, false}, - {1, "vestnes.no", 2, false}, - {1, "vestre-slidre.no", 2, false}, - {1, "vestre-toten.no", 2, false}, - {1, "vestvagoy.no", 2, false}, - {1, "xn--vestvgy-ixa6o.no", 2, false}, - {1, "vevelstad.no", 2, false}, - {1, "vik.no", 2, false}, - {1, "vikna.no", 2, false}, - {1, "vindafjord.no", 2, false}, - {1, "volda.no", 2, false}, - {1, "voss.no", 2, false}, - {1, "varoy.no", 2, false}, - {1, "xn--vry-yla5g.no", 2, false}, - {1, "vagan.no", 2, false}, - {1, "xn--vgan-qoa.no", 2, false}, - {1, "voagat.no", 2, false}, - {1, "vagsoy.no", 2, false}, - {1, "xn--vgsy-qoa0j.no", 2, false}, - {1, "vaga.no", 2, false}, - {1, "xn--vg-yiab.no", 2, false}, - {1, "valer.ostfold.no", 3, false}, - {1, "xn--vler-qoa.xn--stfold-9xa.no", 3, false}, - {1, "valer.hedmark.no", 3, false}, - {1, "xn--vler-qoa.hedmark.no", 3, false}, - {2, "np", 2, false}, - {1, "nr", 1, false}, - {1, "biz.nr", 2, false}, - {1, "info.nr", 2, 
false}, - {1, "gov.nr", 2, false}, - {1, "edu.nr", 2, false}, - {1, "org.nr", 2, false}, - {1, "net.nr", 2, false}, - {1, "com.nr", 2, false}, - {1, "nu", 1, false}, - {1, "nz", 1, false}, - {1, "ac.nz", 2, false}, - {1, "co.nz", 2, false}, - {1, "cri.nz", 2, false}, - {1, "geek.nz", 2, false}, - {1, "gen.nz", 2, false}, - {1, "govt.nz", 2, false}, - {1, "health.nz", 2, false}, - {1, "iwi.nz", 2, false}, - {1, "kiwi.nz", 2, false}, - {1, "maori.nz", 2, false}, - {1, "mil.nz", 2, false}, - {1, "xn--mori-qsa.nz", 2, false}, - {1, "net.nz", 2, false}, - {1, "org.nz", 2, false}, - {1, "parliament.nz", 2, false}, - {1, "school.nz", 2, false}, - {1, "om", 1, false}, - {1, "co.om", 2, false}, - {1, "com.om", 2, false}, - {1, "edu.om", 2, false}, - {1, "gov.om", 2, false}, - {1, "med.om", 2, false}, - {1, "museum.om", 2, false}, - {1, "net.om", 2, false}, - {1, "org.om", 2, false}, - {1, "pro.om", 2, false}, - {1, "onion", 1, false}, - {1, "org", 1, false}, - {1, "pa", 1, false}, - {1, "ac.pa", 2, false}, - {1, "gob.pa", 2, false}, - {1, "com.pa", 2, false}, - {1, "org.pa", 2, false}, - {1, "sld.pa", 2, false}, - {1, "edu.pa", 2, false}, - {1, "net.pa", 2, false}, - {1, "ing.pa", 2, false}, - {1, "abo.pa", 2, false}, - {1, "med.pa", 2, false}, - {1, "nom.pa", 2, false}, - {1, "pe", 1, false}, - {1, "edu.pe", 2, false}, - {1, "gob.pe", 2, false}, - {1, "nom.pe", 2, false}, - {1, "mil.pe", 2, false}, - {1, "org.pe", 2, false}, - {1, "com.pe", 2, false}, - {1, "net.pe", 2, false}, - {1, "pf", 1, false}, - {1, "com.pf", 2, false}, - {1, "org.pf", 2, false}, - {1, "edu.pf", 2, false}, - {2, "pg", 2, false}, - {1, "ph", 1, false}, - {1, "com.ph", 2, false}, - {1, "net.ph", 2, false}, - {1, "org.ph", 2, false}, - {1, "gov.ph", 2, false}, - {1, "edu.ph", 2, false}, - {1, "ngo.ph", 2, false}, - {1, "mil.ph", 2, false}, - {1, "i.ph", 2, false}, - {1, "pk", 1, false}, - {1, "com.pk", 2, false}, - {1, "net.pk", 2, false}, - {1, "edu.pk", 2, false}, - {1, "org.pk", 2, false}, - {1, 
"fam.pk", 2, false}, - {1, "biz.pk", 2, false}, - {1, "web.pk", 2, false}, - {1, "gov.pk", 2, false}, - {1, "gob.pk", 2, false}, - {1, "gok.pk", 2, false}, - {1, "gon.pk", 2, false}, - {1, "gop.pk", 2, false}, - {1, "gos.pk", 2, false}, - {1, "info.pk", 2, false}, - {1, "pl", 1, false}, - {1, "com.pl", 2, false}, - {1, "net.pl", 2, false}, - {1, "org.pl", 2, false}, - {1, "aid.pl", 2, false}, - {1, "agro.pl", 2, false}, - {1, "atm.pl", 2, false}, - {1, "auto.pl", 2, false}, - {1, "biz.pl", 2, false}, - {1, "edu.pl", 2, false}, - {1, "gmina.pl", 2, false}, - {1, "gsm.pl", 2, false}, - {1, "info.pl", 2, false}, - {1, "mail.pl", 2, false}, - {1, "miasta.pl", 2, false}, - {1, "media.pl", 2, false}, - {1, "mil.pl", 2, false}, - {1, "nieruchomosci.pl", 2, false}, - {1, "nom.pl", 2, false}, - {1, "pc.pl", 2, false}, - {1, "powiat.pl", 2, false}, - {1, "priv.pl", 2, false}, - {1, "realestate.pl", 2, false}, - {1, "rel.pl", 2, false}, - {1, "sex.pl", 2, false}, - {1, "shop.pl", 2, false}, - {1, "sklep.pl", 2, false}, - {1, "sos.pl", 2, false}, - {1, "szkola.pl", 2, false}, - {1, "targi.pl", 2, false}, - {1, "tm.pl", 2, false}, - {1, "tourism.pl", 2, false}, - {1, "travel.pl", 2, false}, - {1, "turystyka.pl", 2, false}, - {1, "gov.pl", 2, false}, - {1, "ap.gov.pl", 3, false}, - {1, "ic.gov.pl", 3, false}, - {1, "is.gov.pl", 3, false}, - {1, "us.gov.pl", 3, false}, - {1, "kmpsp.gov.pl", 3, false}, - {1, "kppsp.gov.pl", 3, false}, - {1, "kwpsp.gov.pl", 3, false}, - {1, "psp.gov.pl", 3, false}, - {1, "wskr.gov.pl", 3, false}, - {1, "kwp.gov.pl", 3, false}, - {1, "mw.gov.pl", 3, false}, - {1, "ug.gov.pl", 3, false}, - {1, "um.gov.pl", 3, false}, - {1, "umig.gov.pl", 3, false}, - {1, "ugim.gov.pl", 3, false}, - {1, "upow.gov.pl", 3, false}, - {1, "uw.gov.pl", 3, false}, - {1, "starostwo.gov.pl", 3, false}, - {1, "pa.gov.pl", 3, false}, - {1, "po.gov.pl", 3, false}, - {1, "psse.gov.pl", 3, false}, - {1, "pup.gov.pl", 3, false}, - {1, "rzgw.gov.pl", 3, false}, - {1, "sa.gov.pl", 3, 
false}, - {1, "so.gov.pl", 3, false}, - {1, "sr.gov.pl", 3, false}, - {1, "wsa.gov.pl", 3, false}, - {1, "sko.gov.pl", 3, false}, - {1, "uzs.gov.pl", 3, false}, - {1, "wiih.gov.pl", 3, false}, - {1, "winb.gov.pl", 3, false}, - {1, "pinb.gov.pl", 3, false}, - {1, "wios.gov.pl", 3, false}, - {1, "witd.gov.pl", 3, false}, - {1, "wzmiuw.gov.pl", 3, false}, - {1, "piw.gov.pl", 3, false}, - {1, "wiw.gov.pl", 3, false}, - {1, "griw.gov.pl", 3, false}, - {1, "wif.gov.pl", 3, false}, - {1, "oum.gov.pl", 3, false}, - {1, "sdn.gov.pl", 3, false}, - {1, "zp.gov.pl", 3, false}, - {1, "uppo.gov.pl", 3, false}, - {1, "mup.gov.pl", 3, false}, - {1, "wuoz.gov.pl", 3, false}, - {1, "konsulat.gov.pl", 3, false}, - {1, "oirm.gov.pl", 3, false}, - {1, "augustow.pl", 2, false}, - {1, "babia-gora.pl", 2, false}, - {1, "bedzin.pl", 2, false}, - {1, "beskidy.pl", 2, false}, - {1, "bialowieza.pl", 2, false}, - {1, "bialystok.pl", 2, false}, - {1, "bielawa.pl", 2, false}, - {1, "bieszczady.pl", 2, false}, - {1, "boleslawiec.pl", 2, false}, - {1, "bydgoszcz.pl", 2, false}, - {1, "bytom.pl", 2, false}, - {1, "cieszyn.pl", 2, false}, - {1, "czeladz.pl", 2, false}, - {1, "czest.pl", 2, false}, - {1, "dlugoleka.pl", 2, false}, - {1, "elblag.pl", 2, false}, - {1, "elk.pl", 2, false}, - {1, "glogow.pl", 2, false}, - {1, "gniezno.pl", 2, false}, - {1, "gorlice.pl", 2, false}, - {1, "grajewo.pl", 2, false}, - {1, "ilawa.pl", 2, false}, - {1, "jaworzno.pl", 2, false}, - {1, "jelenia-gora.pl", 2, false}, - {1, "jgora.pl", 2, false}, - {1, "kalisz.pl", 2, false}, - {1, "kazimierz-dolny.pl", 2, false}, - {1, "karpacz.pl", 2, false}, - {1, "kartuzy.pl", 2, false}, - {1, "kaszuby.pl", 2, false}, - {1, "katowice.pl", 2, false}, - {1, "kepno.pl", 2, false}, - {1, "ketrzyn.pl", 2, false}, - {1, "klodzko.pl", 2, false}, - {1, "kobierzyce.pl", 2, false}, - {1, "kolobrzeg.pl", 2, false}, - {1, "konin.pl", 2, false}, - {1, "konskowola.pl", 2, false}, - {1, "kutno.pl", 2, false}, - {1, "lapy.pl", 2, false}, - {1, 
"lebork.pl", 2, false}, - {1, "legnica.pl", 2, false}, - {1, "lezajsk.pl", 2, false}, - {1, "limanowa.pl", 2, false}, - {1, "lomza.pl", 2, false}, - {1, "lowicz.pl", 2, false}, - {1, "lubin.pl", 2, false}, - {1, "lukow.pl", 2, false}, - {1, "malbork.pl", 2, false}, - {1, "malopolska.pl", 2, false}, - {1, "mazowsze.pl", 2, false}, - {1, "mazury.pl", 2, false}, - {1, "mielec.pl", 2, false}, - {1, "mielno.pl", 2, false}, - {1, "mragowo.pl", 2, false}, - {1, "naklo.pl", 2, false}, - {1, "nowaruda.pl", 2, false}, - {1, "nysa.pl", 2, false}, - {1, "olawa.pl", 2, false}, - {1, "olecko.pl", 2, false}, - {1, "olkusz.pl", 2, false}, - {1, "olsztyn.pl", 2, false}, - {1, "opoczno.pl", 2, false}, - {1, "opole.pl", 2, false}, - {1, "ostroda.pl", 2, false}, - {1, "ostroleka.pl", 2, false}, - {1, "ostrowiec.pl", 2, false}, - {1, "ostrowwlkp.pl", 2, false}, - {1, "pila.pl", 2, false}, - {1, "pisz.pl", 2, false}, - {1, "podhale.pl", 2, false}, - {1, "podlasie.pl", 2, false}, - {1, "polkowice.pl", 2, false}, - {1, "pomorze.pl", 2, false}, - {1, "pomorskie.pl", 2, false}, - {1, "prochowice.pl", 2, false}, - {1, "pruszkow.pl", 2, false}, - {1, "przeworsk.pl", 2, false}, - {1, "pulawy.pl", 2, false}, - {1, "radom.pl", 2, false}, - {1, "rawa-maz.pl", 2, false}, - {1, "rybnik.pl", 2, false}, - {1, "rzeszow.pl", 2, false}, - {1, "sanok.pl", 2, false}, - {1, "sejny.pl", 2, false}, - {1, "slask.pl", 2, false}, - {1, "slupsk.pl", 2, false}, - {1, "sosnowiec.pl", 2, false}, - {1, "stalowa-wola.pl", 2, false}, - {1, "skoczow.pl", 2, false}, - {1, "starachowice.pl", 2, false}, - {1, "stargard.pl", 2, false}, - {1, "suwalki.pl", 2, false}, - {1, "swidnica.pl", 2, false}, - {1, "swiebodzin.pl", 2, false}, - {1, "swinoujscie.pl", 2, false}, - {1, "szczecin.pl", 2, false}, - {1, "szczytno.pl", 2, false}, - {1, "tarnobrzeg.pl", 2, false}, - {1, "tgory.pl", 2, false}, - {1, "turek.pl", 2, false}, - {1, "tychy.pl", 2, false}, - {1, "ustka.pl", 2, false}, - {1, "walbrzych.pl", 2, false}, - {1, 
"warmia.pl", 2, false}, - {1, "warszawa.pl", 2, false}, - {1, "waw.pl", 2, false}, - {1, "wegrow.pl", 2, false}, - {1, "wielun.pl", 2, false}, - {1, "wlocl.pl", 2, false}, - {1, "wloclawek.pl", 2, false}, - {1, "wodzislaw.pl", 2, false}, - {1, "wolomin.pl", 2, false}, - {1, "wroclaw.pl", 2, false}, - {1, "zachpomor.pl", 2, false}, - {1, "zagan.pl", 2, false}, - {1, "zarow.pl", 2, false}, - {1, "zgora.pl", 2, false}, - {1, "zgorzelec.pl", 2, false}, - {1, "pm", 1, false}, - {1, "pn", 1, false}, - {1, "gov.pn", 2, false}, - {1, "co.pn", 2, false}, - {1, "org.pn", 2, false}, - {1, "edu.pn", 2, false}, - {1, "net.pn", 2, false}, - {1, "post", 1, false}, - {1, "pr", 1, false}, - {1, "com.pr", 2, false}, - {1, "net.pr", 2, false}, - {1, "org.pr", 2, false}, - {1, "gov.pr", 2, false}, - {1, "edu.pr", 2, false}, - {1, "isla.pr", 2, false}, - {1, "pro.pr", 2, false}, - {1, "biz.pr", 2, false}, - {1, "info.pr", 2, false}, - {1, "name.pr", 2, false}, - {1, "est.pr", 2, false}, - {1, "prof.pr", 2, false}, - {1, "ac.pr", 2, false}, - {1, "pro", 1, false}, - {1, "aaa.pro", 2, false}, - {1, "aca.pro", 2, false}, - {1, "acct.pro", 2, false}, - {1, "avocat.pro", 2, false}, - {1, "bar.pro", 2, false}, - {1, "cpa.pro", 2, false}, - {1, "eng.pro", 2, false}, - {1, "jur.pro", 2, false}, - {1, "law.pro", 2, false}, - {1, "med.pro", 2, false}, - {1, "recht.pro", 2, false}, - {1, "ps", 1, false}, - {1, "edu.ps", 2, false}, - {1, "gov.ps", 2, false}, - {1, "sec.ps", 2, false}, - {1, "plo.ps", 2, false}, - {1, "com.ps", 2, false}, - {1, "org.ps", 2, false}, - {1, "net.ps", 2, false}, - {1, "pt", 1, false}, - {1, "net.pt", 2, false}, - {1, "gov.pt", 2, false}, - {1, "org.pt", 2, false}, - {1, "edu.pt", 2, false}, - {1, "int.pt", 2, false}, - {1, "publ.pt", 2, false}, - {1, "com.pt", 2, false}, - {1, "nome.pt", 2, false}, - {1, "pw", 1, false}, - {1, "co.pw", 2, false}, - {1, "ne.pw", 2, false}, - {1, "or.pw", 2, false}, - {1, "ed.pw", 2, false}, - {1, "go.pw", 2, false}, - {1, "belau.pw", 2, 
false}, - {1, "py", 1, false}, - {1, "com.py", 2, false}, - {1, "coop.py", 2, false}, - {1, "edu.py", 2, false}, - {1, "gov.py", 2, false}, - {1, "mil.py", 2, false}, - {1, "net.py", 2, false}, - {1, "org.py", 2, false}, - {1, "qa", 1, false}, - {1, "com.qa", 2, false}, - {1, "edu.qa", 2, false}, - {1, "gov.qa", 2, false}, - {1, "mil.qa", 2, false}, - {1, "name.qa", 2, false}, - {1, "net.qa", 2, false}, - {1, "org.qa", 2, false}, - {1, "sch.qa", 2, false}, - {1, "re", 1, false}, - {1, "asso.re", 2, false}, - {1, "com.re", 2, false}, - {1, "nom.re", 2, false}, - {1, "ro", 1, false}, - {1, "arts.ro", 2, false}, - {1, "com.ro", 2, false}, - {1, "firm.ro", 2, false}, - {1, "info.ro", 2, false}, - {1, "nom.ro", 2, false}, - {1, "nt.ro", 2, false}, - {1, "org.ro", 2, false}, - {1, "rec.ro", 2, false}, - {1, "store.ro", 2, false}, - {1, "tm.ro", 2, false}, - {1, "www.ro", 2, false}, - {1, "rs", 1, false}, - {1, "ac.rs", 2, false}, - {1, "co.rs", 2, false}, - {1, "edu.rs", 2, false}, - {1, "gov.rs", 2, false}, - {1, "in.rs", 2, false}, - {1, "org.rs", 2, false}, - {1, "ru", 1, false}, - {1, "rw", 1, false}, - {1, "ac.rw", 2, false}, - {1, "co.rw", 2, false}, - {1, "coop.rw", 2, false}, - {1, "gov.rw", 2, false}, - {1, "mil.rw", 2, false}, - {1, "net.rw", 2, false}, - {1, "org.rw", 2, false}, - {1, "sa", 1, false}, - {1, "com.sa", 2, false}, - {1, "net.sa", 2, false}, - {1, "org.sa", 2, false}, - {1, "gov.sa", 2, false}, - {1, "med.sa", 2, false}, - {1, "pub.sa", 2, false}, - {1, "edu.sa", 2, false}, - {1, "sch.sa", 2, false}, - {1, "sb", 1, false}, - {1, "com.sb", 2, false}, - {1, "edu.sb", 2, false}, - {1, "gov.sb", 2, false}, - {1, "net.sb", 2, false}, - {1, "org.sb", 2, false}, - {1, "sc", 1, false}, - {1, "com.sc", 2, false}, - {1, "gov.sc", 2, false}, - {1, "net.sc", 2, false}, - {1, "org.sc", 2, false}, - {1, "edu.sc", 2, false}, - {1, "sd", 1, false}, - {1, "com.sd", 2, false}, - {1, "net.sd", 2, false}, - {1, "org.sd", 2, false}, - {1, "edu.sd", 2, false}, - {1, 
"med.sd", 2, false}, - {1, "tv.sd", 2, false}, - {1, "gov.sd", 2, false}, - {1, "info.sd", 2, false}, - {1, "se", 1, false}, - {1, "a.se", 2, false}, - {1, "ac.se", 2, false}, - {1, "b.se", 2, false}, - {1, "bd.se", 2, false}, - {1, "brand.se", 2, false}, - {1, "c.se", 2, false}, - {1, "d.se", 2, false}, - {1, "e.se", 2, false}, - {1, "f.se", 2, false}, - {1, "fh.se", 2, false}, - {1, "fhsk.se", 2, false}, - {1, "fhv.se", 2, false}, - {1, "g.se", 2, false}, - {1, "h.se", 2, false}, - {1, "i.se", 2, false}, - {1, "k.se", 2, false}, - {1, "komforb.se", 2, false}, - {1, "kommunalforbund.se", 2, false}, - {1, "komvux.se", 2, false}, - {1, "l.se", 2, false}, - {1, "lanbib.se", 2, false}, - {1, "m.se", 2, false}, - {1, "n.se", 2, false}, - {1, "naturbruksgymn.se", 2, false}, - {1, "o.se", 2, false}, - {1, "org.se", 2, false}, - {1, "p.se", 2, false}, - {1, "parti.se", 2, false}, - {1, "pp.se", 2, false}, - {1, "press.se", 2, false}, - {1, "r.se", 2, false}, - {1, "s.se", 2, false}, - {1, "t.se", 2, false}, - {1, "tm.se", 2, false}, - {1, "u.se", 2, false}, - {1, "w.se", 2, false}, - {1, "x.se", 2, false}, - {1, "y.se", 2, false}, - {1, "z.se", 2, false}, - {1, "sg", 1, false}, - {1, "com.sg", 2, false}, - {1, "net.sg", 2, false}, - {1, "org.sg", 2, false}, - {1, "gov.sg", 2, false}, - {1, "edu.sg", 2, false}, - {1, "per.sg", 2, false}, - {1, "sh", 1, false}, - {1, "com.sh", 2, false}, - {1, "net.sh", 2, false}, - {1, "gov.sh", 2, false}, - {1, "org.sh", 2, false}, - {1, "mil.sh", 2, false}, - {1, "si", 1, false}, - {1, "sj", 1, false}, - {1, "sk", 1, false}, - {1, "sl", 1, false}, - {1, "com.sl", 2, false}, - {1, "net.sl", 2, false}, - {1, "edu.sl", 2, false}, - {1, "gov.sl", 2, false}, - {1, "org.sl", 2, false}, - {1, "sm", 1, false}, - {1, "sn", 1, false}, - {1, "art.sn", 2, false}, - {1, "com.sn", 2, false}, - {1, "edu.sn", 2, false}, - {1, "gouv.sn", 2, false}, - {1, "org.sn", 2, false}, - {1, "perso.sn", 2, false}, - {1, "univ.sn", 2, false}, - {1, "so", 1, false}, 
- {1, "com.so", 2, false}, - {1, "edu.so", 2, false}, - {1, "gov.so", 2, false}, - {1, "me.so", 2, false}, - {1, "net.so", 2, false}, - {1, "org.so", 2, false}, - {1, "sr", 1, false}, - {1, "ss", 1, false}, - {1, "biz.ss", 2, false}, - {1, "com.ss", 2, false}, - {1, "edu.ss", 2, false}, - {1, "gov.ss", 2, false}, - {1, "net.ss", 2, false}, - {1, "org.ss", 2, false}, - {1, "st", 1, false}, - {1, "co.st", 2, false}, - {1, "com.st", 2, false}, - {1, "consulado.st", 2, false}, - {1, "edu.st", 2, false}, - {1, "embaixada.st", 2, false}, - {1, "gov.st", 2, false}, - {1, "mil.st", 2, false}, - {1, "net.st", 2, false}, - {1, "org.st", 2, false}, - {1, "principe.st", 2, false}, - {1, "saotome.st", 2, false}, - {1, "store.st", 2, false}, - {1, "su", 1, false}, - {1, "sv", 1, false}, - {1, "com.sv", 2, false}, - {1, "edu.sv", 2, false}, - {1, "gob.sv", 2, false}, - {1, "org.sv", 2, false}, - {1, "red.sv", 2, false}, - {1, "sx", 1, false}, - {1, "gov.sx", 2, false}, - {1, "sy", 1, false}, - {1, "edu.sy", 2, false}, - {1, "gov.sy", 2, false}, - {1, "net.sy", 2, false}, - {1, "mil.sy", 2, false}, - {1, "com.sy", 2, false}, - {1, "org.sy", 2, false}, - {1, "sz", 1, false}, - {1, "co.sz", 2, false}, - {1, "ac.sz", 2, false}, - {1, "org.sz", 2, false}, - {1, "tc", 1, false}, - {1, "td", 1, false}, - {1, "tel", 1, false}, - {1, "tf", 1, false}, - {1, "tg", 1, false}, - {1, "th", 1, false}, - {1, "ac.th", 2, false}, - {1, "co.th", 2, false}, - {1, "go.th", 2, false}, - {1, "in.th", 2, false}, - {1, "mi.th", 2, false}, - {1, "net.th", 2, false}, - {1, "or.th", 2, false}, - {1, "tj", 1, false}, - {1, "ac.tj", 2, false}, - {1, "biz.tj", 2, false}, - {1, "co.tj", 2, false}, - {1, "com.tj", 2, false}, - {1, "edu.tj", 2, false}, - {1, "go.tj", 2, false}, - {1, "gov.tj", 2, false}, - {1, "int.tj", 2, false}, - {1, "mil.tj", 2, false}, - {1, "name.tj", 2, false}, - {1, "net.tj", 2, false}, - {1, "nic.tj", 2, false}, - {1, "org.tj", 2, false}, - {1, "test.tj", 2, false}, - {1, "web.tj", 2, 
false}, - {1, "tk", 1, false}, - {1, "tl", 1, false}, - {1, "gov.tl", 2, false}, - {1, "tm", 1, false}, - {1, "com.tm", 2, false}, - {1, "co.tm", 2, false}, - {1, "org.tm", 2, false}, - {1, "net.tm", 2, false}, - {1, "nom.tm", 2, false}, - {1, "gov.tm", 2, false}, - {1, "mil.tm", 2, false}, - {1, "edu.tm", 2, false}, - {1, "tn", 1, false}, - {1, "com.tn", 2, false}, - {1, "ens.tn", 2, false}, - {1, "fin.tn", 2, false}, - {1, "gov.tn", 2, false}, - {1, "ind.tn", 2, false}, - {1, "intl.tn", 2, false}, - {1, "nat.tn", 2, false}, - {1, "net.tn", 2, false}, - {1, "org.tn", 2, false}, - {1, "info.tn", 2, false}, - {1, "perso.tn", 2, false}, - {1, "tourism.tn", 2, false}, - {1, "edunet.tn", 2, false}, - {1, "rnrt.tn", 2, false}, - {1, "rns.tn", 2, false}, - {1, "rnu.tn", 2, false}, - {1, "mincom.tn", 2, false}, - {1, "agrinet.tn", 2, false}, - {1, "defense.tn", 2, false}, - {1, "turen.tn", 2, false}, - {1, "to", 1, false}, - {1, "com.to", 2, false}, - {1, "gov.to", 2, false}, - {1, "net.to", 2, false}, - {1, "org.to", 2, false}, - {1, "edu.to", 2, false}, - {1, "mil.to", 2, false}, - {1, "tr", 1, false}, - {1, "av.tr", 2, false}, - {1, "bbs.tr", 2, false}, - {1, "bel.tr", 2, false}, - {1, "biz.tr", 2, false}, - {1, "com.tr", 2, false}, - {1, "dr.tr", 2, false}, - {1, "edu.tr", 2, false}, - {1, "gen.tr", 2, false}, - {1, "gov.tr", 2, false}, - {1, "info.tr", 2, false}, - {1, "mil.tr", 2, false}, - {1, "k12.tr", 2, false}, - {1, "kep.tr", 2, false}, - {1, "name.tr", 2, false}, - {1, "net.tr", 2, false}, - {1, "org.tr", 2, false}, - {1, "pol.tr", 2, false}, - {1, "tel.tr", 2, false}, - {1, "tsk.tr", 2, false}, - {1, "tv.tr", 2, false}, - {1, "web.tr", 2, false}, - {1, "nc.tr", 2, false}, - {1, "gov.nc.tr", 3, false}, - {1, "tt", 1, false}, - {1, "co.tt", 2, false}, - {1, "com.tt", 2, false}, - {1, "org.tt", 2, false}, - {1, "net.tt", 2, false}, - {1, "biz.tt", 2, false}, - {1, "info.tt", 2, false}, - {1, "pro.tt", 2, false}, - {1, "int.tt", 2, false}, - {1, "coop.tt", 2, 
false}, - {1, "jobs.tt", 2, false}, - {1, "mobi.tt", 2, false}, - {1, "travel.tt", 2, false}, - {1, "museum.tt", 2, false}, - {1, "aero.tt", 2, false}, - {1, "name.tt", 2, false}, - {1, "gov.tt", 2, false}, - {1, "edu.tt", 2, false}, - {1, "tv", 1, false}, - {1, "tw", 1, false}, - {1, "edu.tw", 2, false}, - {1, "gov.tw", 2, false}, - {1, "mil.tw", 2, false}, - {1, "com.tw", 2, false}, - {1, "net.tw", 2, false}, - {1, "org.tw", 2, false}, - {1, "idv.tw", 2, false}, - {1, "game.tw", 2, false}, - {1, "ebiz.tw", 2, false}, - {1, "club.tw", 2, false}, - {1, "xn--zf0ao64a.tw", 2, false}, - {1, "xn--uc0atv.tw", 2, false}, - {1, "xn--czrw28b.tw", 2, false}, - {1, "tz", 1, false}, - {1, "ac.tz", 2, false}, - {1, "co.tz", 2, false}, - {1, "go.tz", 2, false}, - {1, "hotel.tz", 2, false}, - {1, "info.tz", 2, false}, - {1, "me.tz", 2, false}, - {1, "mil.tz", 2, false}, - {1, "mobi.tz", 2, false}, - {1, "ne.tz", 2, false}, - {1, "or.tz", 2, false}, - {1, "sc.tz", 2, false}, - {1, "tv.tz", 2, false}, - {1, "ua", 1, false}, - {1, "com.ua", 2, false}, - {1, "edu.ua", 2, false}, - {1, "gov.ua", 2, false}, - {1, "in.ua", 2, false}, - {1, "net.ua", 2, false}, - {1, "org.ua", 2, false}, - {1, "cherkassy.ua", 2, false}, - {1, "cherkasy.ua", 2, false}, - {1, "chernigov.ua", 2, false}, - {1, "chernihiv.ua", 2, false}, - {1, "chernivtsi.ua", 2, false}, - {1, "chernovtsy.ua", 2, false}, - {1, "ck.ua", 2, false}, - {1, "cn.ua", 2, false}, - {1, "cr.ua", 2, false}, - {1, "crimea.ua", 2, false}, - {1, "cv.ua", 2, false}, - {1, "dn.ua", 2, false}, - {1, "dnepropetrovsk.ua", 2, false}, - {1, "dnipropetrovsk.ua", 2, false}, - {1, "dominic.ua", 2, false}, - {1, "donetsk.ua", 2, false}, - {1, "dp.ua", 2, false}, - {1, "if.ua", 2, false}, - {1, "ivano-frankivsk.ua", 2, false}, - {1, "kh.ua", 2, false}, - {1, "kharkiv.ua", 2, false}, - {1, "kharkov.ua", 2, false}, - {1, "kherson.ua", 2, false}, - {1, "khmelnitskiy.ua", 2, false}, - {1, "khmelnytskyi.ua", 2, false}, - {1, "kiev.ua", 2, false}, - {1, 
"kirovograd.ua", 2, false}, - {1, "km.ua", 2, false}, - {1, "kr.ua", 2, false}, - {1, "krym.ua", 2, false}, - {1, "ks.ua", 2, false}, - {1, "kv.ua", 2, false}, - {1, "kyiv.ua", 2, false}, - {1, "lg.ua", 2, false}, - {1, "lt.ua", 2, false}, - {1, "lugansk.ua", 2, false}, - {1, "lutsk.ua", 2, false}, - {1, "lv.ua", 2, false}, - {1, "lviv.ua", 2, false}, - {1, "mk.ua", 2, false}, - {1, "mykolaiv.ua", 2, false}, - {1, "nikolaev.ua", 2, false}, - {1, "od.ua", 2, false}, - {1, "odesa.ua", 2, false}, - {1, "odessa.ua", 2, false}, - {1, "pl.ua", 2, false}, - {1, "poltava.ua", 2, false}, - {1, "rivne.ua", 2, false}, - {1, "rovno.ua", 2, false}, - {1, "rv.ua", 2, false}, - {1, "sb.ua", 2, false}, - {1, "sebastopol.ua", 2, false}, - {1, "sevastopol.ua", 2, false}, - {1, "sm.ua", 2, false}, - {1, "sumy.ua", 2, false}, - {1, "te.ua", 2, false}, - {1, "ternopil.ua", 2, false}, - {1, "uz.ua", 2, false}, - {1, "uzhgorod.ua", 2, false}, - {1, "vinnica.ua", 2, false}, - {1, "vinnytsia.ua", 2, false}, - {1, "vn.ua", 2, false}, - {1, "volyn.ua", 2, false}, - {1, "yalta.ua", 2, false}, - {1, "zaporizhzhe.ua", 2, false}, - {1, "zaporizhzhia.ua", 2, false}, - {1, "zhitomir.ua", 2, false}, - {1, "zhytomyr.ua", 2, false}, - {1, "zp.ua", 2, false}, - {1, "zt.ua", 2, false}, - {1, "ug", 1, false}, - {1, "co.ug", 2, false}, - {1, "or.ug", 2, false}, - {1, "ac.ug", 2, false}, - {1, "sc.ug", 2, false}, - {1, "go.ug", 2, false}, - {1, "ne.ug", 2, false}, - {1, "com.ug", 2, false}, - {1, "org.ug", 2, false}, - {1, "uk", 1, false}, - {1, "ac.uk", 2, false}, - {1, "co.uk", 2, false}, - {1, "gov.uk", 2, false}, - {1, "ltd.uk", 2, false}, - {1, "me.uk", 2, false}, - {1, "net.uk", 2, false}, - {1, "nhs.uk", 2, false}, - {1, "org.uk", 2, false}, - {1, "plc.uk", 2, false}, - {1, "police.uk", 2, false}, - {2, "sch.uk", 3, false}, - {1, "us", 1, false}, - {1, "dni.us", 2, false}, - {1, "fed.us", 2, false}, - {1, "isa.us", 2, false}, - {1, "kids.us", 2, false}, - {1, "nsn.us", 2, false}, - {1, "ak.us", 2, 
false}, - {1, "al.us", 2, false}, - {1, "ar.us", 2, false}, - {1, "as.us", 2, false}, - {1, "az.us", 2, false}, - {1, "ca.us", 2, false}, - {1, "co.us", 2, false}, - {1, "ct.us", 2, false}, - {1, "dc.us", 2, false}, - {1, "de.us", 2, false}, - {1, "fl.us", 2, false}, - {1, "ga.us", 2, false}, - {1, "gu.us", 2, false}, - {1, "hi.us", 2, false}, - {1, "ia.us", 2, false}, - {1, "id.us", 2, false}, - {1, "il.us", 2, false}, - {1, "in.us", 2, false}, - {1, "ks.us", 2, false}, - {1, "ky.us", 2, false}, - {1, "la.us", 2, false}, - {1, "ma.us", 2, false}, - {1, "md.us", 2, false}, - {1, "me.us", 2, false}, - {1, "mi.us", 2, false}, - {1, "mn.us", 2, false}, - {1, "mo.us", 2, false}, - {1, "ms.us", 2, false}, - {1, "mt.us", 2, false}, - {1, "nc.us", 2, false}, - {1, "nd.us", 2, false}, - {1, "ne.us", 2, false}, - {1, "nh.us", 2, false}, - {1, "nj.us", 2, false}, - {1, "nm.us", 2, false}, - {1, "nv.us", 2, false}, - {1, "ny.us", 2, false}, - {1, "oh.us", 2, false}, - {1, "ok.us", 2, false}, - {1, "or.us", 2, false}, - {1, "pa.us", 2, false}, - {1, "pr.us", 2, false}, - {1, "ri.us", 2, false}, - {1, "sc.us", 2, false}, - {1, "sd.us", 2, false}, - {1, "tn.us", 2, false}, - {1, "tx.us", 2, false}, - {1, "ut.us", 2, false}, - {1, "vi.us", 2, false}, - {1, "vt.us", 2, false}, - {1, "va.us", 2, false}, - {1, "wa.us", 2, false}, - {1, "wi.us", 2, false}, - {1, "wv.us", 2, false}, - {1, "wy.us", 2, false}, - {1, "k12.ak.us", 3, false}, - {1, "k12.al.us", 3, false}, - {1, "k12.ar.us", 3, false}, - {1, "k12.as.us", 3, false}, - {1, "k12.az.us", 3, false}, - {1, "k12.ca.us", 3, false}, - {1, "k12.co.us", 3, false}, - {1, "k12.ct.us", 3, false}, - {1, "k12.dc.us", 3, false}, - {1, "k12.de.us", 3, false}, - {1, "k12.fl.us", 3, false}, - {1, "k12.ga.us", 3, false}, - {1, "k12.gu.us", 3, false}, - {1, "k12.ia.us", 3, false}, - {1, "k12.id.us", 3, false}, - {1, "k12.il.us", 3, false}, - {1, "k12.in.us", 3, false}, - {1, "k12.ks.us", 3, false}, - {1, "k12.ky.us", 3, false}, - {1, 
"k12.la.us", 3, false}, - {1, "k12.ma.us", 3, false}, - {1, "k12.md.us", 3, false}, - {1, "k12.me.us", 3, false}, - {1, "k12.mi.us", 3, false}, - {1, "k12.mn.us", 3, false}, - {1, "k12.mo.us", 3, false}, - {1, "k12.ms.us", 3, false}, - {1, "k12.mt.us", 3, false}, - {1, "k12.nc.us", 3, false}, - {1, "k12.ne.us", 3, false}, - {1, "k12.nh.us", 3, false}, - {1, "k12.nj.us", 3, false}, - {1, "k12.nm.us", 3, false}, - {1, "k12.nv.us", 3, false}, - {1, "k12.ny.us", 3, false}, - {1, "k12.oh.us", 3, false}, - {1, "k12.ok.us", 3, false}, - {1, "k12.or.us", 3, false}, - {1, "k12.pa.us", 3, false}, - {1, "k12.pr.us", 3, false}, - {1, "k12.ri.us", 3, false}, - {1, "k12.sc.us", 3, false}, - {1, "k12.tn.us", 3, false}, - {1, "k12.tx.us", 3, false}, - {1, "k12.ut.us", 3, false}, - {1, "k12.vi.us", 3, false}, - {1, "k12.vt.us", 3, false}, - {1, "k12.va.us", 3, false}, - {1, "k12.wa.us", 3, false}, - {1, "k12.wi.us", 3, false}, - {1, "k12.wy.us", 3, false}, - {1, "cc.ak.us", 3, false}, - {1, "cc.al.us", 3, false}, - {1, "cc.ar.us", 3, false}, - {1, "cc.as.us", 3, false}, - {1, "cc.az.us", 3, false}, - {1, "cc.ca.us", 3, false}, - {1, "cc.co.us", 3, false}, - {1, "cc.ct.us", 3, false}, - {1, "cc.dc.us", 3, false}, - {1, "cc.de.us", 3, false}, - {1, "cc.fl.us", 3, false}, - {1, "cc.ga.us", 3, false}, - {1, "cc.gu.us", 3, false}, - {1, "cc.hi.us", 3, false}, - {1, "cc.ia.us", 3, false}, - {1, "cc.id.us", 3, false}, - {1, "cc.il.us", 3, false}, - {1, "cc.in.us", 3, false}, - {1, "cc.ks.us", 3, false}, - {1, "cc.ky.us", 3, false}, - {1, "cc.la.us", 3, false}, - {1, "cc.ma.us", 3, false}, - {1, "cc.md.us", 3, false}, - {1, "cc.me.us", 3, false}, - {1, "cc.mi.us", 3, false}, - {1, "cc.mn.us", 3, false}, - {1, "cc.mo.us", 3, false}, - {1, "cc.ms.us", 3, false}, - {1, "cc.mt.us", 3, false}, - {1, "cc.nc.us", 3, false}, - {1, "cc.nd.us", 3, false}, - {1, "cc.ne.us", 3, false}, - {1, "cc.nh.us", 3, false}, - {1, "cc.nj.us", 3, false}, - {1, "cc.nm.us", 3, false}, - {1, "cc.nv.us", 3, false}, - 
{1, "cc.ny.us", 3, false}, - {1, "cc.oh.us", 3, false}, - {1, "cc.ok.us", 3, false}, - {1, "cc.or.us", 3, false}, - {1, "cc.pa.us", 3, false}, - {1, "cc.pr.us", 3, false}, - {1, "cc.ri.us", 3, false}, - {1, "cc.sc.us", 3, false}, - {1, "cc.sd.us", 3, false}, - {1, "cc.tn.us", 3, false}, - {1, "cc.tx.us", 3, false}, - {1, "cc.ut.us", 3, false}, - {1, "cc.vi.us", 3, false}, - {1, "cc.vt.us", 3, false}, - {1, "cc.va.us", 3, false}, - {1, "cc.wa.us", 3, false}, - {1, "cc.wi.us", 3, false}, - {1, "cc.wv.us", 3, false}, - {1, "cc.wy.us", 3, false}, - {1, "lib.ak.us", 3, false}, - {1, "lib.al.us", 3, false}, - {1, "lib.ar.us", 3, false}, - {1, "lib.as.us", 3, false}, - {1, "lib.az.us", 3, false}, - {1, "lib.ca.us", 3, false}, - {1, "lib.co.us", 3, false}, - {1, "lib.ct.us", 3, false}, - {1, "lib.dc.us", 3, false}, - {1, "lib.fl.us", 3, false}, - {1, "lib.ga.us", 3, false}, - {1, "lib.gu.us", 3, false}, - {1, "lib.hi.us", 3, false}, - {1, "lib.ia.us", 3, false}, - {1, "lib.id.us", 3, false}, - {1, "lib.il.us", 3, false}, - {1, "lib.in.us", 3, false}, - {1, "lib.ks.us", 3, false}, - {1, "lib.ky.us", 3, false}, - {1, "lib.la.us", 3, false}, - {1, "lib.ma.us", 3, false}, - {1, "lib.md.us", 3, false}, - {1, "lib.me.us", 3, false}, - {1, "lib.mi.us", 3, false}, - {1, "lib.mn.us", 3, false}, - {1, "lib.mo.us", 3, false}, - {1, "lib.ms.us", 3, false}, - {1, "lib.mt.us", 3, false}, - {1, "lib.nc.us", 3, false}, - {1, "lib.nd.us", 3, false}, - {1, "lib.ne.us", 3, false}, - {1, "lib.nh.us", 3, false}, - {1, "lib.nj.us", 3, false}, - {1, "lib.nm.us", 3, false}, - {1, "lib.nv.us", 3, false}, - {1, "lib.ny.us", 3, false}, - {1, "lib.oh.us", 3, false}, - {1, "lib.ok.us", 3, false}, - {1, "lib.or.us", 3, false}, - {1, "lib.pa.us", 3, false}, - {1, "lib.pr.us", 3, false}, - {1, "lib.ri.us", 3, false}, - {1, "lib.sc.us", 3, false}, - {1, "lib.sd.us", 3, false}, - {1, "lib.tn.us", 3, false}, - {1, "lib.tx.us", 3, false}, - {1, "lib.ut.us", 3, false}, - {1, "lib.vi.us", 3, false}, - {1, 
"lib.vt.us", 3, false}, - {1, "lib.va.us", 3, false}, - {1, "lib.wa.us", 3, false}, - {1, "lib.wi.us", 3, false}, - {1, "lib.wy.us", 3, false}, - {1, "pvt.k12.ma.us", 4, false}, - {1, "chtr.k12.ma.us", 4, false}, - {1, "paroch.k12.ma.us", 4, false}, - {1, "ann-arbor.mi.us", 3, false}, - {1, "cog.mi.us", 3, false}, - {1, "dst.mi.us", 3, false}, - {1, "eaton.mi.us", 3, false}, - {1, "gen.mi.us", 3, false}, - {1, "mus.mi.us", 3, false}, - {1, "tec.mi.us", 3, false}, - {1, "washtenaw.mi.us", 3, false}, - {1, "uy", 1, false}, - {1, "com.uy", 2, false}, - {1, "edu.uy", 2, false}, - {1, "gub.uy", 2, false}, - {1, "mil.uy", 2, false}, - {1, "net.uy", 2, false}, - {1, "org.uy", 2, false}, - {1, "uz", 1, false}, - {1, "co.uz", 2, false}, - {1, "com.uz", 2, false}, - {1, "net.uz", 2, false}, - {1, "org.uz", 2, false}, - {1, "va", 1, false}, - {1, "vc", 1, false}, - {1, "com.vc", 2, false}, - {1, "net.vc", 2, false}, - {1, "org.vc", 2, false}, - {1, "gov.vc", 2, false}, - {1, "mil.vc", 2, false}, - {1, "edu.vc", 2, false}, - {1, "ve", 1, false}, - {1, "arts.ve", 2, false}, - {1, "co.ve", 2, false}, - {1, "com.ve", 2, false}, - {1, "e12.ve", 2, false}, - {1, "edu.ve", 2, false}, - {1, "firm.ve", 2, false}, - {1, "gob.ve", 2, false}, - {1, "gov.ve", 2, false}, - {1, "info.ve", 2, false}, - {1, "int.ve", 2, false}, - {1, "mil.ve", 2, false}, - {1, "net.ve", 2, false}, - {1, "org.ve", 2, false}, - {1, "rec.ve", 2, false}, - {1, "store.ve", 2, false}, - {1, "tec.ve", 2, false}, - {1, "web.ve", 2, false}, - {1, "vg", 1, false}, - {1, "vi", 1, false}, - {1, "co.vi", 2, false}, - {1, "com.vi", 2, false}, - {1, "k12.vi", 2, false}, - {1, "net.vi", 2, false}, - {1, "org.vi", 2, false}, - {1, "vn", 1, false}, - {1, "com.vn", 2, false}, - {1, "net.vn", 2, false}, - {1, "org.vn", 2, false}, - {1, "edu.vn", 2, false}, - {1, "gov.vn", 2, false}, - {1, "int.vn", 2, false}, - {1, "ac.vn", 2, false}, - {1, "biz.vn", 2, false}, - {1, "info.vn", 2, false}, - {1, "name.vn", 2, false}, - {1, 
"pro.vn", 2, false}, - {1, "health.vn", 2, false}, - {1, "vu", 1, false}, - {1, "com.vu", 2, false}, - {1, "edu.vu", 2, false}, - {1, "net.vu", 2, false}, - {1, "org.vu", 2, false}, - {1, "wf", 1, false}, - {1, "ws", 1, false}, - {1, "com.ws", 2, false}, - {1, "net.ws", 2, false}, - {1, "org.ws", 2, false}, - {1, "gov.ws", 2, false}, - {1, "edu.ws", 2, false}, - {1, "yt", 1, false}, - {1, "xn--mgbaam7a8h", 1, false}, - {1, "xn--y9a3aq", 1, false}, - {1, "xn--54b7fta0cc", 1, false}, - {1, "xn--90ae", 1, false}, - {1, "xn--90ais", 1, false}, - {1, "xn--fiqs8s", 1, false}, - {1, "xn--fiqz9s", 1, false}, - {1, "xn--lgbbat1ad8j", 1, false}, - {1, "xn--wgbh1c", 1, false}, - {1, "xn--e1a4c", 1, false}, - {1, "xn--mgbah1a3hjkrd", 1, false}, - {1, "xn--node", 1, false}, - {1, "xn--qxam", 1, false}, - {1, "xn--j6w193g", 1, false}, - {1, "xn--55qx5d.xn--j6w193g", 2, false}, - {1, "xn--wcvs22d.xn--j6w193g", 2, false}, - {1, "xn--mxtq1m.xn--j6w193g", 2, false}, - {1, "xn--gmqw5a.xn--j6w193g", 2, false}, - {1, "xn--od0alg.xn--j6w193g", 2, false}, - {1, "xn--uc0atv.xn--j6w193g", 2, false}, - {1, "xn--2scrj9c", 1, false}, - {1, "xn--3hcrj9c", 1, false}, - {1, "xn--45br5cyl", 1, false}, - {1, "xn--h2breg3eve", 1, false}, - {1, "xn--h2brj9c8c", 1, false}, - {1, "xn--mgbgu82a", 1, false}, - {1, "xn--rvc1e0am3e", 1, false}, - {1, "xn--h2brj9c", 1, false}, - {1, "xn--mgbbh1a", 1, false}, - {1, "xn--mgbbh1a71e", 1, false}, - {1, "xn--fpcrj9c3d", 1, false}, - {1, "xn--gecrj9c", 1, false}, - {1, "xn--s9brj9c", 1, false}, - {1, "xn--45brj9c", 1, false}, - {1, "xn--xkc2dl3a5ee0h", 1, false}, - {1, "xn--mgba3a4f16a", 1, false}, - {1, "xn--mgba3a4fra", 1, false}, - {1, "xn--mgbtx2b", 1, false}, - {1, "xn--mgbayh7gpa", 1, false}, - {1, "xn--3e0b707e", 1, false}, - {1, "xn--80ao21a", 1, false}, - {1, "xn--fzc2c9e2c", 1, false}, - {1, "xn--xkc2al3hye2a", 1, false}, - {1, "xn--mgbc0a9azcg", 1, false}, - {1, "xn--d1alf", 1, false}, - {1, "xn--l1acc", 1, false}, - {1, "xn--mix891f", 1, false}, - 
{1, "xn--mix082f", 1, false}, - {1, "xn--mgbx4cd0ab", 1, false}, - {1, "xn--mgb9awbf", 1, false}, - {1, "xn--mgbai9azgqp6j", 1, false}, - {1, "xn--mgbai9a5eva00b", 1, false}, - {1, "xn--ygbi2ammx", 1, false}, - {1, "xn--90a3ac", 1, false}, - {1, "xn--o1ac.xn--90a3ac", 2, false}, - {1, "xn--c1avg.xn--90a3ac", 2, false}, - {1, "xn--90azh.xn--90a3ac", 2, false}, - {1, "xn--d1at.xn--90a3ac", 2, false}, - {1, "xn--o1ach.xn--90a3ac", 2, false}, - {1, "xn--80au.xn--90a3ac", 2, false}, - {1, "xn--p1ai", 1, false}, - {1, "xn--wgbl6a", 1, false}, - {1, "xn--mgberp4a5d4ar", 1, false}, - {1, "xn--mgberp4a5d4a87g", 1, false}, - {1, "xn--mgbqly7c0a67fbc", 1, false}, - {1, "xn--mgbqly7cvafr", 1, false}, - {1, "xn--mgbpl2fh", 1, false}, - {1, "xn--yfro4i67o", 1, false}, - {1, "xn--clchc0ea0b2g2a9gcd", 1, false}, - {1, "xn--ogbpf8fl", 1, false}, - {1, "xn--mgbtf8fl", 1, false}, - {1, "xn--o3cw4h", 1, false}, - {1, "xn--12c1fe0br.xn--o3cw4h", 2, false}, - {1, "xn--12co0c3b4eva.xn--o3cw4h", 2, false}, - {1, "xn--h3cuzk1di.xn--o3cw4h", 2, false}, - {1, "xn--o3cyx2a.xn--o3cw4h", 2, false}, - {1, "xn--m3ch0j3a.xn--o3cw4h", 2, false}, - {1, "xn--12cfi8ixb8l.xn--o3cw4h", 2, false}, - {1, "xn--pgbs0dh", 1, false}, - {1, "xn--kpry57d", 1, false}, - {1, "xn--kprw13d", 1, false}, - {1, "xn--nnx388a", 1, false}, - {1, "xn--j1amh", 1, false}, - {1, "xn--mgb2ddes", 1, false}, - {1, "xxx", 1, false}, - {2, "ye", 2, false}, - {1, "ac.za", 2, false}, - {1, "agric.za", 2, false}, - {1, "alt.za", 2, false}, - {1, "co.za", 2, false}, - {1, "edu.za", 2, false}, - {1, "gov.za", 2, false}, - {1, "grondar.za", 2, false}, - {1, "law.za", 2, false}, - {1, "mil.za", 2, false}, - {1, "net.za", 2, false}, - {1, "ngo.za", 2, false}, - {1, "nic.za", 2, false}, - {1, "nis.za", 2, false}, - {1, "nom.za", 2, false}, - {1, "org.za", 2, false}, - {1, "school.za", 2, false}, - {1, "tm.za", 2, false}, - {1, "web.za", 2, false}, - {1, "zm", 1, false}, - {1, "ac.zm", 2, false}, - {1, "biz.zm", 2, false}, - {1, "co.zm", 
2, false}, - {1, "com.zm", 2, false}, - {1, "edu.zm", 2, false}, - {1, "gov.zm", 2, false}, - {1, "info.zm", 2, false}, - {1, "mil.zm", 2, false}, - {1, "net.zm", 2, false}, - {1, "org.zm", 2, false}, - {1, "sch.zm", 2, false}, - {1, "zw", 1, false}, - {1, "ac.zw", 2, false}, - {1, "co.zw", 2, false}, - {1, "gov.zw", 2, false}, - {1, "mil.zw", 2, false}, - {1, "org.zw", 2, false}, - {1, "aaa", 1, false}, - {1, "aarp", 1, false}, - {1, "abarth", 1, false}, - {1, "abb", 1, false}, - {1, "abbott", 1, false}, - {1, "abbvie", 1, false}, - {1, "abc", 1, false}, - {1, "able", 1, false}, - {1, "abogado", 1, false}, - {1, "abudhabi", 1, false}, - {1, "academy", 1, false}, - {1, "accenture", 1, false}, - {1, "accountant", 1, false}, - {1, "accountants", 1, false}, - {1, "aco", 1, false}, - {1, "actor", 1, false}, - {1, "adac", 1, false}, - {1, "ads", 1, false}, - {1, "adult", 1, false}, - {1, "aeg", 1, false}, - {1, "aetna", 1, false}, - {1, "afamilycompany", 1, false}, - {1, "afl", 1, false}, - {1, "africa", 1, false}, - {1, "agakhan", 1, false}, - {1, "agency", 1, false}, - {1, "aig", 1, false}, - {1, "aigo", 1, false}, - {1, "airbus", 1, false}, - {1, "airforce", 1, false}, - {1, "airtel", 1, false}, - {1, "akdn", 1, false}, - {1, "alfaromeo", 1, false}, - {1, "alibaba", 1, false}, - {1, "alipay", 1, false}, - {1, "allfinanz", 1, false}, - {1, "allstate", 1, false}, - {1, "ally", 1, false}, - {1, "alsace", 1, false}, - {1, "alstom", 1, false}, - {1, "amazon", 1, false}, - {1, "americanexpress", 1, false}, - {1, "americanfamily", 1, false}, - {1, "amex", 1, false}, - {1, "amfam", 1, false}, - {1, "amica", 1, false}, - {1, "amsterdam", 1, false}, - {1, "analytics", 1, false}, - {1, "android", 1, false}, - {1, "anquan", 1, false}, - {1, "anz", 1, false}, - {1, "aol", 1, false}, - {1, "apartments", 1, false}, - {1, "app", 1, false}, - {1, "apple", 1, false}, - {1, "aquarelle", 1, false}, - {1, "arab", 1, false}, - {1, "aramco", 1, false}, - {1, "archi", 1, false}, - {1, 
"army", 1, false}, - {1, "art", 1, false}, - {1, "arte", 1, false}, - {1, "asda", 1, false}, - {1, "associates", 1, false}, - {1, "athleta", 1, false}, - {1, "attorney", 1, false}, - {1, "auction", 1, false}, - {1, "audi", 1, false}, - {1, "audible", 1, false}, - {1, "audio", 1, false}, - {1, "auspost", 1, false}, - {1, "author", 1, false}, - {1, "auto", 1, false}, - {1, "autos", 1, false}, - {1, "avianca", 1, false}, - {1, "aws", 1, false}, - {1, "axa", 1, false}, - {1, "azure", 1, false}, - {1, "baby", 1, false}, - {1, "baidu", 1, false}, - {1, "banamex", 1, false}, - {1, "bananarepublic", 1, false}, - {1, "band", 1, false}, - {1, "bank", 1, false}, - {1, "bar", 1, false}, - {1, "barcelona", 1, false}, - {1, "barclaycard", 1, false}, - {1, "barclays", 1, false}, - {1, "barefoot", 1, false}, - {1, "bargains", 1, false}, - {1, "baseball", 1, false}, - {1, "basketball", 1, false}, - {1, "bauhaus", 1, false}, - {1, "bayern", 1, false}, - {1, "bbc", 1, false}, - {1, "bbt", 1, false}, - {1, "bbva", 1, false}, - {1, "bcg", 1, false}, - {1, "bcn", 1, false}, - {1, "beats", 1, false}, - {1, "beauty", 1, false}, - {1, "beer", 1, false}, - {1, "bentley", 1, false}, - {1, "berlin", 1, false}, - {1, "best", 1, false}, - {1, "bestbuy", 1, false}, - {1, "bet", 1, false}, - {1, "bharti", 1, false}, - {1, "bible", 1, false}, - {1, "bid", 1, false}, - {1, "bike", 1, false}, - {1, "bing", 1, false}, - {1, "bingo", 1, false}, - {1, "bio", 1, false}, - {1, "black", 1, false}, - {1, "blackfriday", 1, false}, - {1, "blockbuster", 1, false}, - {1, "blog", 1, false}, - {1, "bloomberg", 1, false}, - {1, "blue", 1, false}, - {1, "bms", 1, false}, - {1, "bmw", 1, false}, - {1, "bnpparibas", 1, false}, - {1, "boats", 1, false}, - {1, "boehringer", 1, false}, - {1, "bofa", 1, false}, - {1, "bom", 1, false}, - {1, "bond", 1, false}, - {1, "boo", 1, false}, - {1, "book", 1, false}, - {1, "booking", 1, false}, - {1, "bosch", 1, false}, - {1, "bostik", 1, false}, - {1, "boston", 1, false}, - {1, 
"bot", 1, false}, - {1, "boutique", 1, false}, - {1, "box", 1, false}, - {1, "bradesco", 1, false}, - {1, "bridgestone", 1, false}, - {1, "broadway", 1, false}, - {1, "broker", 1, false}, - {1, "brother", 1, false}, - {1, "brussels", 1, false}, - {1, "budapest", 1, false}, - {1, "bugatti", 1, false}, - {1, "build", 1, false}, - {1, "builders", 1, false}, - {1, "business", 1, false}, - {1, "buy", 1, false}, - {1, "buzz", 1, false}, - {1, "bzh", 1, false}, - {1, "cab", 1, false}, - {1, "cafe", 1, false}, - {1, "cal", 1, false}, - {1, "call", 1, false}, - {1, "calvinklein", 1, false}, - {1, "cam", 1, false}, - {1, "camera", 1, false}, - {1, "camp", 1, false}, - {1, "cancerresearch", 1, false}, - {1, "canon", 1, false}, - {1, "capetown", 1, false}, - {1, "capital", 1, false}, - {1, "capitalone", 1, false}, - {1, "car", 1, false}, - {1, "caravan", 1, false}, - {1, "cards", 1, false}, - {1, "care", 1, false}, - {1, "career", 1, false}, - {1, "careers", 1, false}, - {1, "cars", 1, false}, - {1, "casa", 1, false}, - {1, "case", 1, false}, - {1, "caseih", 1, false}, - {1, "cash", 1, false}, - {1, "casino", 1, false}, - {1, "catering", 1, false}, - {1, "catholic", 1, false}, - {1, "cba", 1, false}, - {1, "cbn", 1, false}, - {1, "cbre", 1, false}, - {1, "cbs", 1, false}, - {1, "ceb", 1, false}, - {1, "center", 1, false}, - {1, "ceo", 1, false}, - {1, "cern", 1, false}, - {1, "cfa", 1, false}, - {1, "cfd", 1, false}, - {1, "chanel", 1, false}, - {1, "channel", 1, false}, - {1, "charity", 1, false}, - {1, "chase", 1, false}, - {1, "chat", 1, false}, - {1, "cheap", 1, false}, - {1, "chintai", 1, false}, - {1, "christmas", 1, false}, - {1, "chrome", 1, false}, - {1, "church", 1, false}, - {1, "cipriani", 1, false}, - {1, "circle", 1, false}, - {1, "cisco", 1, false}, - {1, "citadel", 1, false}, - {1, "citi", 1, false}, - {1, "citic", 1, false}, - {1, "city", 1, false}, - {1, "cityeats", 1, false}, - {1, "claims", 1, false}, - {1, "cleaning", 1, false}, - {1, "click", 1, false}, - 
{1, "clinic", 1, false}, - {1, "clinique", 1, false}, - {1, "clothing", 1, false}, - {1, "cloud", 1, false}, - {1, "club", 1, false}, - {1, "clubmed", 1, false}, - {1, "coach", 1, false}, - {1, "codes", 1, false}, - {1, "coffee", 1, false}, - {1, "college", 1, false}, - {1, "cologne", 1, false}, - {1, "comcast", 1, false}, - {1, "commbank", 1, false}, - {1, "community", 1, false}, - {1, "company", 1, false}, - {1, "compare", 1, false}, - {1, "computer", 1, false}, - {1, "comsec", 1, false}, - {1, "condos", 1, false}, - {1, "construction", 1, false}, - {1, "consulting", 1, false}, - {1, "contact", 1, false}, - {1, "contractors", 1, false}, - {1, "cooking", 1, false}, - {1, "cookingchannel", 1, false}, - {1, "cool", 1, false}, - {1, "corsica", 1, false}, - {1, "country", 1, false}, - {1, "coupon", 1, false}, - {1, "coupons", 1, false}, - {1, "courses", 1, false}, - {1, "cpa", 1, false}, - {1, "credit", 1, false}, - {1, "creditcard", 1, false}, - {1, "creditunion", 1, false}, - {1, "cricket", 1, false}, - {1, "crown", 1, false}, - {1, "crs", 1, false}, - {1, "cruise", 1, false}, - {1, "cruises", 1, false}, - {1, "csc", 1, false}, - {1, "cuisinella", 1, false}, - {1, "cymru", 1, false}, - {1, "cyou", 1, false}, - {1, "dabur", 1, false}, - {1, "dad", 1, false}, - {1, "dance", 1, false}, - {1, "data", 1, false}, - {1, "date", 1, false}, - {1, "dating", 1, false}, - {1, "datsun", 1, false}, - {1, "day", 1, false}, - {1, "dclk", 1, false}, - {1, "dds", 1, false}, - {1, "deal", 1, false}, - {1, "dealer", 1, false}, - {1, "deals", 1, false}, - {1, "degree", 1, false}, - {1, "delivery", 1, false}, - {1, "dell", 1, false}, - {1, "deloitte", 1, false}, - {1, "delta", 1, false}, - {1, "democrat", 1, false}, - {1, "dental", 1, false}, - {1, "dentist", 1, false}, - {1, "desi", 1, false}, - {1, "design", 1, false}, - {1, "dev", 1, false}, - {1, "dhl", 1, false}, - {1, "diamonds", 1, false}, - {1, "diet", 1, false}, - {1, "digital", 1, false}, - {1, "direct", 1, false}, - {1, 
"directory", 1, false}, - {1, "discount", 1, false}, - {1, "discover", 1, false}, - {1, "dish", 1, false}, - {1, "diy", 1, false}, - {1, "dnp", 1, false}, - {1, "docs", 1, false}, - {1, "doctor", 1, false}, - {1, "dog", 1, false}, - {1, "domains", 1, false}, - {1, "dot", 1, false}, - {1, "download", 1, false}, - {1, "drive", 1, false}, - {1, "dtv", 1, false}, - {1, "dubai", 1, false}, - {1, "duck", 1, false}, - {1, "dunlop", 1, false}, - {1, "dupont", 1, false}, - {1, "durban", 1, false}, - {1, "dvag", 1, false}, - {1, "dvr", 1, false}, - {1, "earth", 1, false}, - {1, "eat", 1, false}, - {1, "eco", 1, false}, - {1, "edeka", 1, false}, - {1, "education", 1, false}, - {1, "email", 1, false}, - {1, "emerck", 1, false}, - {1, "energy", 1, false}, - {1, "engineer", 1, false}, - {1, "engineering", 1, false}, - {1, "enterprises", 1, false}, - {1, "epson", 1, false}, - {1, "equipment", 1, false}, - {1, "ericsson", 1, false}, - {1, "erni", 1, false}, - {1, "esq", 1, false}, - {1, "estate", 1, false}, - {1, "esurance", 1, false}, - {1, "etisalat", 1, false}, - {1, "eurovision", 1, false}, - {1, "eus", 1, false}, - {1, "events", 1, false}, - {1, "exchange", 1, false}, - {1, "expert", 1, false}, - {1, "exposed", 1, false}, - {1, "express", 1, false}, - {1, "extraspace", 1, false}, - {1, "fage", 1, false}, - {1, "fail", 1, false}, - {1, "fairwinds", 1, false}, - {1, "faith", 1, false}, - {1, "family", 1, false}, - {1, "fan", 1, false}, - {1, "fans", 1, false}, - {1, "farm", 1, false}, - {1, "farmers", 1, false}, - {1, "fashion", 1, false}, - {1, "fast", 1, false}, - {1, "fedex", 1, false}, - {1, "feedback", 1, false}, - {1, "ferrari", 1, false}, - {1, "ferrero", 1, false}, - {1, "fiat", 1, false}, - {1, "fidelity", 1, false}, - {1, "fido", 1, false}, - {1, "film", 1, false}, - {1, "final", 1, false}, - {1, "finance", 1, false}, - {1, "financial", 1, false}, - {1, "fire", 1, false}, - {1, "firestone", 1, false}, - {1, "firmdale", 1, false}, - {1, "fish", 1, false}, - {1, 
"fishing", 1, false}, - {1, "fit", 1, false}, - {1, "fitness", 1, false}, - {1, "flickr", 1, false}, - {1, "flights", 1, false}, - {1, "flir", 1, false}, - {1, "florist", 1, false}, - {1, "flowers", 1, false}, - {1, "fly", 1, false}, - {1, "foo", 1, false}, - {1, "food", 1, false}, - {1, "foodnetwork", 1, false}, - {1, "football", 1, false}, - {1, "ford", 1, false}, - {1, "forex", 1, false}, - {1, "forsale", 1, false}, - {1, "forum", 1, false}, - {1, "foundation", 1, false}, - {1, "fox", 1, false}, - {1, "free", 1, false}, - {1, "fresenius", 1, false}, - {1, "frl", 1, false}, - {1, "frogans", 1, false}, - {1, "frontdoor", 1, false}, - {1, "frontier", 1, false}, - {1, "ftr", 1, false}, - {1, "fujitsu", 1, false}, - {1, "fujixerox", 1, false}, - {1, "fun", 1, false}, - {1, "fund", 1, false}, - {1, "furniture", 1, false}, - {1, "futbol", 1, false}, - {1, "fyi", 1, false}, - {1, "gal", 1, false}, - {1, "gallery", 1, false}, - {1, "gallo", 1, false}, - {1, "gallup", 1, false}, - {1, "game", 1, false}, - {1, "games", 1, false}, - {1, "gap", 1, false}, - {1, "garden", 1, false}, - {1, "gay", 1, false}, - {1, "gbiz", 1, false}, - {1, "gdn", 1, false}, - {1, "gea", 1, false}, - {1, "gent", 1, false}, - {1, "genting", 1, false}, - {1, "george", 1, false}, - {1, "ggee", 1, false}, - {1, "gift", 1, false}, - {1, "gifts", 1, false}, - {1, "gives", 1, false}, - {1, "giving", 1, false}, - {1, "glade", 1, false}, - {1, "glass", 1, false}, - {1, "gle", 1, false}, - {1, "global", 1, false}, - {1, "globo", 1, false}, - {1, "gmail", 1, false}, - {1, "gmbh", 1, false}, - {1, "gmo", 1, false}, - {1, "gmx", 1, false}, - {1, "godaddy", 1, false}, - {1, "gold", 1, false}, - {1, "goldpoint", 1, false}, - {1, "golf", 1, false}, - {1, "goo", 1, false}, - {1, "goodyear", 1, false}, - {1, "goog", 1, false}, - {1, "google", 1, false}, - {1, "gop", 1, false}, - {1, "got", 1, false}, - {1, "grainger", 1, false}, - {1, "graphics", 1, false}, - {1, "gratis", 1, false}, - {1, "green", 1, false}, - 
{1, "gripe", 1, false}, - {1, "grocery", 1, false}, - {1, "group", 1, false}, - {1, "guardian", 1, false}, - {1, "gucci", 1, false}, - {1, "guge", 1, false}, - {1, "guide", 1, false}, - {1, "guitars", 1, false}, - {1, "guru", 1, false}, - {1, "hair", 1, false}, - {1, "hamburg", 1, false}, - {1, "hangout", 1, false}, - {1, "haus", 1, false}, - {1, "hbo", 1, false}, - {1, "hdfc", 1, false}, - {1, "hdfcbank", 1, false}, - {1, "health", 1, false}, - {1, "healthcare", 1, false}, - {1, "help", 1, false}, - {1, "helsinki", 1, false}, - {1, "here", 1, false}, - {1, "hermes", 1, false}, - {1, "hgtv", 1, false}, - {1, "hiphop", 1, false}, - {1, "hisamitsu", 1, false}, - {1, "hitachi", 1, false}, - {1, "hiv", 1, false}, - {1, "hkt", 1, false}, - {1, "hockey", 1, false}, - {1, "holdings", 1, false}, - {1, "holiday", 1, false}, - {1, "homedepot", 1, false}, - {1, "homegoods", 1, false}, - {1, "homes", 1, false}, - {1, "homesense", 1, false}, - {1, "honda", 1, false}, - {1, "horse", 1, false}, - {1, "hospital", 1, false}, - {1, "host", 1, false}, - {1, "hosting", 1, false}, - {1, "hot", 1, false}, - {1, "hoteles", 1, false}, - {1, "hotels", 1, false}, - {1, "hotmail", 1, false}, - {1, "house", 1, false}, - {1, "how", 1, false}, - {1, "hsbc", 1, false}, - {1, "hughes", 1, false}, - {1, "hyatt", 1, false}, - {1, "hyundai", 1, false}, - {1, "ibm", 1, false}, - {1, "icbc", 1, false}, - {1, "ice", 1, false}, - {1, "icu", 1, false}, - {1, "ieee", 1, false}, - {1, "ifm", 1, false}, - {1, "ikano", 1, false}, - {1, "imamat", 1, false}, - {1, "imdb", 1, false}, - {1, "immo", 1, false}, - {1, "immobilien", 1, false}, - {1, "inc", 1, false}, - {1, "industries", 1, false}, - {1, "infiniti", 1, false}, - {1, "ing", 1, false}, - {1, "ink", 1, false}, - {1, "institute", 1, false}, - {1, "insurance", 1, false}, - {1, "insure", 1, false}, - {1, "intel", 1, false}, - {1, "international", 1, false}, - {1, "intuit", 1, false}, - {1, "investments", 1, false}, - {1, "ipiranga", 1, false}, - {1, 
"irish", 1, false}, - {1, "ismaili", 1, false}, - {1, "ist", 1, false}, - {1, "istanbul", 1, false}, - {1, "itau", 1, false}, - {1, "itv", 1, false}, - {1, "iveco", 1, false}, - {1, "jaguar", 1, false}, - {1, "java", 1, false}, - {1, "jcb", 1, false}, - {1, "jcp", 1, false}, - {1, "jeep", 1, false}, - {1, "jetzt", 1, false}, - {1, "jewelry", 1, false}, - {1, "jio", 1, false}, - {1, "jll", 1, false}, - {1, "jmp", 1, false}, - {1, "jnj", 1, false}, - {1, "joburg", 1, false}, - {1, "jot", 1, false}, - {1, "joy", 1, false}, - {1, "jpmorgan", 1, false}, - {1, "jprs", 1, false}, - {1, "juegos", 1, false}, - {1, "juniper", 1, false}, - {1, "kaufen", 1, false}, - {1, "kddi", 1, false}, - {1, "kerryhotels", 1, false}, - {1, "kerrylogistics", 1, false}, - {1, "kerryproperties", 1, false}, - {1, "kfh", 1, false}, - {1, "kia", 1, false}, - {1, "kim", 1, false}, - {1, "kinder", 1, false}, - {1, "kindle", 1, false}, - {1, "kitchen", 1, false}, - {1, "kiwi", 1, false}, - {1, "koeln", 1, false}, - {1, "komatsu", 1, false}, - {1, "kosher", 1, false}, - {1, "kpmg", 1, false}, - {1, "kpn", 1, false}, - {1, "krd", 1, false}, - {1, "kred", 1, false}, - {1, "kuokgroup", 1, false}, - {1, "kyoto", 1, false}, - {1, "lacaixa", 1, false}, - {1, "lamborghini", 1, false}, - {1, "lamer", 1, false}, - {1, "lancaster", 1, false}, - {1, "lancia", 1, false}, - {1, "land", 1, false}, - {1, "landrover", 1, false}, - {1, "lanxess", 1, false}, - {1, "lasalle", 1, false}, - {1, "lat", 1, false}, - {1, "latino", 1, false}, - {1, "latrobe", 1, false}, - {1, "law", 1, false}, - {1, "lawyer", 1, false}, - {1, "lds", 1, false}, - {1, "lease", 1, false}, - {1, "leclerc", 1, false}, - {1, "lefrak", 1, false}, - {1, "legal", 1, false}, - {1, "lego", 1, false}, - {1, "lexus", 1, false}, - {1, "lgbt", 1, false}, - {1, "lidl", 1, false}, - {1, "life", 1, false}, - {1, "lifeinsurance", 1, false}, - {1, "lifestyle", 1, false}, - {1, "lighting", 1, false}, - {1, "like", 1, false}, - {1, "lilly", 1, false}, - {1, 
"limited", 1, false}, - {1, "limo", 1, false}, - {1, "lincoln", 1, false}, - {1, "linde", 1, false}, - {1, "link", 1, false}, - {1, "lipsy", 1, false}, - {1, "live", 1, false}, - {1, "living", 1, false}, - {1, "lixil", 1, false}, - {1, "llc", 1, false}, - {1, "llp", 1, false}, - {1, "loan", 1, false}, - {1, "loans", 1, false}, - {1, "locker", 1, false}, - {1, "locus", 1, false}, - {1, "loft", 1, false}, - {1, "lol", 1, false}, - {1, "london", 1, false}, - {1, "lotte", 1, false}, - {1, "lotto", 1, false}, - {1, "love", 1, false}, - {1, "lpl", 1, false}, - {1, "lplfinancial", 1, false}, - {1, "ltd", 1, false}, - {1, "ltda", 1, false}, - {1, "lundbeck", 1, false}, - {1, "lupin", 1, false}, - {1, "luxe", 1, false}, - {1, "luxury", 1, false}, - {1, "macys", 1, false}, - {1, "madrid", 1, false}, - {1, "maif", 1, false}, - {1, "maison", 1, false}, - {1, "makeup", 1, false}, - {1, "man", 1, false}, - {1, "management", 1, false}, - {1, "mango", 1, false}, - {1, "map", 1, false}, - {1, "market", 1, false}, - {1, "marketing", 1, false}, - {1, "markets", 1, false}, - {1, "marriott", 1, false}, - {1, "marshalls", 1, false}, - {1, "maserati", 1, false}, - {1, "mattel", 1, false}, - {1, "mba", 1, false}, - {1, "mckinsey", 1, false}, - {1, "med", 1, false}, - {1, "media", 1, false}, - {1, "meet", 1, false}, - {1, "melbourne", 1, false}, - {1, "meme", 1, false}, - {1, "memorial", 1, false}, - {1, "men", 1, false}, - {1, "menu", 1, false}, - {1, "merckmsd", 1, false}, - {1, "metlife", 1, false}, - {1, "miami", 1, false}, - {1, "microsoft", 1, false}, - {1, "mini", 1, false}, - {1, "mint", 1, false}, - {1, "mit", 1, false}, - {1, "mitsubishi", 1, false}, - {1, "mlb", 1, false}, - {1, "mls", 1, false}, - {1, "mma", 1, false}, - {1, "mobile", 1, false}, - {1, "moda", 1, false}, - {1, "moe", 1, false}, - {1, "moi", 1, false}, - {1, "mom", 1, false}, - {1, "monash", 1, false}, - {1, "money", 1, false}, - {1, "monster", 1, false}, - {1, "mormon", 1, false}, - {1, "mortgage", 1, false}, - 
{1, "moscow", 1, false}, - {1, "moto", 1, false}, - {1, "motorcycles", 1, false}, - {1, "mov", 1, false}, - {1, "movie", 1, false}, - {1, "msd", 1, false}, - {1, "mtn", 1, false}, - {1, "mtr", 1, false}, - {1, "mutual", 1, false}, - {1, "nab", 1, false}, - {1, "nadex", 1, false}, - {1, "nagoya", 1, false}, - {1, "nationwide", 1, false}, - {1, "natura", 1, false}, - {1, "navy", 1, false}, - {1, "nba", 1, false}, - {1, "nec", 1, false}, - {1, "netbank", 1, false}, - {1, "netflix", 1, false}, - {1, "network", 1, false}, - {1, "neustar", 1, false}, - {1, "new", 1, false}, - {1, "newholland", 1, false}, - {1, "news", 1, false}, - {1, "next", 1, false}, - {1, "nextdirect", 1, false}, - {1, "nexus", 1, false}, - {1, "nfl", 1, false}, - {1, "ngo", 1, false}, - {1, "nhk", 1, false}, - {1, "nico", 1, false}, - {1, "nike", 1, false}, - {1, "nikon", 1, false}, - {1, "ninja", 1, false}, - {1, "nissan", 1, false}, - {1, "nissay", 1, false}, - {1, "nokia", 1, false}, - {1, "northwesternmutual", 1, false}, - {1, "norton", 1, false}, - {1, "now", 1, false}, - {1, "nowruz", 1, false}, - {1, "nowtv", 1, false}, - {1, "nra", 1, false}, - {1, "nrw", 1, false}, - {1, "ntt", 1, false}, - {1, "nyc", 1, false}, - {1, "obi", 1, false}, - {1, "observer", 1, false}, - {1, "off", 1, false}, - {1, "office", 1, false}, - {1, "okinawa", 1, false}, - {1, "olayan", 1, false}, - {1, "olayangroup", 1, false}, - {1, "oldnavy", 1, false}, - {1, "ollo", 1, false}, - {1, "omega", 1, false}, - {1, "one", 1, false}, - {1, "ong", 1, false}, - {1, "onl", 1, false}, - {1, "online", 1, false}, - {1, "onyourside", 1, false}, - {1, "ooo", 1, false}, - {1, "open", 1, false}, - {1, "oracle", 1, false}, - {1, "orange", 1, false}, - {1, "organic", 1, false}, - {1, "origins", 1, false}, - {1, "osaka", 1, false}, - {1, "otsuka", 1, false}, - {1, "ott", 1, false}, - {1, "ovh", 1, false}, - {1, "page", 1, false}, - {1, "panasonic", 1, false}, - {1, "paris", 1, false}, - {1, "pars", 1, false}, - {1, "partners", 1, 
false}, - {1, "parts", 1, false}, - {1, "party", 1, false}, - {1, "passagens", 1, false}, - {1, "pay", 1, false}, - {1, "pccw", 1, false}, - {1, "pet", 1, false}, - {1, "pfizer", 1, false}, - {1, "pharmacy", 1, false}, - {1, "phd", 1, false}, - {1, "philips", 1, false}, - {1, "phone", 1, false}, - {1, "photo", 1, false}, - {1, "photography", 1, false}, - {1, "photos", 1, false}, - {1, "physio", 1, false}, - {1, "pics", 1, false}, - {1, "pictet", 1, false}, - {1, "pictures", 1, false}, - {1, "pid", 1, false}, - {1, "pin", 1, false}, - {1, "ping", 1, false}, - {1, "pink", 1, false}, - {1, "pioneer", 1, false}, - {1, "pizza", 1, false}, - {1, "place", 1, false}, - {1, "play", 1, false}, - {1, "playstation", 1, false}, - {1, "plumbing", 1, false}, - {1, "plus", 1, false}, - {1, "pnc", 1, false}, - {1, "pohl", 1, false}, - {1, "poker", 1, false}, - {1, "politie", 1, false}, - {1, "porn", 1, false}, - {1, "pramerica", 1, false}, - {1, "praxi", 1, false}, - {1, "press", 1, false}, - {1, "prime", 1, false}, - {1, "prod", 1, false}, - {1, "productions", 1, false}, - {1, "prof", 1, false}, - {1, "progressive", 1, false}, - {1, "promo", 1, false}, - {1, "properties", 1, false}, - {1, "property", 1, false}, - {1, "protection", 1, false}, - {1, "pru", 1, false}, - {1, "prudential", 1, false}, - {1, "pub", 1, false}, - {1, "pwc", 1, false}, - {1, "qpon", 1, false}, - {1, "quebec", 1, false}, - {1, "quest", 1, false}, - {1, "qvc", 1, false}, - {1, "racing", 1, false}, - {1, "radio", 1, false}, - {1, "raid", 1, false}, - {1, "read", 1, false}, - {1, "realestate", 1, false}, - {1, "realtor", 1, false}, - {1, "realty", 1, false}, - {1, "recipes", 1, false}, - {1, "red", 1, false}, - {1, "redstone", 1, false}, - {1, "redumbrella", 1, false}, - {1, "rehab", 1, false}, - {1, "reise", 1, false}, - {1, "reisen", 1, false}, - {1, "reit", 1, false}, - {1, "reliance", 1, false}, - {1, "ren", 1, false}, - {1, "rent", 1, false}, - {1, "rentals", 1, false}, - {1, "repair", 1, false}, - {1, 
"report", 1, false}, - {1, "republican", 1, false}, - {1, "rest", 1, false}, - {1, "restaurant", 1, false}, - {1, "review", 1, false}, - {1, "reviews", 1, false}, - {1, "rexroth", 1, false}, - {1, "rich", 1, false}, - {1, "richardli", 1, false}, - {1, "ricoh", 1, false}, - {1, "rightathome", 1, false}, - {1, "ril", 1, false}, - {1, "rio", 1, false}, - {1, "rip", 1, false}, - {1, "rmit", 1, false}, - {1, "rocher", 1, false}, - {1, "rocks", 1, false}, - {1, "rodeo", 1, false}, - {1, "rogers", 1, false}, - {1, "room", 1, false}, - {1, "rsvp", 1, false}, - {1, "rugby", 1, false}, - {1, "ruhr", 1, false}, - {1, "run", 1, false}, - {1, "rwe", 1, false}, - {1, "ryukyu", 1, false}, - {1, "saarland", 1, false}, - {1, "safe", 1, false}, - {1, "safety", 1, false}, - {1, "sakura", 1, false}, - {1, "sale", 1, false}, - {1, "salon", 1, false}, - {1, "samsclub", 1, false}, - {1, "samsung", 1, false}, - {1, "sandvik", 1, false}, - {1, "sandvikcoromant", 1, false}, - {1, "sanofi", 1, false}, - {1, "sap", 1, false}, - {1, "sarl", 1, false}, - {1, "sas", 1, false}, - {1, "save", 1, false}, - {1, "saxo", 1, false}, - {1, "sbi", 1, false}, - {1, "sbs", 1, false}, - {1, "sca", 1, false}, - {1, "scb", 1, false}, - {1, "schaeffler", 1, false}, - {1, "schmidt", 1, false}, - {1, "scholarships", 1, false}, - {1, "school", 1, false}, - {1, "schule", 1, false}, - {1, "schwarz", 1, false}, - {1, "science", 1, false}, - {1, "scjohnson", 1, false}, - {1, "scor", 1, false}, - {1, "scot", 1, false}, - {1, "search", 1, false}, - {1, "seat", 1, false}, - {1, "secure", 1, false}, - {1, "security", 1, false}, - {1, "seek", 1, false}, - {1, "select", 1, false}, - {1, "sener", 1, false}, - {1, "services", 1, false}, - {1, "ses", 1, false}, - {1, "seven", 1, false}, - {1, "sew", 1, false}, - {1, "sex", 1, false}, - {1, "sexy", 1, false}, - {1, "sfr", 1, false}, - {1, "shangrila", 1, false}, - {1, "sharp", 1, false}, - {1, "shaw", 1, false}, - {1, "shell", 1, false}, - {1, "shia", 1, false}, - {1, 
"shiksha", 1, false}, - {1, "shoes", 1, false}, - {1, "shop", 1, false}, - {1, "shopping", 1, false}, - {1, "shouji", 1, false}, - {1, "show", 1, false}, - {1, "showtime", 1, false}, - {1, "shriram", 1, false}, - {1, "silk", 1, false}, - {1, "sina", 1, false}, - {1, "singles", 1, false}, - {1, "site", 1, false}, - {1, "ski", 1, false}, - {1, "skin", 1, false}, - {1, "sky", 1, false}, - {1, "skype", 1, false}, - {1, "sling", 1, false}, - {1, "smart", 1, false}, - {1, "smile", 1, false}, - {1, "sncf", 1, false}, - {1, "soccer", 1, false}, - {1, "social", 1, false}, - {1, "softbank", 1, false}, - {1, "software", 1, false}, - {1, "sohu", 1, false}, - {1, "solar", 1, false}, - {1, "solutions", 1, false}, - {1, "song", 1, false}, - {1, "sony", 1, false}, - {1, "soy", 1, false}, - {1, "spa", 1, false}, - {1, "space", 1, false}, - {1, "sport", 1, false}, - {1, "spot", 1, false}, - {1, "spreadbetting", 1, false}, - {1, "srl", 1, false}, - {1, "stada", 1, false}, - {1, "staples", 1, false}, - {1, "star", 1, false}, - {1, "statebank", 1, false}, - {1, "statefarm", 1, false}, - {1, "stc", 1, false}, - {1, "stcgroup", 1, false}, - {1, "stockholm", 1, false}, - {1, "storage", 1, false}, - {1, "store", 1, false}, - {1, "stream", 1, false}, - {1, "studio", 1, false}, - {1, "study", 1, false}, - {1, "style", 1, false}, - {1, "sucks", 1, false}, - {1, "supplies", 1, false}, - {1, "supply", 1, false}, - {1, "support", 1, false}, - {1, "surf", 1, false}, - {1, "surgery", 1, false}, - {1, "suzuki", 1, false}, - {1, "swatch", 1, false}, - {1, "swiftcover", 1, false}, - {1, "swiss", 1, false}, - {1, "sydney", 1, false}, - {1, "symantec", 1, false}, - {1, "systems", 1, false}, - {1, "tab", 1, false}, - {1, "taipei", 1, false}, - {1, "talk", 1, false}, - {1, "taobao", 1, false}, - {1, "target", 1, false}, - {1, "tatamotors", 1, false}, - {1, "tatar", 1, false}, - {1, "tattoo", 1, false}, - {1, "tax", 1, false}, - {1, "taxi", 1, false}, - {1, "tci", 1, false}, - {1, "tdk", 1, false}, - {1, 
"team", 1, false}, - {1, "tech", 1, false}, - {1, "technology", 1, false}, - {1, "temasek", 1, false}, - {1, "tennis", 1, false}, - {1, "teva", 1, false}, - {1, "thd", 1, false}, - {1, "theater", 1, false}, - {1, "theatre", 1, false}, - {1, "tiaa", 1, false}, - {1, "tickets", 1, false}, - {1, "tienda", 1, false}, - {1, "tiffany", 1, false}, - {1, "tips", 1, false}, - {1, "tires", 1, false}, - {1, "tirol", 1, false}, - {1, "tjmaxx", 1, false}, - {1, "tjx", 1, false}, - {1, "tkmaxx", 1, false}, - {1, "tmall", 1, false}, - {1, "today", 1, false}, - {1, "tokyo", 1, false}, - {1, "tools", 1, false}, - {1, "top", 1, false}, - {1, "toray", 1, false}, - {1, "toshiba", 1, false}, - {1, "total", 1, false}, - {1, "tours", 1, false}, - {1, "town", 1, false}, - {1, "toyota", 1, false}, - {1, "toys", 1, false}, - {1, "trade", 1, false}, - {1, "trading", 1, false}, - {1, "training", 1, false}, - {1, "travel", 1, false}, - {1, "travelchannel", 1, false}, - {1, "travelers", 1, false}, - {1, "travelersinsurance", 1, false}, - {1, "trust", 1, false}, - {1, "trv", 1, false}, - {1, "tube", 1, false}, - {1, "tui", 1, false}, - {1, "tunes", 1, false}, - {1, "tushu", 1, false}, - {1, "tvs", 1, false}, - {1, "ubank", 1, false}, - {1, "ubs", 1, false}, - {1, "unicom", 1, false}, - {1, "university", 1, false}, - {1, "uno", 1, false}, - {1, "uol", 1, false}, - {1, "ups", 1, false}, - {1, "vacations", 1, false}, - {1, "vana", 1, false}, - {1, "vanguard", 1, false}, - {1, "vegas", 1, false}, - {1, "ventures", 1, false}, - {1, "verisign", 1, false}, - {1, "versicherung", 1, false}, - {1, "vet", 1, false}, - {1, "viajes", 1, false}, - {1, "video", 1, false}, - {1, "vig", 1, false}, - {1, "viking", 1, false}, - {1, "villas", 1, false}, - {1, "vin", 1, false}, - {1, "vip", 1, false}, - {1, "virgin", 1, false}, - {1, "visa", 1, false}, - {1, "vision", 1, false}, - {1, "vistaprint", 1, false}, - {1, "viva", 1, false}, - {1, "vivo", 1, false}, - {1, "vlaanderen", 1, false}, - {1, "vodka", 1, false}, - 
{1, "volkswagen", 1, false}, - {1, "volvo", 1, false}, - {1, "vote", 1, false}, - {1, "voting", 1, false}, - {1, "voto", 1, false}, - {1, "voyage", 1, false}, - {1, "vuelos", 1, false}, - {1, "wales", 1, false}, - {1, "walmart", 1, false}, - {1, "walter", 1, false}, - {1, "wang", 1, false}, - {1, "wanggou", 1, false}, - {1, "watch", 1, false}, - {1, "watches", 1, false}, - {1, "weather", 1, false}, - {1, "weatherchannel", 1, false}, - {1, "webcam", 1, false}, - {1, "weber", 1, false}, - {1, "website", 1, false}, - {1, "wed", 1, false}, - {1, "wedding", 1, false}, - {1, "weibo", 1, false}, - {1, "weir", 1, false}, - {1, "whoswho", 1, false}, - {1, "wien", 1, false}, - {1, "wiki", 1, false}, - {1, "williamhill", 1, false}, - {1, "win", 1, false}, - {1, "windows", 1, false}, - {1, "wine", 1, false}, - {1, "winners", 1, false}, - {1, "wme", 1, false}, - {1, "wolterskluwer", 1, false}, - {1, "woodside", 1, false}, - {1, "work", 1, false}, - {1, "works", 1, false}, - {1, "world", 1, false}, - {1, "wow", 1, false}, - {1, "wtc", 1, false}, - {1, "wtf", 1, false}, - {1, "xbox", 1, false}, - {1, "xerox", 1, false}, - {1, "xfinity", 1, false}, - {1, "xihuan", 1, false}, - {1, "xin", 1, false}, - {1, "xn--11b4c3d", 1, false}, - {1, "xn--1ck2e1b", 1, false}, - {1, "xn--1qqw23a", 1, false}, - {1, "xn--30rr7y", 1, false}, - {1, "xn--3bst00m", 1, false}, - {1, "xn--3ds443g", 1, false}, - {1, "xn--3oq18vl8pn36a", 1, false}, - {1, "xn--3pxu8k", 1, false}, - {1, "xn--42c2d9a", 1, false}, - {1, "xn--45q11c", 1, false}, - {1, "xn--4gbrim", 1, false}, - {1, "xn--55qw42g", 1, false}, - {1, "xn--55qx5d", 1, false}, - {1, "xn--5su34j936bgsg", 1, false}, - {1, "xn--5tzm5g", 1, false}, - {1, "xn--6frz82g", 1, false}, - {1, "xn--6qq986b3xl", 1, false}, - {1, "xn--80adxhks", 1, false}, - {1, "xn--80aqecdr1a", 1, false}, - {1, "xn--80asehdb", 1, false}, - {1, "xn--80aswg", 1, false}, - {1, "xn--8y0a063a", 1, false}, - {1, "xn--9dbq2a", 1, false}, - {1, "xn--9et52u", 1, false}, - {1, 
"xn--9krt00a", 1, false}, - {1, "xn--b4w605ferd", 1, false}, - {1, "xn--bck1b9a5dre4c", 1, false}, - {1, "xn--c1avg", 1, false}, - {1, "xn--c2br7g", 1, false}, - {1, "xn--cck2b3b", 1, false}, - {1, "xn--cckwcxetd", 1, false}, - {1, "xn--cg4bki", 1, false}, - {1, "xn--czr694b", 1, false}, - {1, "xn--czrs0t", 1, false}, - {1, "xn--czru2d", 1, false}, - {1, "xn--d1acj3b", 1, false}, - {1, "xn--eckvdtc9d", 1, false}, - {1, "xn--efvy88h", 1, false}, - {1, "xn--estv75g", 1, false}, - {1, "xn--fct429k", 1, false}, - {1, "xn--fhbei", 1, false}, - {1, "xn--fiq228c5hs", 1, false}, - {1, "xn--fiq64b", 1, false}, - {1, "xn--fjq720a", 1, false}, - {1, "xn--flw351e", 1, false}, - {1, "xn--fzys8d69uvgm", 1, false}, - {1, "xn--g2xx48c", 1, false}, - {1, "xn--gckr3f0f", 1, false}, - {1, "xn--gk3at1e", 1, false}, - {1, "xn--hxt814e", 1, false}, - {1, "xn--i1b6b1a6a2e", 1, false}, - {1, "xn--imr513n", 1, false}, - {1, "xn--io0a7i", 1, false}, - {1, "xn--j1aef", 1, false}, - {1, "xn--jlq480n2rg", 1, false}, - {1, "xn--jlq61u9w7b", 1, false}, - {1, "xn--jvr189m", 1, false}, - {1, "xn--kcrx77d1x4a", 1, false}, - {1, "xn--kpu716f", 1, false}, - {1, "xn--kput3i", 1, false}, - {1, "xn--mgba3a3ejt", 1, false}, - {1, "xn--mgba7c0bbn0a", 1, false}, - {1, "xn--mgbaakc7dvf", 1, false}, - {1, "xn--mgbab2bd", 1, false}, - {1, "xn--mgbca7dzdo", 1, false}, - {1, "xn--mgbi4ecexp", 1, false}, - {1, "xn--mgbt3dhd", 1, false}, - {1, "xn--mk1bu44c", 1, false}, - {1, "xn--mxtq1m", 1, false}, - {1, "xn--ngbc5azd", 1, false}, - {1, "xn--ngbe9e0a", 1, false}, - {1, "xn--ngbrx", 1, false}, - {1, "xn--nqv7f", 1, false}, - {1, "xn--nqv7fs00ema", 1, false}, - {1, "xn--nyqy26a", 1, false}, - {1, "xn--otu796d", 1, false}, - {1, "xn--p1acf", 1, false}, - {1, "xn--pbt977c", 1, false}, - {1, "xn--pssy2u", 1, false}, - {1, "xn--q9jyb4c", 1, false}, - {1, "xn--qcka1pmc", 1, false}, - {1, "xn--rhqv96g", 1, false}, - {1, "xn--rovu88b", 1, false}, - {1, "xn--ses554g", 1, false}, - {1, "xn--t60b56a", 1, false}, - {1, 
"xn--tckwe", 1, false}, - {1, "xn--tiq49xqyj", 1, false}, - {1, "xn--unup4y", 1, false}, - {1, "xn--vermgensberater-ctb", 1, false}, - {1, "xn--vermgensberatung-pwb", 1, false}, - {1, "xn--vhquv", 1, false}, - {1, "xn--vuq861b", 1, false}, - {1, "xn--w4r85el8fhu5dnra", 1, false}, - {1, "xn--w4rs40l", 1, false}, - {1, "xn--xhq521b", 1, false}, - {1, "xn--zfr164b", 1, false}, - {1, "xyz", 1, false}, - {1, "yachts", 1, false}, - {1, "yahoo", 1, false}, - {1, "yamaxun", 1, false}, - {1, "yandex", 1, false}, - {1, "yodobashi", 1, false}, - {1, "yoga", 1, false}, - {1, "yokohama", 1, false}, - {1, "you", 1, false}, - {1, "youtube", 1, false}, - {1, "yun", 1, false}, - {1, "zappos", 1, false}, - {1, "zara", 1, false}, - {1, "zero", 1, false}, - {1, "zip", 1, false}, - {1, "zone", 1, false}, - {1, "zuerich", 1, false}, - {1, "cc.ua", 2, true}, - {1, "inf.ua", 2, true}, - {1, "ltd.ua", 2, true}, - {1, "adobeaemcloud.com", 2, true}, - {1, "adobeaemcloud.net", 2, true}, - {2, "dev.adobeaemcloud.com", 4, true}, - {1, "beep.pl", 2, true}, - {1, "barsy.ca", 2, true}, - {2, "compute.estate", 3, true}, - {2, "alces.network", 3, true}, - {1, "altervista.org", 2, true}, - {1, "alwaysdata.net", 2, true}, - {1, "cloudfront.net", 2, true}, - {2, "compute.amazonaws.com", 4, true}, - {2, "compute-1.amazonaws.com", 4, true}, - {2, "compute.amazonaws.com.cn", 5, true}, - {1, "us-east-1.amazonaws.com", 3, true}, - {1, "cn-north-1.eb.amazonaws.com.cn", 5, true}, - {1, "cn-northwest-1.eb.amazonaws.com.cn", 5, true}, - {1, "elasticbeanstalk.com", 2, true}, - {1, "ap-northeast-1.elasticbeanstalk.com", 3, true}, - {1, "ap-northeast-2.elasticbeanstalk.com", 3, true}, - {1, "ap-northeast-3.elasticbeanstalk.com", 3, true}, - {1, "ap-south-1.elasticbeanstalk.com", 3, true}, - {1, "ap-southeast-1.elasticbeanstalk.com", 3, true}, - {1, "ap-southeast-2.elasticbeanstalk.com", 3, true}, - {1, "ca-central-1.elasticbeanstalk.com", 3, true}, - {1, "eu-central-1.elasticbeanstalk.com", 3, true}, - {1, 
"eu-west-1.elasticbeanstalk.com", 3, true}, - {1, "eu-west-2.elasticbeanstalk.com", 3, true}, - {1, "eu-west-3.elasticbeanstalk.com", 3, true}, - {1, "sa-east-1.elasticbeanstalk.com", 3, true}, - {1, "us-east-1.elasticbeanstalk.com", 3, true}, - {1, "us-east-2.elasticbeanstalk.com", 3, true}, - {1, "us-gov-west-1.elasticbeanstalk.com", 3, true}, - {1, "us-west-1.elasticbeanstalk.com", 3, true}, - {1, "us-west-2.elasticbeanstalk.com", 3, true}, - {2, "elb.amazonaws.com", 4, true}, - {2, "elb.amazonaws.com.cn", 5, true}, - {1, "s3.amazonaws.com", 3, true}, - {1, "s3-ap-northeast-1.amazonaws.com", 3, true}, - {1, "s3-ap-northeast-2.amazonaws.com", 3, true}, - {1, "s3-ap-south-1.amazonaws.com", 3, true}, - {1, "s3-ap-southeast-1.amazonaws.com", 3, true}, - {1, "s3-ap-southeast-2.amazonaws.com", 3, true}, - {1, "s3-ca-central-1.amazonaws.com", 3, true}, - {1, "s3-eu-central-1.amazonaws.com", 3, true}, - {1, "s3-eu-west-1.amazonaws.com", 3, true}, - {1, "s3-eu-west-2.amazonaws.com", 3, true}, - {1, "s3-eu-west-3.amazonaws.com", 3, true}, - {1, "s3-external-1.amazonaws.com", 3, true}, - {1, "s3-fips-us-gov-west-1.amazonaws.com", 3, true}, - {1, "s3-sa-east-1.amazonaws.com", 3, true}, - {1, "s3-us-gov-west-1.amazonaws.com", 3, true}, - {1, "s3-us-east-2.amazonaws.com", 3, true}, - {1, "s3-us-west-1.amazonaws.com", 3, true}, - {1, "s3-us-west-2.amazonaws.com", 3, true}, - {1, "s3.ap-northeast-2.amazonaws.com", 4, true}, - {1, "s3.ap-south-1.amazonaws.com", 4, true}, - {1, "s3.cn-north-1.amazonaws.com.cn", 5, true}, - {1, "s3.ca-central-1.amazonaws.com", 4, true}, - {1, "s3.eu-central-1.amazonaws.com", 4, true}, - {1, "s3.eu-west-2.amazonaws.com", 4, true}, - {1, "s3.eu-west-3.amazonaws.com", 4, true}, - {1, "s3.us-east-2.amazonaws.com", 4, true}, - {1, "s3.dualstack.ap-northeast-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.ap-northeast-2.amazonaws.com", 5, true}, - {1, "s3.dualstack.ap-south-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.ap-southeast-1.amazonaws.com", 
5, true}, - {1, "s3.dualstack.ap-southeast-2.amazonaws.com", 5, true}, - {1, "s3.dualstack.ca-central-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.eu-central-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.eu-west-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.eu-west-2.amazonaws.com", 5, true}, - {1, "s3.dualstack.eu-west-3.amazonaws.com", 5, true}, - {1, "s3.dualstack.sa-east-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.us-east-1.amazonaws.com", 5, true}, - {1, "s3.dualstack.us-east-2.amazonaws.com", 5, true}, - {1, "s3-website-us-east-1.amazonaws.com", 3, true}, - {1, "s3-website-us-west-1.amazonaws.com", 3, true}, - {1, "s3-website-us-west-2.amazonaws.com", 3, true}, - {1, "s3-website-ap-northeast-1.amazonaws.com", 3, true}, - {1, "s3-website-ap-southeast-1.amazonaws.com", 3, true}, - {1, "s3-website-ap-southeast-2.amazonaws.com", 3, true}, - {1, "s3-website-eu-west-1.amazonaws.com", 3, true}, - {1, "s3-website-sa-east-1.amazonaws.com", 3, true}, - {1, "s3-website.ap-northeast-2.amazonaws.com", 4, true}, - {1, "s3-website.ap-south-1.amazonaws.com", 4, true}, - {1, "s3-website.ca-central-1.amazonaws.com", 4, true}, - {1, "s3-website.eu-central-1.amazonaws.com", 4, true}, - {1, "s3-website.eu-west-2.amazonaws.com", 4, true}, - {1, "s3-website.eu-west-3.amazonaws.com", 4, true}, - {1, "s3-website.us-east-2.amazonaws.com", 4, true}, - {1, "amsw.nl", 2, true}, - {1, "t3l3p0rt.net", 2, true}, - {1, "tele.amune.org", 3, true}, - {1, "apigee.io", 2, true}, - {1, "on-aptible.com", 2, true}, - {1, "user.aseinet.ne.jp", 4, true}, - {1, "gv.vc", 2, true}, - {1, "d.gv.vc", 3, true}, - {1, "user.party.eus", 3, true}, - {1, "pimienta.org", 2, true}, - {1, "poivron.org", 2, true}, - {1, "potager.org", 2, true}, - {1, "sweetpepper.org", 2, true}, - {1, "myasustor.com", 2, true}, - {1, "myfritz.net", 2, true}, - {2, "awdev.ca", 3, true}, - {2, "advisor.ws", 3, true}, - {1, "b-data.io", 2, true}, - {1, "backplaneapp.io", 2, true}, - {1, "balena-devices.com", 2, true}, - 
{1, "app.banzaicloud.io", 3, true}, - {1, "betainabox.com", 2, true}, - {1, "bnr.la", 2, true}, - {1, "blackbaudcdn.net", 2, true}, - {1, "boomla.net", 2, true}, - {1, "boxfuse.io", 2, true}, - {1, "square7.ch", 2, true}, - {1, "bplaced.com", 2, true}, - {1, "bplaced.de", 2, true}, - {1, "square7.de", 2, true}, - {1, "bplaced.net", 2, true}, - {1, "square7.net", 2, true}, - {1, "browsersafetymark.io", 2, true}, - {1, "uk0.bigv.io", 3, true}, - {1, "dh.bytemark.co.uk", 4, true}, - {1, "vm.bytemark.co.uk", 4, true}, - {1, "mycd.eu", 2, true}, - {1, "carrd.co", 2, true}, - {1, "crd.co", 2, true}, - {1, "uwu.ai", 2, true}, - {1, "ae.org", 2, true}, - {1, "ar.com", 2, true}, - {1, "br.com", 2, true}, - {1, "cn.com", 2, true}, - {1, "com.de", 2, true}, - {1, "com.se", 2, true}, - {1, "de.com", 2, true}, - {1, "eu.com", 2, true}, - {1, "gb.com", 2, true}, - {1, "gb.net", 2, true}, - {1, "hu.com", 2, true}, - {1, "hu.net", 2, true}, - {1, "jp.net", 2, true}, - {1, "jpn.com", 2, true}, - {1, "kr.com", 2, true}, - {1, "mex.com", 2, true}, - {1, "no.com", 2, true}, - {1, "qc.com", 2, true}, - {1, "ru.com", 2, true}, - {1, "sa.com", 2, true}, - {1, "se.net", 2, true}, - {1, "uk.com", 2, true}, - {1, "uk.net", 2, true}, - {1, "us.com", 2, true}, - {1, "uy.com", 2, true}, - {1, "za.bz", 2, true}, - {1, "za.com", 2, true}, - {1, "africa.com", 2, true}, - {1, "gr.com", 2, true}, - {1, "in.net", 2, true}, - {1, "us.org", 2, true}, - {1, "co.com", 2, true}, - {1, "c.la", 2, true}, - {1, "certmgr.org", 2, true}, - {1, "xenapponazure.com", 2, true}, - {1, "discourse.group", 2, true}, - {1, "discourse.team", 2, true}, - {1, "virtueeldomein.nl", 2, true}, - {1, "cleverapps.io", 2, true}, - {2, "lcl.dev", 3, true}, - {2, "stg.dev", 3, true}, - {1, "c66.me", 2, true}, - {1, "cloud66.ws", 2, true}, - {1, "cloud66.zone", 2, true}, - {1, "jdevcloud.com", 2, true}, - {1, "wpdevcloud.com", 2, true}, - {1, "cloudaccess.host", 2, true}, - {1, "freesite.host", 2, true}, - {1, "cloudaccess.net", 
2, true}, - {1, "cloudcontrolled.com", 2, true}, - {1, "cloudcontrolapp.com", 2, true}, - {1, "cloudera.site", 2, true}, - {1, "trycloudflare.com", 2, true}, - {1, "workers.dev", 2, true}, - {1, "wnext.app", 2, true}, - {1, "co.ca", 2, true}, - {2, "otap.co", 3, true}, - {1, "co.cz", 2, true}, - {1, "c.cdn77.org", 3, true}, - {1, "cdn77-ssl.net", 2, true}, - {1, "r.cdn77.net", 3, true}, - {1, "rsc.cdn77.org", 3, true}, - {1, "ssl.origin.cdn77-secure.org", 4, true}, - {1, "cloudns.asia", 2, true}, - {1, "cloudns.biz", 2, true}, - {1, "cloudns.club", 2, true}, - {1, "cloudns.cc", 2, true}, - {1, "cloudns.eu", 2, true}, - {1, "cloudns.in", 2, true}, - {1, "cloudns.info", 2, true}, - {1, "cloudns.org", 2, true}, - {1, "cloudns.pro", 2, true}, - {1, "cloudns.pw", 2, true}, - {1, "cloudns.us", 2, true}, - {1, "cloudeity.net", 2, true}, - {1, "cnpy.gdn", 2, true}, - {1, "co.nl", 2, true}, - {1, "co.no", 2, true}, - {1, "webhosting.be", 2, true}, - {1, "hosting-cluster.nl", 2, true}, - {1, "ac.ru", 2, true}, - {1, "edu.ru", 2, true}, - {1, "gov.ru", 2, true}, - {1, "int.ru", 2, true}, - {1, "mil.ru", 2, true}, - {1, "test.ru", 2, true}, - {1, "dyn.cosidns.de", 3, true}, - {1, "dynamisches-dns.de", 2, true}, - {1, "dnsupdater.de", 2, true}, - {1, "internet-dns.de", 2, true}, - {1, "l-o-g-i-n.de", 2, true}, - {1, "dynamic-dns.info", 2, true}, - {1, "feste-ip.net", 2, true}, - {1, "knx-server.net", 2, true}, - {1, "static-access.net", 2, true}, - {1, "realm.cz", 2, true}, - {2, "cryptonomic.net", 3, true}, - {1, "cupcake.is", 2, true}, - {2, "customer-oci.com", 3, true}, - {2, "oci.customer-oci.com", 4, true}, - {2, "ocp.customer-oci.com", 4, true}, - {2, "ocs.customer-oci.com", 4, true}, - {1, "cyon.link", 2, true}, - {1, "cyon.site", 2, true}, - {1, "daplie.me", 2, true}, - {1, "localhost.daplie.me", 3, true}, - {1, "dattolocal.com", 2, true}, - {1, "dattorelay.com", 2, true}, - {1, "dattoweb.com", 2, true}, - {1, "mydatto.com", 2, true}, - {1, "dattolocal.net", 2, true}, - 
{1, "mydatto.net", 2, true}, - {1, "biz.dk", 2, true}, - {1, "co.dk", 2, true}, - {1, "firm.dk", 2, true}, - {1, "reg.dk", 2, true}, - {1, "store.dk", 2, true}, - {2, "dapps.earth", 3, true}, - {2, "bzz.dapps.earth", 4, true}, - {1, "builtwithdark.com", 2, true}, - {1, "edgestack.me", 2, true}, - {1, "debian.net", 2, true}, - {1, "dedyn.io", 2, true}, - {1, "dnshome.de", 2, true}, - {1, "online.th", 2, true}, - {1, "shop.th", 2, true}, - {1, "drayddns.com", 2, true}, - {1, "dreamhosters.com", 2, true}, - {1, "mydrobo.com", 2, true}, - {1, "drud.io", 2, true}, - {1, "drud.us", 2, true}, - {1, "duckdns.org", 2, true}, - {1, "dy.fi", 2, true}, - {1, "tunk.org", 2, true}, - {1, "dyndns-at-home.com", 2, true}, - {1, "dyndns-at-work.com", 2, true}, - {1, "dyndns-blog.com", 2, true}, - {1, "dyndns-free.com", 2, true}, - {1, "dyndns-home.com", 2, true}, - {1, "dyndns-ip.com", 2, true}, - {1, "dyndns-mail.com", 2, true}, - {1, "dyndns-office.com", 2, true}, - {1, "dyndns-pics.com", 2, true}, - {1, "dyndns-remote.com", 2, true}, - {1, "dyndns-server.com", 2, true}, - {1, "dyndns-web.com", 2, true}, - {1, "dyndns-wiki.com", 2, true}, - {1, "dyndns-work.com", 2, true}, - {1, "dyndns.biz", 2, true}, - {1, "dyndns.info", 2, true}, - {1, "dyndns.org", 2, true}, - {1, "dyndns.tv", 2, true}, - {1, "at-band-camp.net", 2, true}, - {1, "ath.cx", 2, true}, - {1, "barrel-of-knowledge.info", 2, true}, - {1, "barrell-of-knowledge.info", 2, true}, - {1, "better-than.tv", 2, true}, - {1, "blogdns.com", 2, true}, - {1, "blogdns.net", 2, true}, - {1, "blogdns.org", 2, true}, - {1, "blogsite.org", 2, true}, - {1, "boldlygoingnowhere.org", 2, true}, - {1, "broke-it.net", 2, true}, - {1, "buyshouses.net", 2, true}, - {1, "cechire.com", 2, true}, - {1, "dnsalias.com", 2, true}, - {1, "dnsalias.net", 2, true}, - {1, "dnsalias.org", 2, true}, - {1, "dnsdojo.com", 2, true}, - {1, "dnsdojo.net", 2, true}, - {1, "dnsdojo.org", 2, true}, - {1, "does-it.net", 2, true}, - {1, "doesntexist.com", 2, true}, 
- {1, "doesntexist.org", 2, true}, - {1, "dontexist.com", 2, true}, - {1, "dontexist.net", 2, true}, - {1, "dontexist.org", 2, true}, - {1, "doomdns.com", 2, true}, - {1, "doomdns.org", 2, true}, - {1, "dvrdns.org", 2, true}, - {1, "dyn-o-saur.com", 2, true}, - {1, "dynalias.com", 2, true}, - {1, "dynalias.net", 2, true}, - {1, "dynalias.org", 2, true}, - {1, "dynathome.net", 2, true}, - {1, "dyndns.ws", 2, true}, - {1, "endofinternet.net", 2, true}, - {1, "endofinternet.org", 2, true}, - {1, "endoftheinternet.org", 2, true}, - {1, "est-a-la-maison.com", 2, true}, - {1, "est-a-la-masion.com", 2, true}, - {1, "est-le-patron.com", 2, true}, - {1, "est-mon-blogueur.com", 2, true}, - {1, "for-better.biz", 2, true}, - {1, "for-more.biz", 2, true}, - {1, "for-our.info", 2, true}, - {1, "for-some.biz", 2, true}, - {1, "for-the.biz", 2, true}, - {1, "forgot.her.name", 3, true}, - {1, "forgot.his.name", 3, true}, - {1, "from-ak.com", 2, true}, - {1, "from-al.com", 2, true}, - {1, "from-ar.com", 2, true}, - {1, "from-az.net", 2, true}, - {1, "from-ca.com", 2, true}, - {1, "from-co.net", 2, true}, - {1, "from-ct.com", 2, true}, - {1, "from-dc.com", 2, true}, - {1, "from-de.com", 2, true}, - {1, "from-fl.com", 2, true}, - {1, "from-ga.com", 2, true}, - {1, "from-hi.com", 2, true}, - {1, "from-ia.com", 2, true}, - {1, "from-id.com", 2, true}, - {1, "from-il.com", 2, true}, - {1, "from-in.com", 2, true}, - {1, "from-ks.com", 2, true}, - {1, "from-ky.com", 2, true}, - {1, "from-la.net", 2, true}, - {1, "from-ma.com", 2, true}, - {1, "from-md.com", 2, true}, - {1, "from-me.org", 2, true}, - {1, "from-mi.com", 2, true}, - {1, "from-mn.com", 2, true}, - {1, "from-mo.com", 2, true}, - {1, "from-ms.com", 2, true}, - {1, "from-mt.com", 2, true}, - {1, "from-nc.com", 2, true}, - {1, "from-nd.com", 2, true}, - {1, "from-ne.com", 2, true}, - {1, "from-nh.com", 2, true}, - {1, "from-nj.com", 2, true}, - {1, "from-nm.com", 2, true}, - {1, "from-nv.com", 2, true}, - {1, "from-ny.net", 2, 
true}, - {1, "from-oh.com", 2, true}, - {1, "from-ok.com", 2, true}, - {1, "from-or.com", 2, true}, - {1, "from-pa.com", 2, true}, - {1, "from-pr.com", 2, true}, - {1, "from-ri.com", 2, true}, - {1, "from-sc.com", 2, true}, - {1, "from-sd.com", 2, true}, - {1, "from-tn.com", 2, true}, - {1, "from-tx.com", 2, true}, - {1, "from-ut.com", 2, true}, - {1, "from-va.com", 2, true}, - {1, "from-vt.com", 2, true}, - {1, "from-wa.com", 2, true}, - {1, "from-wi.com", 2, true}, - {1, "from-wv.com", 2, true}, - {1, "from-wy.com", 2, true}, - {1, "ftpaccess.cc", 2, true}, - {1, "fuettertdasnetz.de", 2, true}, - {1, "game-host.org", 2, true}, - {1, "game-server.cc", 2, true}, - {1, "getmyip.com", 2, true}, - {1, "gets-it.net", 2, true}, - {1, "go.dyndns.org", 3, true}, - {1, "gotdns.com", 2, true}, - {1, "gotdns.org", 2, true}, - {1, "groks-the.info", 2, true}, - {1, "groks-this.info", 2, true}, - {1, "ham-radio-op.net", 2, true}, - {1, "here-for-more.info", 2, true}, - {1, "hobby-site.com", 2, true}, - {1, "hobby-site.org", 2, true}, - {1, "home.dyndns.org", 3, true}, - {1, "homedns.org", 2, true}, - {1, "homeftp.net", 2, true}, - {1, "homeftp.org", 2, true}, - {1, "homeip.net", 2, true}, - {1, "homelinux.com", 2, true}, - {1, "homelinux.net", 2, true}, - {1, "homelinux.org", 2, true}, - {1, "homeunix.com", 2, true}, - {1, "homeunix.net", 2, true}, - {1, "homeunix.org", 2, true}, - {1, "iamallama.com", 2, true}, - {1, "in-the-band.net", 2, true}, - {1, "is-a-anarchist.com", 2, true}, - {1, "is-a-blogger.com", 2, true}, - {1, "is-a-bookkeeper.com", 2, true}, - {1, "is-a-bruinsfan.org", 2, true}, - {1, "is-a-bulls-fan.com", 2, true}, - {1, "is-a-candidate.org", 2, true}, - {1, "is-a-caterer.com", 2, true}, - {1, "is-a-celticsfan.org", 2, true}, - {1, "is-a-chef.com", 2, true}, - {1, "is-a-chef.net", 2, true}, - {1, "is-a-chef.org", 2, true}, - {1, "is-a-conservative.com", 2, true}, - {1, "is-a-cpa.com", 2, true}, - {1, "is-a-cubicle-slave.com", 2, true}, - {1, 
"is-a-democrat.com", 2, true}, - {1, "is-a-designer.com", 2, true}, - {1, "is-a-doctor.com", 2, true}, - {1, "is-a-financialadvisor.com", 2, true}, - {1, "is-a-geek.com", 2, true}, - {1, "is-a-geek.net", 2, true}, - {1, "is-a-geek.org", 2, true}, - {1, "is-a-green.com", 2, true}, - {1, "is-a-guru.com", 2, true}, - {1, "is-a-hard-worker.com", 2, true}, - {1, "is-a-hunter.com", 2, true}, - {1, "is-a-knight.org", 2, true}, - {1, "is-a-landscaper.com", 2, true}, - {1, "is-a-lawyer.com", 2, true}, - {1, "is-a-liberal.com", 2, true}, - {1, "is-a-libertarian.com", 2, true}, - {1, "is-a-linux-user.org", 2, true}, - {1, "is-a-llama.com", 2, true}, - {1, "is-a-musician.com", 2, true}, - {1, "is-a-nascarfan.com", 2, true}, - {1, "is-a-nurse.com", 2, true}, - {1, "is-a-painter.com", 2, true}, - {1, "is-a-patsfan.org", 2, true}, - {1, "is-a-personaltrainer.com", 2, true}, - {1, "is-a-photographer.com", 2, true}, - {1, "is-a-player.com", 2, true}, - {1, "is-a-republican.com", 2, true}, - {1, "is-a-rockstar.com", 2, true}, - {1, "is-a-socialist.com", 2, true}, - {1, "is-a-soxfan.org", 2, true}, - {1, "is-a-student.com", 2, true}, - {1, "is-a-teacher.com", 2, true}, - {1, "is-a-techie.com", 2, true}, - {1, "is-a-therapist.com", 2, true}, - {1, "is-an-accountant.com", 2, true}, - {1, "is-an-actor.com", 2, true}, - {1, "is-an-actress.com", 2, true}, - {1, "is-an-anarchist.com", 2, true}, - {1, "is-an-artist.com", 2, true}, - {1, "is-an-engineer.com", 2, true}, - {1, "is-an-entertainer.com", 2, true}, - {1, "is-by.us", 2, true}, - {1, "is-certified.com", 2, true}, - {1, "is-found.org", 2, true}, - {1, "is-gone.com", 2, true}, - {1, "is-into-anime.com", 2, true}, - {1, "is-into-cars.com", 2, true}, - {1, "is-into-cartoons.com", 2, true}, - {1, "is-into-games.com", 2, true}, - {1, "is-leet.com", 2, true}, - {1, "is-lost.org", 2, true}, - {1, "is-not-certified.com", 2, true}, - {1, "is-saved.org", 2, true}, - {1, "is-slick.com", 2, true}, - {1, "is-uberleet.com", 2, true}, - {1, 
"is-very-bad.org", 2, true}, - {1, "is-very-evil.org", 2, true}, - {1, "is-very-good.org", 2, true}, - {1, "is-very-nice.org", 2, true}, - {1, "is-very-sweet.org", 2, true}, - {1, "is-with-theband.com", 2, true}, - {1, "isa-geek.com", 2, true}, - {1, "isa-geek.net", 2, true}, - {1, "isa-geek.org", 2, true}, - {1, "isa-hockeynut.com", 2, true}, - {1, "issmarterthanyou.com", 2, true}, - {1, "isteingeek.de", 2, true}, - {1, "istmein.de", 2, true}, - {1, "kicks-ass.net", 2, true}, - {1, "kicks-ass.org", 2, true}, - {1, "knowsitall.info", 2, true}, - {1, "land-4-sale.us", 2, true}, - {1, "lebtimnetz.de", 2, true}, - {1, "leitungsen.de", 2, true}, - {1, "likes-pie.com", 2, true}, - {1, "likescandy.com", 2, true}, - {1, "merseine.nu", 2, true}, - {1, "mine.nu", 2, true}, - {1, "misconfused.org", 2, true}, - {1, "mypets.ws", 2, true}, - {1, "myphotos.cc", 2, true}, - {1, "neat-url.com", 2, true}, - {1, "office-on-the.net", 2, true}, - {1, "on-the-web.tv", 2, true}, - {1, "podzone.net", 2, true}, - {1, "podzone.org", 2, true}, - {1, "readmyblog.org", 2, true}, - {1, "saves-the-whales.com", 2, true}, - {1, "scrapper-site.net", 2, true}, - {1, "scrapping.cc", 2, true}, - {1, "selfip.biz", 2, true}, - {1, "selfip.com", 2, true}, - {1, "selfip.info", 2, true}, - {1, "selfip.net", 2, true}, - {1, "selfip.org", 2, true}, - {1, "sells-for-less.com", 2, true}, - {1, "sells-for-u.com", 2, true}, - {1, "sells-it.net", 2, true}, - {1, "sellsyourhome.org", 2, true}, - {1, "servebbs.com", 2, true}, - {1, "servebbs.net", 2, true}, - {1, "servebbs.org", 2, true}, - {1, "serveftp.net", 2, true}, - {1, "serveftp.org", 2, true}, - {1, "servegame.org", 2, true}, - {1, "shacknet.nu", 2, true}, - {1, "simple-url.com", 2, true}, - {1, "space-to-rent.com", 2, true}, - {1, "stuff-4-sale.org", 2, true}, - {1, "stuff-4-sale.us", 2, true}, - {1, "teaches-yoga.com", 2, true}, - {1, "thruhere.net", 2, true}, - {1, "traeumtgerade.de", 2, true}, - {1, "webhop.biz", 2, true}, - {1, "webhop.info", 2, 
true}, - {1, "webhop.net", 2, true}, - {1, "webhop.org", 2, true}, - {1, "worse-than.tv", 2, true}, - {1, "writesthisblog.com", 2, true}, - {1, "ddnss.de", 2, true}, - {1, "dyn.ddnss.de", 3, true}, - {1, "dyndns.ddnss.de", 3, true}, - {1, "dyndns1.de", 2, true}, - {1, "dyn-ip24.de", 2, true}, - {1, "home-webserver.de", 2, true}, - {1, "dyn.home-webserver.de", 3, true}, - {1, "myhome-server.de", 2, true}, - {1, "ddnss.org", 2, true}, - {1, "definima.net", 2, true}, - {1, "definima.io", 2, true}, - {1, "bci.dnstrace.pro", 3, true}, - {1, "ddnsfree.com", 2, true}, - {1, "ddnsgeek.com", 2, true}, - {1, "giize.com", 2, true}, - {1, "gleeze.com", 2, true}, - {1, "kozow.com", 2, true}, - {1, "loseyourip.com", 2, true}, - {1, "ooguy.com", 2, true}, - {1, "theworkpc.com", 2, true}, - {1, "casacam.net", 2, true}, - {1, "dynu.net", 2, true}, - {1, "accesscam.org", 2, true}, - {1, "camdvr.org", 2, true}, - {1, "freeddns.org", 2, true}, - {1, "mywire.org", 2, true}, - {1, "webredirect.org", 2, true}, - {1, "myddns.rocks", 2, true}, - {1, "blogsite.xyz", 2, true}, - {1, "dynv6.net", 2, true}, - {1, "e4.cz", 2, true}, - {1, "en-root.fr", 2, true}, - {1, "mytuleap.com", 2, true}, - {1, "onred.one", 2, true}, - {1, "staging.onred.one", 3, true}, - {1, "enonic.io", 2, true}, - {1, "customer.enonic.io", 3, true}, - {1, "eu.org", 2, true}, - {1, "al.eu.org", 3, true}, - {1, "asso.eu.org", 3, true}, - {1, "at.eu.org", 3, true}, - {1, "au.eu.org", 3, true}, - {1, "be.eu.org", 3, true}, - {1, "bg.eu.org", 3, true}, - {1, "ca.eu.org", 3, true}, - {1, "cd.eu.org", 3, true}, - {1, "ch.eu.org", 3, true}, - {1, "cn.eu.org", 3, true}, - {1, "cy.eu.org", 3, true}, - {1, "cz.eu.org", 3, true}, - {1, "de.eu.org", 3, true}, - {1, "dk.eu.org", 3, true}, - {1, "edu.eu.org", 3, true}, - {1, "ee.eu.org", 3, true}, - {1, "es.eu.org", 3, true}, - {1, "fi.eu.org", 3, true}, - {1, "fr.eu.org", 3, true}, - {1, "gr.eu.org", 3, true}, - {1, "hr.eu.org", 3, true}, - {1, "hu.eu.org", 3, true}, - {1, 
"ie.eu.org", 3, true}, - {1, "il.eu.org", 3, true}, - {1, "in.eu.org", 3, true}, - {1, "int.eu.org", 3, true}, - {1, "is.eu.org", 3, true}, - {1, "it.eu.org", 3, true}, - {1, "jp.eu.org", 3, true}, - {1, "kr.eu.org", 3, true}, - {1, "lt.eu.org", 3, true}, - {1, "lu.eu.org", 3, true}, - {1, "lv.eu.org", 3, true}, - {1, "mc.eu.org", 3, true}, - {1, "me.eu.org", 3, true}, - {1, "mk.eu.org", 3, true}, - {1, "mt.eu.org", 3, true}, - {1, "my.eu.org", 3, true}, - {1, "net.eu.org", 3, true}, - {1, "ng.eu.org", 3, true}, - {1, "nl.eu.org", 3, true}, - {1, "no.eu.org", 3, true}, - {1, "nz.eu.org", 3, true}, - {1, "paris.eu.org", 3, true}, - {1, "pl.eu.org", 3, true}, - {1, "pt.eu.org", 3, true}, - {1, "q-a.eu.org", 3, true}, - {1, "ro.eu.org", 3, true}, - {1, "ru.eu.org", 3, true}, - {1, "se.eu.org", 3, true}, - {1, "si.eu.org", 3, true}, - {1, "sk.eu.org", 3, true}, - {1, "tr.eu.org", 3, true}, - {1, "uk.eu.org", 3, true}, - {1, "us.eu.org", 3, true}, - {1, "eu-1.evennode.com", 3, true}, - {1, "eu-2.evennode.com", 3, true}, - {1, "eu-3.evennode.com", 3, true}, - {1, "eu-4.evennode.com", 3, true}, - {1, "us-1.evennode.com", 3, true}, - {1, "us-2.evennode.com", 3, true}, - {1, "us-3.evennode.com", 3, true}, - {1, "us-4.evennode.com", 3, true}, - {1, "twmail.cc", 2, true}, - {1, "twmail.net", 2, true}, - {1, "twmail.org", 2, true}, - {1, "mymailer.com.tw", 3, true}, - {1, "url.tw", 2, true}, - {1, "apps.fbsbx.com", 3, true}, - {1, "ru.net", 2, true}, - {1, "adygeya.ru", 2, true}, - {1, "bashkiria.ru", 2, true}, - {1, "bir.ru", 2, true}, - {1, "cbg.ru", 2, true}, - {1, "com.ru", 2, true}, - {1, "dagestan.ru", 2, true}, - {1, "grozny.ru", 2, true}, - {1, "kalmykia.ru", 2, true}, - {1, "kustanai.ru", 2, true}, - {1, "marine.ru", 2, true}, - {1, "mordovia.ru", 2, true}, - {1, "msk.ru", 2, true}, - {1, "mytis.ru", 2, true}, - {1, "nalchik.ru", 2, true}, - {1, "nov.ru", 2, true}, - {1, "pyatigorsk.ru", 2, true}, - {1, "spb.ru", 2, true}, - {1, "vladikavkaz.ru", 2, true}, - {1, 
"vladimir.ru", 2, true}, - {1, "abkhazia.su", 2, true}, - {1, "adygeya.su", 2, true}, - {1, "aktyubinsk.su", 2, true}, - {1, "arkhangelsk.su", 2, true}, - {1, "armenia.su", 2, true}, - {1, "ashgabad.su", 2, true}, - {1, "azerbaijan.su", 2, true}, - {1, "balashov.su", 2, true}, - {1, "bashkiria.su", 2, true}, - {1, "bryansk.su", 2, true}, - {1, "bukhara.su", 2, true}, - {1, "chimkent.su", 2, true}, - {1, "dagestan.su", 2, true}, - {1, "east-kazakhstan.su", 2, true}, - {1, "exnet.su", 2, true}, - {1, "georgia.su", 2, true}, - {1, "grozny.su", 2, true}, - {1, "ivanovo.su", 2, true}, - {1, "jambyl.su", 2, true}, - {1, "kalmykia.su", 2, true}, - {1, "kaluga.su", 2, true}, - {1, "karacol.su", 2, true}, - {1, "karaganda.su", 2, true}, - {1, "karelia.su", 2, true}, - {1, "khakassia.su", 2, true}, - {1, "krasnodar.su", 2, true}, - {1, "kurgan.su", 2, true}, - {1, "kustanai.su", 2, true}, - {1, "lenug.su", 2, true}, - {1, "mangyshlak.su", 2, true}, - {1, "mordovia.su", 2, true}, - {1, "msk.su", 2, true}, - {1, "murmansk.su", 2, true}, - {1, "nalchik.su", 2, true}, - {1, "navoi.su", 2, true}, - {1, "north-kazakhstan.su", 2, true}, - {1, "nov.su", 2, true}, - {1, "obninsk.su", 2, true}, - {1, "penza.su", 2, true}, - {1, "pokrovsk.su", 2, true}, - {1, "sochi.su", 2, true}, - {1, "spb.su", 2, true}, - {1, "tashkent.su", 2, true}, - {1, "termez.su", 2, true}, - {1, "togliatti.su", 2, true}, - {1, "troitsk.su", 2, true}, - {1, "tselinograd.su", 2, true}, - {1, "tula.su", 2, true}, - {1, "tuva.su", 2, true}, - {1, "vladikavkaz.su", 2, true}, - {1, "vladimir.su", 2, true}, - {1, "vologda.su", 2, true}, - {1, "channelsdvr.net", 2, true}, - {1, "u.channelsdvr.net", 3, true}, - {1, "fastly-terrarium.com", 2, true}, - {1, "fastlylb.net", 2, true}, - {1, "map.fastlylb.net", 3, true}, - {1, "freetls.fastly.net", 3, true}, - {1, "map.fastly.net", 3, true}, - {1, "a.prod.fastly.net", 4, true}, - {1, "global.prod.fastly.net", 4, true}, - {1, "a.ssl.fastly.net", 4, true}, - {1, 
"b.ssl.fastly.net", 4, true}, - {1, "global.ssl.fastly.net", 4, true}, - {1, "fastpanel.direct", 2, true}, - {1, "fastvps-server.com", 2, true}, - {1, "fhapp.xyz", 2, true}, - {1, "fedorainfracloud.org", 2, true}, - {1, "fedorapeople.org", 2, true}, - {1, "cloud.fedoraproject.org", 3, true}, - {1, "app.os.fedoraproject.org", 4, true}, - {1, "app.os.stg.fedoraproject.org", 5, true}, - {1, "mydobiss.com", 2, true}, - {1, "filegear.me", 2, true}, - {1, "filegear-au.me", 2, true}, - {1, "filegear-de.me", 2, true}, - {1, "filegear-gb.me", 2, true}, - {1, "filegear-ie.me", 2, true}, - {1, "filegear-jp.me", 2, true}, - {1, "filegear-sg.me", 2, true}, - {1, "firebaseapp.com", 2, true}, - {1, "flynnhub.com", 2, true}, - {1, "flynnhosting.net", 2, true}, - {1, "0e.vc", 2, true}, - {1, "freebox-os.com", 2, true}, - {1, "freeboxos.com", 2, true}, - {1, "fbx-os.fr", 2, true}, - {1, "fbxos.fr", 2, true}, - {1, "freebox-os.fr", 2, true}, - {1, "freeboxos.fr", 2, true}, - {1, "freedesktop.org", 2, true}, - {2, "futurecms.at", 3, true}, - {2, "ex.futurecms.at", 4, true}, - {2, "in.futurecms.at", 4, true}, - {1, "futurehosting.at", 2, true}, - {1, "futuremailing.at", 2, true}, - {2, "ex.ortsinfo.at", 4, true}, - {2, "kunden.ortsinfo.at", 4, true}, - {2, "statics.cloud", 3, true}, - {1, "service.gov.uk", 3, true}, - {1, "gehirn.ne.jp", 3, true}, - {1, "usercontent.jp", 2, true}, - {1, "gentapps.com", 2, true}, - {1, "lab.ms", 2, true}, - {1, "github.io", 2, true}, - {1, "githubusercontent.com", 2, true}, - {1, "gitlab.io", 2, true}, - {1, "glitch.me", 2, true}, - {1, "lolipop.io", 2, true}, - {1, "cloudapps.digital", 2, true}, - {1, "london.cloudapps.digital", 3, true}, - {1, "homeoffice.gov.uk", 3, true}, - {1, "ro.im", 2, true}, - {1, "shop.ro", 2, true}, - {1, "goip.de", 2, true}, - {1, "run.app", 2, true}, - {1, "a.run.app", 3, true}, - {1, "web.app", 2, true}, - {2, "0emm.com", 3, true}, - {1, "appspot.com", 2, true}, - {2, "r.appspot.com", 4, true}, - {1, "blogspot.ae", 2, 
true}, - {1, "blogspot.al", 2, true}, - {1, "blogspot.am", 2, true}, - {1, "blogspot.ba", 2, true}, - {1, "blogspot.be", 2, true}, - {1, "blogspot.bg", 2, true}, - {1, "blogspot.bj", 2, true}, - {1, "blogspot.ca", 2, true}, - {1, "blogspot.cf", 2, true}, - {1, "blogspot.ch", 2, true}, - {1, "blogspot.cl", 2, true}, - {1, "blogspot.co.at", 3, true}, - {1, "blogspot.co.id", 3, true}, - {1, "blogspot.co.il", 3, true}, - {1, "blogspot.co.ke", 3, true}, - {1, "blogspot.co.nz", 3, true}, - {1, "blogspot.co.uk", 3, true}, - {1, "blogspot.co.za", 3, true}, - {1, "blogspot.com", 2, true}, - {1, "blogspot.com.ar", 3, true}, - {1, "blogspot.com.au", 3, true}, - {1, "blogspot.com.br", 3, true}, - {1, "blogspot.com.by", 3, true}, - {1, "blogspot.com.co", 3, true}, - {1, "blogspot.com.cy", 3, true}, - {1, "blogspot.com.ee", 3, true}, - {1, "blogspot.com.eg", 3, true}, - {1, "blogspot.com.es", 3, true}, - {1, "blogspot.com.mt", 3, true}, - {1, "blogspot.com.ng", 3, true}, - {1, "blogspot.com.tr", 3, true}, - {1, "blogspot.com.uy", 3, true}, - {1, "blogspot.cv", 2, true}, - {1, "blogspot.cz", 2, true}, - {1, "blogspot.de", 2, true}, - {1, "blogspot.dk", 2, true}, - {1, "blogspot.fi", 2, true}, - {1, "blogspot.fr", 2, true}, - {1, "blogspot.gr", 2, true}, - {1, "blogspot.hk", 2, true}, - {1, "blogspot.hr", 2, true}, - {1, "blogspot.hu", 2, true}, - {1, "blogspot.ie", 2, true}, - {1, "blogspot.in", 2, true}, - {1, "blogspot.is", 2, true}, - {1, "blogspot.it", 2, true}, - {1, "blogspot.jp", 2, true}, - {1, "blogspot.kr", 2, true}, - {1, "blogspot.li", 2, true}, - {1, "blogspot.lt", 2, true}, - {1, "blogspot.lu", 2, true}, - {1, "blogspot.md", 2, true}, - {1, "blogspot.mk", 2, true}, - {1, "blogspot.mr", 2, true}, - {1, "blogspot.mx", 2, true}, - {1, "blogspot.my", 2, true}, - {1, "blogspot.nl", 2, true}, - {1, "blogspot.no", 2, true}, - {1, "blogspot.pe", 2, true}, - {1, "blogspot.pt", 2, true}, - {1, "blogspot.qa", 2, true}, - {1, "blogspot.re", 2, true}, - {1, "blogspot.ro", 2, 
true}, - {1, "blogspot.rs", 2, true}, - {1, "blogspot.ru", 2, true}, - {1, "blogspot.se", 2, true}, - {1, "blogspot.sg", 2, true}, - {1, "blogspot.si", 2, true}, - {1, "blogspot.sk", 2, true}, - {1, "blogspot.sn", 2, true}, - {1, "blogspot.td", 2, true}, - {1, "blogspot.tw", 2, true}, - {1, "blogspot.ug", 2, true}, - {1, "blogspot.vn", 2, true}, - {1, "cloudfunctions.net", 2, true}, - {1, "cloud.goog", 2, true}, - {1, "codespot.com", 2, true}, - {1, "googleapis.com", 2, true}, - {1, "googlecode.com", 2, true}, - {1, "pagespeedmobilizer.com", 2, true}, - {1, "publishproxy.com", 2, true}, - {1, "withgoogle.com", 2, true}, - {1, "withyoutube.com", 2, true}, - {1, "awsmppl.com", 2, true}, - {1, "fin.ci", 2, true}, - {1, "free.hr", 2, true}, - {1, "caa.li", 2, true}, - {1, "ua.rs", 2, true}, - {1, "conf.se", 2, true}, - {1, "hs.zone", 2, true}, - {1, "hs.run", 2, true}, - {1, "hashbang.sh", 2, true}, - {1, "hasura.app", 2, true}, - {1, "hasura-app.io", 2, true}, - {1, "hepforge.org", 2, true}, - {1, "herokuapp.com", 2, true}, - {1, "herokussl.com", 2, true}, - {1, "myravendb.com", 2, true}, - {1, "ravendb.community", 2, true}, - {1, "ravendb.me", 2, true}, - {1, "development.run", 2, true}, - {1, "ravendb.run", 2, true}, - {1, "bpl.biz", 2, true}, - {1, "orx.biz", 2, true}, - {1, "ng.city", 2, true}, - {1, "biz.gl", 2, true}, - {1, "ng.ink", 2, true}, - {1, "col.ng", 2, true}, - {1, "firm.ng", 2, true}, - {1, "gen.ng", 2, true}, - {1, "ltd.ng", 2, true}, - {1, "ngo.ng", 2, true}, - {1, "ng.school", 2, true}, - {1, "sch.so", 2, true}, - {1, "xn--hkkinen-5wa.fi", 2, true}, - {2, "moonscale.io", 3, true}, - {1, "moonscale.net", 2, true}, - {1, "iki.fi", 2, true}, - {1, "dyn-berlin.de", 2, true}, - {1, "in-berlin.de", 2, true}, - {1, "in-brb.de", 2, true}, - {1, "in-butter.de", 2, true}, - {1, "in-dsl.de", 2, true}, - {1, "in-dsl.net", 2, true}, - {1, "in-dsl.org", 2, true}, - {1, "in-vpn.de", 2, true}, - {1, "in-vpn.net", 2, true}, - {1, "in-vpn.org", 2, true}, - {1, 
"biz.at", 2, true}, - {1, "info.at", 2, true}, - {1, "info.cx", 2, true}, - {1, "ac.leg.br", 3, true}, - {1, "al.leg.br", 3, true}, - {1, "am.leg.br", 3, true}, - {1, "ap.leg.br", 3, true}, - {1, "ba.leg.br", 3, true}, - {1, "ce.leg.br", 3, true}, - {1, "df.leg.br", 3, true}, - {1, "es.leg.br", 3, true}, - {1, "go.leg.br", 3, true}, - {1, "ma.leg.br", 3, true}, - {1, "mg.leg.br", 3, true}, - {1, "ms.leg.br", 3, true}, - {1, "mt.leg.br", 3, true}, - {1, "pa.leg.br", 3, true}, - {1, "pb.leg.br", 3, true}, - {1, "pe.leg.br", 3, true}, - {1, "pi.leg.br", 3, true}, - {1, "pr.leg.br", 3, true}, - {1, "rj.leg.br", 3, true}, - {1, "rn.leg.br", 3, true}, - {1, "ro.leg.br", 3, true}, - {1, "rr.leg.br", 3, true}, - {1, "rs.leg.br", 3, true}, - {1, "sc.leg.br", 3, true}, - {1, "se.leg.br", 3, true}, - {1, "sp.leg.br", 3, true}, - {1, "to.leg.br", 3, true}, - {1, "pixolino.com", 2, true}, - {1, "ipifony.net", 2, true}, - {1, "mein-iserv.de", 2, true}, - {1, "test-iserv.de", 2, true}, - {1, "iserv.dev", 2, true}, - {1, "iobb.net", 2, true}, - {1, "myjino.ru", 2, true}, - {2, "hosting.myjino.ru", 4, true}, - {2, "landing.myjino.ru", 4, true}, - {2, "spectrum.myjino.ru", 4, true}, - {2, "vps.myjino.ru", 4, true}, - {2, "triton.zone", 3, true}, - {2, "cns.joyent.com", 4, true}, - {1, "js.org", 2, true}, - {1, "kaas.gg", 2, true}, - {1, "khplay.nl", 2, true}, - {1, "keymachine.de", 2, true}, - {1, "kinghost.net", 2, true}, - {1, "uni5.net", 2, true}, - {1, "knightpoint.systems", 2, true}, - {1, "oya.to", 2, true}, - {1, "co.krd", 2, true}, - {1, "edu.krd", 2, true}, - {1, "git-repos.de", 2, true}, - {1, "lcube-server.de", 2, true}, - {1, "svn-repos.de", 2, true}, - {1, "leadpages.co", 2, true}, - {1, "lpages.co", 2, true}, - {1, "lpusercontent.com", 2, true}, - {1, "lelux.site", 2, true}, - {1, "co.business", 2, true}, - {1, "co.education", 2, true}, - {1, "co.events", 2, true}, - {1, "co.financial", 2, true}, - {1, "co.network", 2, true}, - {1, "co.place", 2, true}, - {1, 
"co.technology", 2, true}, - {1, "app.lmpm.com", 3, true}, - {1, "linkitools.space", 2, true}, - {1, "linkyard.cloud", 2, true}, - {1, "linkyard-cloud.ch", 2, true}, - {1, "members.linode.com", 3, true}, - {1, "nodebalancer.linode.com", 3, true}, - {1, "we.bs", 2, true}, - {1, "loginline.app", 2, true}, - {1, "loginline.dev", 2, true}, - {1, "loginline.io", 2, true}, - {1, "loginline.services", 2, true}, - {1, "loginline.site", 2, true}, - {1, "krasnik.pl", 2, true}, - {1, "leczna.pl", 2, true}, - {1, "lubartow.pl", 2, true}, - {1, "lublin.pl", 2, true}, - {1, "poniatowa.pl", 2, true}, - {1, "swidnik.pl", 2, true}, - {1, "uklugs.org", 2, true}, - {1, "glug.org.uk", 3, true}, - {1, "lug.org.uk", 3, true}, - {1, "lugs.org.uk", 3, true}, - {1, "barsy.bg", 2, true}, - {1, "barsy.co.uk", 3, true}, - {1, "barsyonline.co.uk", 3, true}, - {1, "barsycenter.com", 2, true}, - {1, "barsyonline.com", 2, true}, - {1, "barsy.club", 2, true}, - {1, "barsy.de", 2, true}, - {1, "barsy.eu", 2, true}, - {1, "barsy.in", 2, true}, - {1, "barsy.info", 2, true}, - {1, "barsy.io", 2, true}, - {1, "barsy.me", 2, true}, - {1, "barsy.menu", 2, true}, - {1, "barsy.mobi", 2, true}, - {1, "barsy.net", 2, true}, - {1, "barsy.online", 2, true}, - {1, "barsy.org", 2, true}, - {1, "barsy.pro", 2, true}, - {1, "barsy.pub", 2, true}, - {1, "barsy.shop", 2, true}, - {1, "barsy.site", 2, true}, - {1, "barsy.support", 2, true}, - {1, "barsy.uk", 2, true}, - {2, "magentosite.cloud", 3, true}, - {1, "mayfirst.info", 2, true}, - {1, "mayfirst.org", 2, true}, - {1, "hb.cldmail.ru", 3, true}, - {1, "miniserver.com", 2, true}, - {1, "memset.net", 2, true}, - {1, "cloud.metacentrum.cz", 3, true}, - {1, "custom.metacentrum.cz", 3, true}, - {1, "flt.cloud.muni.cz", 4, true}, - {1, "usr.cloud.muni.cz", 4, true}, - {1, "meteorapp.com", 2, true}, - {1, "eu.meteorapp.com", 3, true}, - {1, "co.pl", 2, true}, - {1, "azurecontainer.io", 2, true}, - {1, "azurewebsites.net", 2, true}, - {1, "azure-mobile.net", 2, true}, - 
{1, "cloudapp.net", 2, true}, - {1, "mozilla-iot.org", 2, true}, - {1, "bmoattachments.org", 2, true}, - {1, "net.ru", 2, true}, - {1, "org.ru", 2, true}, - {1, "pp.ru", 2, true}, - {1, "ui.nabu.casa", 3, true}, - {1, "pony.club", 2, true}, - {1, "of.fashion", 2, true}, - {1, "on.fashion", 2, true}, - {1, "of.football", 2, true}, - {1, "in.london", 2, true}, - {1, "of.london", 2, true}, - {1, "for.men", 2, true}, - {1, "and.mom", 2, true}, - {1, "for.mom", 2, true}, - {1, "for.one", 2, true}, - {1, "for.sale", 2, true}, - {1, "of.work", 2, true}, - {1, "to.work", 2, true}, - {1, "nctu.me", 2, true}, - {1, "bitballoon.com", 2, true}, - {1, "netlify.com", 2, true}, - {1, "4u.com", 2, true}, - {1, "ngrok.io", 2, true}, - {1, "nh-serv.co.uk", 3, true}, - {1, "nfshost.com", 2, true}, - {1, "dnsking.ch", 2, true}, - {1, "mypi.co", 2, true}, - {1, "n4t.co", 2, true}, - {1, "001www.com", 2, true}, - {1, "ddnslive.com", 2, true}, - {1, "myiphost.com", 2, true}, - {1, "forumz.info", 2, true}, - {1, "16-b.it", 2, true}, - {1, "32-b.it", 2, true}, - {1, "64-b.it", 2, true}, - {1, "soundcast.me", 2, true}, - {1, "tcp4.me", 2, true}, - {1, "dnsup.net", 2, true}, - {1, "hicam.net", 2, true}, - {1, "now-dns.net", 2, true}, - {1, "ownip.net", 2, true}, - {1, "vpndns.net", 2, true}, - {1, "dynserv.org", 2, true}, - {1, "now-dns.org", 2, true}, - {1, "x443.pw", 2, true}, - {1, "now-dns.top", 2, true}, - {1, "ntdll.top", 2, true}, - {1, "freeddns.us", 2, true}, - {1, "crafting.xyz", 2, true}, - {1, "zapto.xyz", 2, true}, - {1, "nsupdate.info", 2, true}, - {1, "nerdpol.ovh", 2, true}, - {1, "blogsyte.com", 2, true}, - {1, "brasilia.me", 2, true}, - {1, "cable-modem.org", 2, true}, - {1, "ciscofreak.com", 2, true}, - {1, "collegefan.org", 2, true}, - {1, "couchpotatofries.org", 2, true}, - {1, "damnserver.com", 2, true}, - {1, "ddns.me", 2, true}, - {1, "ditchyourip.com", 2, true}, - {1, "dnsfor.me", 2, true}, - {1, "dnsiskinky.com", 2, true}, - {1, "dvrcam.info", 2, true}, - {1, 
"dynns.com", 2, true}, - {1, "eating-organic.net", 2, true}, - {1, "fantasyleague.cc", 2, true}, - {1, "geekgalaxy.com", 2, true}, - {1, "golffan.us", 2, true}, - {1, "health-carereform.com", 2, true}, - {1, "homesecuritymac.com", 2, true}, - {1, "homesecuritypc.com", 2, true}, - {1, "hopto.me", 2, true}, - {1, "ilovecollege.info", 2, true}, - {1, "loginto.me", 2, true}, - {1, "mlbfan.org", 2, true}, - {1, "mmafan.biz", 2, true}, - {1, "myactivedirectory.com", 2, true}, - {1, "mydissent.net", 2, true}, - {1, "myeffect.net", 2, true}, - {1, "mymediapc.net", 2, true}, - {1, "mypsx.net", 2, true}, - {1, "mysecuritycamera.com", 2, true}, - {1, "mysecuritycamera.net", 2, true}, - {1, "mysecuritycamera.org", 2, true}, - {1, "net-freaks.com", 2, true}, - {1, "nflfan.org", 2, true}, - {1, "nhlfan.net", 2, true}, - {1, "no-ip.ca", 2, true}, - {1, "no-ip.co.uk", 3, true}, - {1, "no-ip.net", 2, true}, - {1, "noip.us", 2, true}, - {1, "onthewifi.com", 2, true}, - {1, "pgafan.net", 2, true}, - {1, "point2this.com", 2, true}, - {1, "pointto.us", 2, true}, - {1, "privatizehealthinsurance.net", 2, true}, - {1, "quicksytes.com", 2, true}, - {1, "read-books.org", 2, true}, - {1, "securitytactics.com", 2, true}, - {1, "serveexchange.com", 2, true}, - {1, "servehumour.com", 2, true}, - {1, "servep2p.com", 2, true}, - {1, "servesarcasm.com", 2, true}, - {1, "stufftoread.com", 2, true}, - {1, "ufcfan.org", 2, true}, - {1, "unusualperson.com", 2, true}, - {1, "workisboring.com", 2, true}, - {1, "3utilities.com", 2, true}, - {1, "bounceme.net", 2, true}, - {1, "ddns.net", 2, true}, - {1, "ddnsking.com", 2, true}, - {1, "gotdns.ch", 2, true}, - {1, "hopto.org", 2, true}, - {1, "myftp.biz", 2, true}, - {1, "myftp.org", 2, true}, - {1, "myvnc.com", 2, true}, - {1, "no-ip.biz", 2, true}, - {1, "no-ip.info", 2, true}, - {1, "no-ip.org", 2, true}, - {1, "noip.me", 2, true}, - {1, "redirectme.net", 2, true}, - {1, "servebeer.com", 2, true}, - {1, "serveblog.net", 2, true}, - {1, 
"servecounterstrike.com", 2, true}, - {1, "serveftp.com", 2, true}, - {1, "servegame.com", 2, true}, - {1, "servehalflife.com", 2, true}, - {1, "servehttp.com", 2, true}, - {1, "serveirc.com", 2, true}, - {1, "serveminecraft.net", 2, true}, - {1, "servemp3.com", 2, true}, - {1, "servepics.com", 2, true}, - {1, "servequake.com", 2, true}, - {1, "sytes.net", 2, true}, - {1, "webhop.me", 2, true}, - {1, "zapto.org", 2, true}, - {1, "stage.nodeart.io", 3, true}, - {1, "nodum.co", 2, true}, - {1, "nodum.io", 2, true}, - {1, "pcloud.host", 2, true}, - {1, "nyc.mn", 2, true}, - {1, "nom.ae", 2, true}, - {1, "nom.af", 2, true}, - {1, "nom.ai", 2, true}, - {1, "nom.al", 2, true}, - {1, "nym.by", 2, true}, - {1, "nym.bz", 2, true}, - {1, "nom.cl", 2, true}, - {1, "nym.ec", 2, true}, - {1, "nom.gd", 2, true}, - {1, "nom.ge", 2, true}, - {1, "nom.gl", 2, true}, - {1, "nym.gr", 2, true}, - {1, "nom.gt", 2, true}, - {1, "nym.gy", 2, true}, - {1, "nym.hk", 2, true}, - {1, "nom.hn", 2, true}, - {1, "nym.ie", 2, true}, - {1, "nom.im", 2, true}, - {1, "nom.ke", 2, true}, - {1, "nym.kz", 2, true}, - {1, "nym.la", 2, true}, - {1, "nym.lc", 2, true}, - {1, "nom.li", 2, true}, - {1, "nym.li", 2, true}, - {1, "nym.lt", 2, true}, - {1, "nym.lu", 2, true}, - {1, "nym.me", 2, true}, - {1, "nom.mk", 2, true}, - {1, "nym.mn", 2, true}, - {1, "nym.mx", 2, true}, - {1, "nom.nu", 2, true}, - {1, "nym.nz", 2, true}, - {1, "nym.pe", 2, true}, - {1, "nym.pt", 2, true}, - {1, "nom.pw", 2, true}, - {1, "nom.qa", 2, true}, - {1, "nym.ro", 2, true}, - {1, "nom.rs", 2, true}, - {1, "nom.si", 2, true}, - {1, "nym.sk", 2, true}, - {1, "nom.st", 2, true}, - {1, "nym.su", 2, true}, - {1, "nym.sx", 2, true}, - {1, "nom.tj", 2, true}, - {1, "nym.tw", 2, true}, - {1, "nom.ug", 2, true}, - {1, "nom.uy", 2, true}, - {1, "nom.vc", 2, true}, - {1, "nom.vg", 2, true}, - {1, "static.observableusercontent.com", 3, true}, - {1, "cya.gg", 2, true}, - {1, "cloudycluster.net", 2, true}, - {1, "nid.io", 2, true}, - {1, 
"opencraft.hosting", 2, true}, - {1, "operaunite.com", 2, true}, - {1, "skygearapp.com", 2, true}, - {1, "outsystemscloud.com", 2, true}, - {1, "ownprovider.com", 2, true}, - {1, "own.pm", 2, true}, - {1, "ox.rs", 2, true}, - {1, "oy.lc", 2, true}, - {1, "pgfog.com", 2, true}, - {1, "pagefrontapp.com", 2, true}, - {1, "art.pl", 2, true}, - {1, "gliwice.pl", 2, true}, - {1, "krakow.pl", 2, true}, - {1, "poznan.pl", 2, true}, - {1, "wroc.pl", 2, true}, - {1, "zakopane.pl", 2, true}, - {1, "pantheonsite.io", 2, true}, - {1, "gotpantheon.com", 2, true}, - {1, "mypep.link", 2, true}, - {1, "perspecta.cloud", 2, true}, - {1, "on-web.fr", 2, true}, - {2, "platform.sh", 3, true}, - {2, "platformsh.site", 3, true}, - {1, "dyn53.io", 2, true}, - {1, "co.bn", 2, true}, - {1, "xen.prgmr.com", 3, true}, - {1, "priv.at", 2, true}, - {1, "prvcy.page", 2, true}, - {2, "dweb.link", 3, true}, - {1, "protonet.io", 2, true}, - {1, "chirurgiens-dentistes-en-france.fr", 2, true}, - {1, "byen.site", 2, true}, - {1, "pubtls.org", 2, true}, - {1, "qualifioapp.com", 2, true}, - {1, "qbuser.com", 2, true}, - {1, "instantcloud.cn", 2, true}, - {1, "ras.ru", 2, true}, - {1, "qa2.com", 2, true}, - {1, "qcx.io", 2, true}, - {2, "sys.qcx.io", 4, true}, - {1, "dev-myqnapcloud.com", 2, true}, - {1, "alpha-myqnapcloud.com", 2, true}, - {1, "myqnapcloud.com", 2, true}, - {2, "quipelements.com", 3, true}, - {1, "vapor.cloud", 2, true}, - {1, "vaporcloud.io", 2, true}, - {1, "rackmaze.com", 2, true}, - {1, "rackmaze.net", 2, true}, - {2, "on-k3s.io", 3, true}, - {2, "on-rancher.cloud", 3, true}, - {2, "on-rio.io", 3, true}, - {1, "readthedocs.io", 2, true}, - {1, "rhcloud.com", 2, true}, - {1, "app.render.com", 3, true}, - {1, "onrender.com", 2, true}, - {1, "repl.co", 2, true}, - {1, "repl.run", 2, true}, - {1, "resindevice.io", 2, true}, - {1, "devices.resinstaging.io", 3, true}, - {1, "hzc.io", 2, true}, - {1, "wellbeingzone.eu", 2, true}, - {1, "ptplus.fit", 2, true}, - {1, "wellbeingzone.co.uk", 
3, true}, - {1, "git-pages.rit.edu", 3, true}, - {1, "sandcats.io", 2, true}, - {1, "logoip.de", 2, true}, - {1, "logoip.com", 2, true}, - {1, "schokokeks.net", 2, true}, - {1, "gov.scot", 2, true}, - {1, "scrysec.com", 2, true}, - {1, "firewall-gateway.com", 2, true}, - {1, "firewall-gateway.de", 2, true}, - {1, "my-gateway.de", 2, true}, - {1, "my-router.de", 2, true}, - {1, "spdns.de", 2, true}, - {1, "spdns.eu", 2, true}, - {1, "firewall-gateway.net", 2, true}, - {1, "my-firewall.org", 2, true}, - {1, "myfirewall.org", 2, true}, - {1, "spdns.org", 2, true}, - {1, "senseering.net", 2, true}, - {1, "biz.ua", 2, true}, - {1, "co.ua", 2, true}, - {1, "pp.ua", 2, true}, - {1, "shiftedit.io", 2, true}, - {1, "myshopblocks.com", 2, true}, - {1, "shopitsite.com", 2, true}, - {1, "mo-siemens.io", 2, true}, - {1, "1kapp.com", 2, true}, - {1, "appchizi.com", 2, true}, - {1, "applinzi.com", 2, true}, - {1, "sinaapp.com", 2, true}, - {1, "vipsinaapp.com", 2, true}, - {1, "siteleaf.net", 2, true}, - {1, "bounty-full.com", 2, true}, - {1, "alpha.bounty-full.com", 3, true}, - {1, "beta.bounty-full.com", 3, true}, - {1, "stackhero-network.com", 2, true}, - {1, "static.land", 2, true}, - {1, "dev.static.land", 3, true}, - {1, "sites.static.land", 3, true}, - {1, "apps.lair.io", 3, true}, - {2, "stolos.io", 3, true}, - {1, "spacekit.io", 2, true}, - {1, "customer.speedpartner.de", 3, true}, - {1, "api.stdlib.com", 3, true}, - {1, "storj.farm", 2, true}, - {1, "utwente.io", 2, true}, - {1, "soc.srcf.net", 3, true}, - {1, "user.srcf.net", 3, true}, - {1, "temp-dns.com", 2, true}, - {1, "applicationcloud.io", 2, true}, - {1, "scapp.io", 2, true}, - {2, "s5y.io", 3, true}, - {2, "sensiosite.cloud", 3, true}, - {1, "syncloud.it", 2, true}, - {1, "diskstation.me", 2, true}, - {1, "dscloud.biz", 2, true}, - {1, "dscloud.me", 2, true}, - {1, "dscloud.mobi", 2, true}, - {1, "dsmynas.com", 2, true}, - {1, "dsmynas.net", 2, true}, - {1, "dsmynas.org", 2, true}, - {1, "familyds.com", 2, 
true}, - {1, "familyds.net", 2, true}, - {1, "familyds.org", 2, true}, - {1, "i234.me", 2, true}, - {1, "myds.me", 2, true}, - {1, "synology.me", 2, true}, - {1, "vpnplus.to", 2, true}, - {1, "direct.quickconnect.to", 3, true}, - {1, "taifun-dns.de", 2, true}, - {1, "gda.pl", 2, true}, - {1, "gdansk.pl", 2, true}, - {1, "gdynia.pl", 2, true}, - {1, "med.pl", 2, true}, - {1, "sopot.pl", 2, true}, - {1, "edugit.org", 2, true}, - {1, "telebit.app", 2, true}, - {1, "telebit.io", 2, true}, - {2, "telebit.xyz", 3, true}, - {1, "gwiddle.co.uk", 3, true}, - {1, "thingdustdata.com", 2, true}, - {1, "cust.dev.thingdust.io", 4, true}, - {1, "cust.disrec.thingdust.io", 4, true}, - {1, "cust.prod.thingdust.io", 4, true}, - {1, "cust.testing.thingdust.io", 4, true}, - {1, "arvo.network", 2, true}, - {1, "azimuth.network", 2, true}, - {1, "bloxcms.com", 2, true}, - {1, "townnews-staging.com", 2, true}, - {1, "12hp.at", 2, true}, - {1, "2ix.at", 2, true}, - {1, "4lima.at", 2, true}, - {1, "lima-city.at", 2, true}, - {1, "12hp.ch", 2, true}, - {1, "2ix.ch", 2, true}, - {1, "4lima.ch", 2, true}, - {1, "lima-city.ch", 2, true}, - {1, "trafficplex.cloud", 2, true}, - {1, "de.cool", 2, true}, - {1, "12hp.de", 2, true}, - {1, "2ix.de", 2, true}, - {1, "4lima.de", 2, true}, - {1, "lima-city.de", 2, true}, - {1, "1337.pictures", 2, true}, - {1, "clan.rip", 2, true}, - {1, "lima-city.rocks", 2, true}, - {1, "webspace.rocks", 2, true}, - {1, "lima.zone", 2, true}, - {2, "transurl.be", 3, true}, - {2, "transurl.eu", 3, true}, - {2, "transurl.nl", 3, true}, - {1, "tuxfamily.org", 2, true}, - {1, "dd-dns.de", 2, true}, - {1, "diskstation.eu", 2, true}, - {1, "diskstation.org", 2, true}, - {1, "dray-dns.de", 2, true}, - {1, "draydns.de", 2, true}, - {1, "dyn-vpn.de", 2, true}, - {1, "dynvpn.de", 2, true}, - {1, "mein-vigor.de", 2, true}, - {1, "my-vigor.de", 2, true}, - {1, "my-wan.de", 2, true}, - {1, "syno-ds.de", 2, true}, - {1, "synology-diskstation.de", 2, true}, - {1, "synology-ds.de", 2, 
true}, - {1, "uber.space", 2, true}, - {2, "uberspace.de", 3, true}, - {1, "hk.com", 2, true}, - {1, "hk.org", 2, true}, - {1, "ltd.hk", 2, true}, - {1, "inc.hk", 2, true}, - {1, "virtualuser.de", 2, true}, - {1, "virtual-user.de", 2, true}, - {1, "lib.de.us", 3, true}, - {1, "2038.io", 2, true}, - {1, "router.management", 2, true}, - {1, "v-info.info", 2, true}, - {1, "voorloper.cloud", 2, true}, - {1, "v.ua", 2, true}, - {1, "wafflecell.com", 2, true}, - {2, "webhare.dev", 3, true}, - {1, "wedeploy.io", 2, true}, - {1, "wedeploy.me", 2, true}, - {1, "wedeploy.sh", 2, true}, - {1, "remotewd.com", 2, true}, - {1, "wmflabs.org", 2, true}, - {1, "myforum.community", 2, true}, - {1, "community-pro.de", 2, true}, - {1, "diskussionsbereich.de", 2, true}, - {1, "community-pro.net", 2, true}, - {1, "meinforum.net", 2, true}, - {1, "half.host", 2, true}, - {1, "xnbay.com", 2, true}, - {1, "u2.xnbay.com", 3, true}, - {1, "u2-local.xnbay.com", 3, true}, - {1, "cistron.nl", 2, true}, - {1, "demon.nl", 2, true}, - {1, "xs4all.space", 2, true}, - {1, "yandexcloud.net", 2, true}, - {1, "storage.yandexcloud.net", 3, true}, - {1, "website.yandexcloud.net", 3, true}, - {1, "official.academy", 2, true}, - {1, "yolasite.com", 2, true}, - {1, "ybo.faith", 2, true}, - {1, "yombo.me", 2, true}, - {1, "homelink.one", 2, true}, - {1, "ybo.party", 2, true}, - {1, "ybo.review", 2, true}, - {1, "ybo.science", 2, true}, - {1, "ybo.trade", 2, true}, - {1, "nohost.me", 2, true}, - {1, "noho.st", 2, true}, - {1, "za.net", 2, true}, - {1, "za.org", 2, true}, - {1, "now.sh", 2, true}, - {1, "bss.design", 2, true}, - {1, "basicserver.io", 2, true}, - {1, "virtualserver.io", 2, true}, - {1, "site.builder.nu", 3, true}, - {1, "enterprisecloud.nu", 2, true}, -} - -func init() { - for i := range r { - DefaultList.AddRule(&r[i]) - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 33013c20..4620037b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -85,9 +85,6 @@ 
github.com/datarhei/joy4/utils/bits/pio # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/eggsampler/acme/v3 v3.1.1 -## explicit; go 1.11 -github.com/eggsampler/acme/v3 # github.com/go-ole/go-ole v1.2.6 ## explicit; go 1.12 github.com/go-ole/go-ole @@ -174,14 +171,6 @@ github.com/labstack/gommon/random # github.com/leodido/go-urn v1.2.1 ## explicit; go 1.13 github.com/leodido/go-urn -# github.com/letsdebug/letsdebug v1.6.1 -## explicit; go 1.15 -github.com/letsdebug/letsdebug -# github.com/lib/pq v1.8.0 -## explicit; go 1.13 -github.com/lib/pq -github.com/lib/pq/oid -github.com/lib/pq/scram # github.com/libdns/libdns v0.2.1 ## explicit; go 1.14 github.com/libdns/libdns @@ -212,9 +201,6 @@ github.com/mholt/acmez/acme # github.com/miekg/dns v1.1.50 ## explicit; go 1.14 github.com/miekg/dns -# github.com/miekg/unbound v0.0.0-20180419064740-e2b53b2dbcba -## explicit -github.com/miekg/unbound # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -293,10 +279,6 @@ github.com/vektah/gqlparser/v2/lexer github.com/vektah/gqlparser/v2/parser github.com/vektah/gqlparser/v2/validator github.com/vektah/gqlparser/v2/validator/rules -# github.com/weppos/publicsuffix-go v0.13.0 -## explicit -github.com/weppos/publicsuffix-go/net/publicsuffix -github.com/weppos/publicsuffix-go/publicsuffix # github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb ## explicit github.com/xeipuuv/gojsonpointer From c44fb30a84e69c2e2deb4ac7aee7336392370bb6 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 2 Jan 2023 06:57:02 +0100 Subject: [PATCH 08/39] Fix check for at least one process input and output --- http/handler/api/restream.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http/handler/api/restream.go b/http/handler/api/restream.go index 2e6f0c2a..e585d0df 100644 --- a/http/handler/api/restream.go +++ b/http/handler/api/restream.go @@ -51,7 +51,7 @@ func (h 
*RestreamHandler) Add(c echo.Context) error { return api.Err(http.StatusBadRequest, "Unsupported process type", "Supported process types are: ffmpeg") } - if len(process.Input) == 0 && len(process.Output) == 0 { + if len(process.Input) == 0 || len(process.Output) == 0 { return api.Err(http.StatusBadRequest, "At least one input and one output need to be defined") } From 59aa6af767084ec2abfdfce8420c95b3778b4e72 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 2 Jan 2023 07:20:39 +0100 Subject: [PATCH 09/39] Allow partial process config updates --- http/handler/api/restream.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/http/handler/api/restream.go b/http/handler/api/restream.go index e585d0df..c61f363a 100644 --- a/http/handler/api/restream.go +++ b/http/handler/api/restream.go @@ -189,6 +189,14 @@ func (h *RestreamHandler) Update(c echo.Context) error { Autostart: true, } + current, err := h.restream.GetProcess(id) + if err != nil { + return api.Err(http.StatusNotFound, "Process not found", "%s", id) + } + + // Prefill the config with the current values + process.Unmarshal(current.Config) + if err := util.ShouldBindJSON(c, &process); err != nil { return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) } From 391681447ec8c7ca3c4a841a5525afac1ddb601b Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 2 Jan 2023 10:54:29 +0100 Subject: [PATCH 10/39] Fix MustDir config type to create directory --- config/value/os.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config/value/os.go b/config/value/os.go index dddfdc5e..6dba77b3 100644 --- a/config/value/os.go +++ b/config/value/os.go @@ -34,6 +34,10 @@ func (u *MustDir) Validate() error { return fmt.Errorf("path name must not be empty") } + if err := os.MkdirAll(val, 0750); err != nil { + return fmt.Errorf("%s can't be created (%w)", val, err) + } + finfo, err := os.Stat(val) if err != nil { return fmt.Errorf("%s does not exist", val) From 
992b04d180ddba62e64a3a92c906ef305bb57139 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 2 Jan 2023 11:39:58 +0100 Subject: [PATCH 11/39] Allow alternative syntax for auth0 tenants as environment variable --- config/value/auth0.go | 57 ++++++++++++++++++++++++++++++++------ config/value/auth0_test.go | 43 ++++++++++++++++++++++++++++ config/value/value_test.go | 32 ++++++++++----------- 3 files changed, 107 insertions(+), 25 deletions(-) create mode 100644 config/value/auth0_test.go diff --git a/config/value/auth0.go b/config/value/auth0.go index 8d19b4f1..a912134d 100644 --- a/config/value/auth0.go +++ b/config/value/auth0.go @@ -4,6 +4,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "net/url" "strings" ) @@ -16,6 +17,28 @@ type Auth0Tenant struct { Users []string `json:"users"` } +func (a *Auth0Tenant) String() string { + u := url.URL{ + Scheme: "auth0", + Host: a.Domain, + } + + if len(a.ClientID) != 0 { + u.User = url.User(a.ClientID) + } + + q := url.Values{} + q.Set("aud", a.Audience) + + for _, user := range a.Users { + q.Add("user", user) + } + + u.RawQuery = q.Encode() + + return u.String() +} + type TenantList struct { p *[]Auth0Tenant separator string @@ -32,18 +55,34 @@ func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *Tenan return v } +// Set allows to set a tenant list in two formats: +// - a separator separated list of bas64 encoded Auth0Tenant JSON objects +// - a separator separated list of Auth0Tenant in URL representation: auth0://[clientid]@[domain]?aud=[audience]&user=...&user=... 
func (s *TenantList) Set(val string) error { list := []Auth0Tenant{} for i, elm := range strings.Split(val, s.separator) { - data, err := base64.StdEncoding.DecodeString(elm) - if err != nil { - return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) - } - t := Auth0Tenant{} - if err := json.Unmarshal(data, &t); err != nil { - return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) + + if strings.HasPrefix(elm, "auth0://") { + data, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid url encoding of tenant %d: %w", i, err) + } + + t.Domain = data.Host + t.ClientID = data.User.Username() + t.Audience = data.Query().Get("aud") + t.Users = data.Query()["user"] + } else { + data, err := base64.StdEncoding.DecodeString(elm) + if err != nil { + return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) + } + + if err := json.Unmarshal(data, &t); err != nil { + return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) + } } list = append(list, t) @@ -62,10 +101,10 @@ func (s *TenantList) String() string { list := []string{} for _, t := range *s.p { - list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users))) + list = append(list, t.String()) } - return strings.Join(list, ",") + return strings.Join(list, s.separator) } func (s *TenantList) Validate() error { diff --git a/config/value/auth0_test.go b/config/value/auth0_test.go new file mode 100644 index 00000000..edc4eff8 --- /dev/null +++ b/config/value/auth0_test.go @@ -0,0 +1,43 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAuth0Value(t *testing.T) { + tenants := []Auth0Tenant{} + + v := NewTenantList(&tenants, nil, " ") + require.Equal(t, "(empty)", v.String()) + + v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3") + require.Equal(t, []Auth0Tenant{ + { + Domain: "domain", + ClientID: "clientid", + Audience: "audience", + Users: []string{"user1", 
"user2"}, + }, + { + Domain: "domain2", + Audience: "audience2", + Users: []string{"user3"}, + }, + }, tenants) + require.Equal(t, "auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3", v.String()) + require.NoError(t, v.Validate()) + + v.Set("eyJkb21haW4iOiJkYXRhcmhlaS5ldS5hdXRoMC5jb20iLCJhdWRpZW5jZSI6Imh0dHBzOi8vZGF0YXJoZWkuY29tL2NvcmUiLCJ1c2VycyI6WyJhdXRoMHx4eHgiXX0=") + require.Equal(t, []Auth0Tenant{ + { + Domain: "datarhei.eu.auth0.com", + ClientID: "", + Audience: "https://datarhei.com/core", + Users: []string{"auth0|xxx"}, + }, + }, tenants) + require.Equal(t, "auth0://datarhei.eu.auth0.com?aud=https%3A%2F%2Fdatarhei.com%2Fcore&user=auth0%7Cxxx", v.String()) + require.NoError(t, v.Validate()) +} diff --git a/config/value/value_test.go b/config/value/value_test.go index 49c024e9..aeb707be 100644 --- a/config/value/value_test.go +++ b/config/value/value_test.go @@ -3,7 +3,7 @@ package value import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIntValue(t *testing.T) { @@ -11,19 +11,19 @@ func TestIntValue(t *testing.T) { ivar := NewInt(&i, 11) - assert.Equal(t, "11", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) + require.Equal(t, "11", ivar.String()) + require.Equal(t, nil, ivar.Validate()) + require.Equal(t, false, ivar.IsEmpty()) i = 42 - assert.Equal(t, "42", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) + require.Equal(t, "42", ivar.String()) + require.Equal(t, nil, ivar.Validate()) + require.Equal(t, false, ivar.IsEmpty()) ivar.Set("77") - assert.Equal(t, int(77), i) + require.Equal(t, int(77), i) } type testdata struct { @@ -37,22 +37,22 @@ func TestCopyStruct(t *testing.T) { NewInt(&data1.value1, 1) NewInt(&data1.value2, 2) - assert.Equal(t, int(1), data1.value1) - assert.Equal(t, int(2), data1.value2) + require.Equal(t, int(1), data1.value1) + 
require.Equal(t, int(2), data1.value2) data2 := testdata{} val21 := NewInt(&data2.value1, 3) val22 := NewInt(&data2.value2, 4) - assert.Equal(t, int(3), data2.value1) - assert.Equal(t, int(4), data2.value2) + require.Equal(t, int(3), data2.value1) + require.Equal(t, int(4), data2.value2) data2 = data1 - assert.Equal(t, int(1), data2.value1) - assert.Equal(t, int(2), data2.value2) + require.Equal(t, int(1), data2.value1) + require.Equal(t, int(2), data2.value2) - assert.Equal(t, "1", val21.String()) - assert.Equal(t, "2", val22.String()) + require.Equal(t, "1", val21.String()) + require.Equal(t, "2", val22.String()) } From 378a3cd9cf63f604d25e1aef754e42bea1e7f0b5 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 2 Jan 2023 11:58:54 +0100 Subject: [PATCH 12/39] Allow to set a soft memory limit for the binary itself The setting debug.memory_limit_mbytes should not be used in conjuction with debug.force_gc because the memory limit influences the garbage collector. --- app/api/api.go | 7 +++++++ config/config.go | 1 + config/data.go | 14 ++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/app/api/api.go b/app/api/api.go index a1062800..7a1f39fc 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -6,6 +6,7 @@ import ( "fmt" "io" golog "log" + "math" gonet "net" gohttp "net/http" "net/url" @@ -1119,6 +1120,12 @@ func (a *api) start() error { }(ctx) } + if cfg.Debug.MemoryLimit > 0 { + debug.SetMemoryLimit(cfg.Debug.MemoryLimit * 1024 * 1024) + } else { + debug.SetMemoryLimit(math.MaxInt64) + } + // Start the restream processes restream.Start() diff --git a/config/config.go b/config/config.go index 1d1d6732..b8a5028e 100644 --- a/config/config.go +++ b/config/config.go @@ -232,6 +232,7 @@ func (d *Config) init() { // Debug d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), 
"debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false) + d.vars.Register(value.NewInt64(&d.Debug.MemoryLimit, 0), "debug.memory_limit_mbytes", "CORE_DEBUG_MEMORY_LIMIT_MBYTES", nil, "Impose a soft memory limit for the core, in megabytes", false, false) // Metrics d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) diff --git a/config/data.go b/config/data.go index 012c964b..bb836b3f 100644 --- a/config/data.go +++ b/config/data.go @@ -135,8 +135,9 @@ type Data struct { MaxPort int `json:"max_port"` } `json:"playout"` Debug struct { - Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + Profiling bool `json:"profiling"` + ForceGC int `json:"force_gc"` + MemoryLimit int64 `json:"memory_limit_mbytes"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` @@ -189,7 +190,6 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.SRT = d.SRT data.FFmpeg = d.FFmpeg data.Playout = d.Playout - data.Debug = d.Debug data.Metrics = d.Metrics data.Sessions = d.Sessions data.Service = d.Service @@ -228,6 +228,10 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.Storage.Memory = d.Storage.Memory // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = d.Debug.ForceGC + data.Debug.MemoryLimit = 0 + data.TLS.Enable = d.TLS.Enable data.TLS.Address = d.TLS.Address data.TLS.Auto = d.TLS.Auto @@ -267,7 +271,6 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) { data.SRT = d.SRT data.FFmpeg = d.FFmpeg data.Playout = d.Playout - data.Debug = d.Debug data.Metrics = d.Metrics data.Sessions = d.Sessions data.Service = d.Service @@ -299,6 +302,9 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) { data.Router.Routes = copy.StringMap(d.Router.Routes) // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = 
d.Debug.ForceGC + data.TLS.Enable = d.TLS.Enable data.TLS.Address = d.TLS.Address data.TLS.Auto = d.TLS.Auto From ff6b0d958413d9a916004983619577b8e31629ca Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 3 Jan 2023 07:05:00 +0100 Subject: [PATCH 13/39] Require go1.19 for tests --- .github/workflows/go-tests.yml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 95aad096..d0b86012 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -3,20 +3,20 @@ name: tests on: [push, pull_request] jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 2 - - uses: actions/setup-go@v2 - with: - go-version: '1.18' - - name: Run coverage - run: go test -coverprofile=coverage.out -covermode=atomic -v ./... - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: coverage.out - flags: unit-linux + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 2 + - uses: actions/setup-go@v2 + with: + go-version: "1.19" + - name: Run coverage + run: go test -coverprofile=coverage.out -covermode=atomic -v ./... 
+ - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: coverage.out + flags: unit-linux From 17c9f6ef1393923b6784fe19d484010f9afbeaf8 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 3 Jan 2023 07:55:55 +0100 Subject: [PATCH 14/39] Test different standard location for config file If no path is given in the environment variable CORE_CONFIGFILE, different standard locations will be probed: - os.UserConfigDir() + /datarhei-core/config.js - os.UserHomeDir() + /.config/datarhei-core/config.js - ./config/config.js If the config.js doesn't exist in any of these locations, it will be assumed at ./config/config.js --- app/api/api.go | 2 ++ config/store/json.go | 4 ++++ main.go | 53 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 1 deletion(-) diff --git a/app/api/api.go b/app/api/api.go index 7a1f39fc..170c44d6 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -229,6 +229,8 @@ func (a *api) Reload() error { logger.Info().WithFields(logfields).Log("") + logger.Info().WithField("path", a.config.path).Log("Read config file") + configlogger := logger.WithComponent("Config") cfg.Messages(func(level string, v configvars.Variable, message string) { configlogger = configlogger.WithFields(log.Fields{ diff --git a/config/store/json.go b/config/store/json.go index b4cd2db9..a63ba627 100644 --- a/config/store/json.go +++ b/config/store/json.go @@ -118,6 +118,10 @@ func (c *jsonStore) load(cfg *config.Config) error { return err } + if len(jsondata) == 0 { + return nil + } + data, err := migrate(jsondata) if err != nil { return err diff --git a/main.go b/main.go index 4606f67d..6d16a134 100644 --- a/main.go +++ b/main.go @@ -3,6 +3,7 @@ package main import ( "os" "os/signal" + "path" "github.com/datarhei/core/v16/app/api" "github.com/datarhei/core/v16/log" @@ -13,7 +14,9 @@ import ( func main() { logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, 
true)) - app, err := api.New(os.Getenv("CORE_CONFIGFILE"), os.Stderr) + configfile := findConfigfile() + + app, err := api.New(configfile, os.Stderr) if err != nil { logger.Error().WithError(err).Log("Failed to create new API") os.Exit(1) @@ -54,3 +57,51 @@ func main() { // Stop the app app.Destroy() } + +// findConfigfie returns the path to the config file. If no path is given +// in the environment variable CORE_CONFIGFILE, different standard location +// will be probed: +// - os.UserConfigDir() + /datarhei-core/config.js +// - os.UserHomeDir() + /.config/datarhei-core/config.js +// - ./config/config.js +// If the config doesn't exist in none of these locations, it will be assumed +// at ./config/config.js +func findConfigfile() string { + configfile := os.Getenv("CORE_CONFIGFILE") + if len(configfile) != 0 { + return configfile + } + + locations := []string{} + + if dir, err := os.UserConfigDir(); err == nil { + locations = append(locations, dir+"/datarhei-core/config.js") + } + + if dir, err := os.UserHomeDir(); err == nil { + locations = append(locations, dir+"/.config/datarhei-core/config.js") + } + + locations = append(locations, "./config/config.js") + + for _, path := range locations { + info, err := os.Stat(path) + if err != nil { + continue + } + + if info.IsDir() { + continue + } + + configfile = path + } + + if len(configfile) == 0 { + configfile = "./config/config.js" + } + + os.MkdirAll(path.Dir(configfile), 0740) + + return configfile +} From 1bbb7a9c1fe4c073cb24a07099848a649781a955 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 3 Jan 2023 11:45:10 +0100 Subject: [PATCH 15/39] Use config locations for import and ffmigrage --- app/ffmigrate/main.go | 4 ++- app/import/main.go | 4 ++- config/store/location.go | 53 ++++++++++++++++++++++++++++++++++++++++ main.go | 52 ++------------------------------------- 4 files changed, 61 insertions(+), 52 deletions(-) create mode 100644 config/store/location.go diff --git a/app/ffmigrate/main.go 
b/app/ffmigrate/main.go index 036af80f..5553ec0e 100644 --- a/app/ffmigrate/main.go +++ b/app/ffmigrate/main.go @@ -22,7 +22,9 @@ func main() { "to": "ffmpeg5", }) - configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil) + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + configstore, err := cfgstore.NewJSON(configfile, nil) if err != nil { logger.Error().WithError(err).Log("Loading configuration failed") os.Exit(1) diff --git a/app/import/main.go b/app/import/main.go index ebfe1aa0..cc2a9fb3 100644 --- a/app/import/main.go +++ b/app/import/main.go @@ -15,7 +15,9 @@ import ( func main() { logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1") - configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil) + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + configstore, err := cfgstore.NewJSON(configfile, nil) if err != nil { logger.Error().WithError(err).Log("Loading configuration failed") os.Exit(1) diff --git a/config/store/location.go b/config/store/location.go new file mode 100644 index 00000000..e073a0c8 --- /dev/null +++ b/config/store/location.go @@ -0,0 +1,53 @@ +package store + +import ( + "os" + "path" +) + +// Location returns the path to the config file. 
If no path is provided, +// different standard locations will be probed: +// - os.UserConfigDir() + /datarhei-core/config.js +// - os.UserHomeDir() + /.config/datarhei-core/config.js +// - ./config/config.js +// If the config doesn't exist in any of these locations, it will be assumed +// at ./config/config.js +func Location(filepath string) string { + configfile := filepath + if len(configfile) != 0 { + return configfile + } + + locations := []string{} + + if dir, err := os.UserConfigDir(); err == nil { + locations = append(locations, dir+"/datarhei-core/config.js") + } + + if dir, err := os.UserHomeDir(); err == nil { + locations = append(locations, dir+"/.config/datarhei-core/config.js") + } + + locations = append(locations, "./config/config.js") + + for _, path := range locations { + info, err := os.Stat(path) + if err != nil { + continue + } + + if info.IsDir() { + continue + } + + configfile = path + } + + if len(configfile) == 0 { + configfile = "./config/config.js" + } + + os.MkdirAll(path.Dir(configfile), 0740) + + return configfile +} diff --git a/main.go b/main.go index 6d16a134..377af7e5 100644 --- a/main.go +++ b/main.go @@ -3,9 +3,9 @@ package main import ( "os" "os/signal" - "path" "github.com/datarhei/core/v16/app/api" + "github.com/datarhei/core/v16/config/store" "github.com/datarhei/core/v16/log" _ "github.com/joho/godotenv/autoload" @@ -14,7 +14,7 @@ import ( func main() { logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, true)) - configfile := findConfigfile() + configfile := store.Location(os.Getenv("CORE_CONFIGFILE")) app, err := api.New(configfile, os.Stderr) if err != nil { @@ -57,51 +57,3 @@ func main() { // Stop the app app.Destroy() } - -// findConfigfie returns the path to the config file.
If no path is given -// in the environment variable CORE_CONFIGFILE, different standard location -// will be probed: -// - os.UserConfigDir() + /datarhei-core/config.js -// - os.UserHomeDir() + /.config/datarhei-core/config.js -// - ./config/config.js -// If the config doesn't exist in none of these locations, it will be assumed -// at ./config/config.js -func findConfigfile() string { - configfile := os.Getenv("CORE_CONFIGFILE") - if len(configfile) != 0 { - return configfile - } - - locations := []string{} - - if dir, err := os.UserConfigDir(); err == nil { - locations = append(locations, dir+"/datarhei-core/config.js") - } - - if dir, err := os.UserHomeDir(); err == nil { - locations = append(locations, dir+"/.config/datarhei-core/config.js") - } - - locations = append(locations, "./config/config.js") - - for _, path := range locations { - info, err := os.Stat(path) - if err != nil { - continue - } - - if info.IsDir() { - continue - } - - configfile = path - } - - if len(configfile) == 0 { - configfile = "./config/config.js" - } - - os.MkdirAll(path.Dir(configfile), 0740) - - return configfile -} From 481cd79e6d17f9705c5308ea1132e9ea027bb17f Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 10 Jan 2023 19:03:26 +0100 Subject: [PATCH 16/39] Update swagger API documentation --- docs/docs.go | 60 +++++++++++++++++++++++++++-------------------- docs/swagger.json | 60 +++++++++++++++++++++++++++-------------------- docs/swagger.yaml | 39 ++++++++++++++++-------------- 3 files changed, 92 insertions(+), 67 deletions(-) diff --git a/docs/docs.go b/docs/docs.go index 0d0740fd..cee532d5 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -229,7 +229,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/api.Config" + "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config" } } } @@ -2470,29 +2470,6 @@ const docTemplate = `{ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": 
"#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - } - }, "api.ConfigData": { "type": "object", "properties": { @@ -2602,6 +2579,9 @@ const docTemplate = `{ "force_gc": { "type": "integer" }, + "memory_limit_mbytes": { + "type": "integer" + }, "profiling": { "type": "boolean" } @@ -3623,7 +3603,11 @@ const docTemplate = `{ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", @@ -4230,6 +4214,9 @@ const docTemplate = `{ "force_gc": { "type": "integer" }, + "memory_limit_mbytes": { + "type": "integer" + }, "profiling": { "type": "boolean" } @@ -4873,6 +4860,29 @@ const docTemplate = `{ } } }, + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { + "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, "value.Auth0Tenant": { "type": "object", "properties": { diff --git a/docs/swagger.json b/docs/swagger.json index ae470126..64d93b2c 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -221,7 +221,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/api.Config" + "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config" } } } @@ -2462,29 +2462,6 @@ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - 
} - }, "api.ConfigData": { "type": "object", "properties": { @@ -2594,6 +2571,9 @@ "force_gc": { "type": "integer" }, + "memory_limit_mbytes": { + "type": "integer" + }, "profiling": { "type": "boolean" } @@ -3615,7 +3595,11 @@ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", @@ -4222,6 +4206,9 @@ "force_gc": { "type": "integer" }, + "memory_limit_mbytes": { + "type": "integer" + }, "profiling": { "type": "boolean" } @@ -4865,6 +4852,29 @@ } } }, + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { + "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, "value.Auth0Tenant": { "type": "object", "properties": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index a11c50c6..6b67f3a4 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -68,21 +68,6 @@ definitions: required: - command type: object - api.Config: - properties: - config: - $ref: '#/definitions/api.ConfigData' - created_at: - type: string - loaded_at: - type: string - overrides: - items: - type: string - type: array - updated_at: - type: string - type: object api.ConfigData: properties: address: @@ -153,6 +138,8 @@ definitions: properties: force_gc: type: integer + memory_limit_mbytes: + type: integer profiling: type: boolean type: object @@ -823,7 +810,8 @@ definitions: address: type: string avstream: - $ref: '#/definitions/api.AVstream' + allOf: + - $ref: '#/definitions/api.AVstream' description: avstream bitrate_kbit: description: kbit/s @@ -1278,6 +1266,8 @@ definitions: properties: force_gc: type: integer + memory_limit_mbytes: + type: integer profiling: type: boolean type: object @@ -1695,6 +1685,21 @@ 
definitions: uptime: type: integer type: object + github_com_datarhei_core_v16_http_api.Config: + properties: + config: + $ref: '#/definitions/api.ConfigData' + created_at: + type: string + loaded_at: + type: string + overrides: + items: + type: string + type: array + updated_at: + type: string + type: object value.Auth0Tenant: properties: audience: @@ -1876,7 +1881,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/api.Config' + $ref: '#/definitions/github_com_datarhei_core_v16_http_api.Config' security: - ApiKeyAuth: [] summary: Retrieve the currently active Restreamer configuration From ea79b8723623f0f4be476ae8a596db9095b6f829 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Wed, 11 Jan 2023 21:05:40 +0100 Subject: [PATCH 17/39] Add format annotation for integer types for swagger documentation --- config/data.go | 40 ++-- config/v1/data.go | 38 ++-- config/v2/data.go | 38 ++-- docs/docs.go | 462 ++++++++++++++++++++++++++++--------------- docs/swagger.json | 462 ++++++++++++++++++++++++++++--------------- docs/swagger.yaml | 154 +++++++++++++++ http/api/avstream.go | 12 +- http/api/error.go | 2 +- http/api/fs.go | 4 +- http/api/metrics.go | 8 +- http/api/playout.go | 18 +- http/api/probe.go | 12 +- http/api/process.go | 22 +-- http/api/progress.go | 28 +-- http/api/session.go | 16 +- http/api/srt.go | 98 ++++----- http/api/widget.go | 4 +- 17 files changed, 940 insertions(+), 478 deletions(-) diff --git a/config/data.go b/config/data.go index bb836b3f..a44cb0b2 100644 --- a/config/data.go +++ b/config/data.go @@ -13,7 +13,7 @@ type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=3,maximum=3"` + Version int64 `json:"version" jsonschema:"minimum=3,maximum=3" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -21,7 +21,7 @@ type Data struct { Log struct { Level string `json:"level" 
enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -67,12 +67,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types struct { Allow []string `json:"allow"` Block []string `json:"block"` @@ -85,7 +85,7 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -113,7 +113,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -125,34 +125,34 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` - MemoryLimit 
int64 `json:"memory_limit_mbytes"` + ForceGC int `json:"force_gc" format:"int"` + MemoryLimit int64 `json:"memory_limit_mbytes" format:"int64"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` diff --git a/config/v1/data.go b/config/v1/data.go index e398adbb..2826f02d 100644 --- a/config/v1/data.go +++ b/config/v1/data.go @@ -10,7 +10,7 @@ type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=1,maximum=1"` + Version int64 `json:"version" jsonschema:"minimum=1,maximum=1" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -18,7 +18,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -63,12 +63,12 @@ type Data struct { 
Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -78,7 +78,7 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -105,7 +105,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -117,33 +117,33 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 
`json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` diff --git a/config/v2/data.go b/config/v2/data.go index 1a549497..10476c7f 100644 --- a/config/v2/data.go +++ b/config/v2/data.go @@ -16,7 +16,7 @@ type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=2,maximum=2"` + Version int64 `json:"version" jsonschema:"minimum=2,maximum=2" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -24,7 +24,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -69,12 +69,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize 
uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -84,7 +84,7 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -112,7 +112,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -124,33 +124,33 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 
`json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` diff --git a/docs/docs.go b/docs/docs.go index cee532d5..c44762d7 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -2372,19 +2372,23 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2399,7 +2403,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2407,7 +2412,8 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2577,10 +2583,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" }, "memory_limit_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -2636,15 +2644,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2679,7 +2690,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2700,11 +2712,13 @@ const docTemplate = `{ }, 
"interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2718,10 +2732,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2795,19 +2811,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2866,13 +2886,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -2897,7 +2920,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2919,7 +2943,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -2958,7 +2983,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2975,7 +3001,8 @@ const docTemplate = `{ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -2992,13 +3019,15 @@ const docTemplate = `{ "type": "object", "properties": { "last_modified": { - "type": "integer" + 
"type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3080,7 +3109,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3089,7 +3119,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3111,7 +3142,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3120,7 +3152,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3159,20 +3192,24 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -3190,10 +3227,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3207,10 +3246,12 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3220,7 +3261,8 @@ const docTemplate = `{ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3265,7 +3307,8 @@ const 
docTemplate = `{ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3284,10 +3327,12 @@ const docTemplate = `{ "type": "number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3300,10 +3345,12 @@ const docTemplate = `{ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" @@ -3313,7 +3360,8 @@ const docTemplate = `{ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3324,7 +3372,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3382,13 +3431,15 @@ const docTemplate = `{ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3432,10 +3483,12 @@ const docTemplate = `{ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3452,10 +3505,12 @@ const docTemplate = `{ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3463,7 +3518,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": 
"array", @@ -3492,7 +3548,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "log": { "type": "array", @@ -3530,7 +3587,8 @@ const docTemplate = `{ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3539,10 +3597,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3554,16 +3614,19 @@ const docTemplate = `{ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3578,14 +3641,16 @@ const docTemplate = `{ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3614,7 +3679,8 @@ const docTemplate = `{ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3629,23 +3695,27 @@ const docTemplate = `{ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, "index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3659,20 +3729,24 @@ const docTemplate = `{ }, 
"sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3746,7 +3820,8 @@ const docTemplate = `{ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3755,11 +3830,13 @@ const docTemplate = `{ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network link, in Mbps", @@ -3767,11 +3844,13 @@ const docTemplate = `{ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3779,11 +3858,13 @@ const docTemplate = `{ }, "mss_bytes": { "description": "Maximum Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": { "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3791,79 +3872,98 @@ const 
docTemplate = `{ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": 
"integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of packets that failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an 
endpoint's RTT samples, in milliseconds", @@ -3871,75 +3971,93 @@ const docTemplate = `{ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. 
Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_bytes": { "description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3955,13 +4073,16 @@ const docTemplate = `{ "type": "number" }, 
"bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { "type": "string" @@ -3990,13 +4111,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4004,13 +4128,16 @@ const docTemplate = `{ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4051,10 +4178,12 @@ const docTemplate = `{ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4080,13 +4209,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4212,10 +4344,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" }, "memory_limit_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4271,15 +4405,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + 
"type": "integer", + "format": "int64" } } }, @@ -4314,7 +4451,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4335,11 +4473,13 @@ const docTemplate = `{ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4353,10 +4493,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4430,19 +4572,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4501,13 +4647,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4532,7 +4681,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4554,7 +4704,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4593,7 +4744,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4850,10 +5002,12 @@ const docTemplate = `{ 
"type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" diff --git a/docs/swagger.json b/docs/swagger.json index 64d93b2c..966e1e49 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -2364,19 +2364,23 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2391,7 +2395,8 @@ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2399,7 +2404,8 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2569,10 +2575,12 @@ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" }, "memory_limit_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -2628,15 +2636,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2671,7 +2682,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2692,11 +2704,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": 
"integer", + "format": "int64" } } }, @@ -2710,10 +2724,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2787,19 +2803,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2858,13 +2878,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -2889,7 +2912,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2911,7 +2935,8 @@ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -2950,7 +2975,8 @@ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2967,7 +2993,8 @@ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -2984,13 +3011,15 @@ "type": "object", "properties": { "last_modified": { - "type": "integer" + "type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3072,7 +3101,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3081,7 +3111,8 @@ } }, 
"timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3103,7 +3134,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3112,7 +3144,8 @@ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3151,20 +3184,24 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -3182,10 +3219,12 @@ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3199,10 +3238,12 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3212,7 +3253,8 @@ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3257,7 +3299,8 @@ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3276,10 +3319,12 @@ "type": "number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3292,10 +3337,12 @@ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + 
"format": "uint64" }, "type": { "type": "string" @@ -3305,7 +3352,8 @@ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3316,7 +3364,8 @@ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3374,13 +3423,15 @@ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3424,10 +3475,12 @@ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3444,10 +3497,12 @@ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3455,7 +3510,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": "array", @@ -3484,7 +3540,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "log": { "type": "array", @@ -3522,7 +3579,8 @@ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3531,10 +3589,12 @@ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3546,16 +3606,19 @@ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": 
"uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3570,14 +3633,16 @@ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3606,7 +3671,8 @@ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3621,23 +3687,27 @@ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, "index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3651,20 +3721,24 @@ }, "sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3738,7 +3812,8 @@ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3747,11 +3822,13 @@ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network 
link, in Mbps", @@ -3759,11 +3836,13 @@ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3771,11 +3850,13 @@ }, "mss_bytes": { "description": "Maximum Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": { "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3783,79 +3864,98 @@ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", 
- "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of 
packets that failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds", @@ -3863,75 +3963,93 @@ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + 
"type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_bytes": { 
"description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3947,13 +4065,16 @@ "type": "number" }, "bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { "type": "string" @@ -3982,13 +4103,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3996,13 +4120,16 @@ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4043,10 +4170,12 @@ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4072,13 +4201,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4204,10 +4336,12 @@ "type": "object", "properties": { "force_gc": { - "type": 
"integer" + "type": "integer", + "format": "int" }, "memory_limit_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4263,15 +4397,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4306,7 +4443,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4327,11 +4465,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4345,10 +4485,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4422,19 +4564,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4493,13 +4639,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4524,7 +4673,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4546,7 +4696,8 @@ } }, "max_size_mbytes": { - "type": "integer" 
+ "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4585,7 +4736,8 @@ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4842,10 +4994,12 @@ "type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 6b67f3a4..5923d296 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -3,14 +3,18 @@ definitions: api.AVstream: properties: aqueue: + format: uint64 type: integer drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -21,11 +25,13 @@ definitions: output: $ref: '#/definitions/api.AVstreamIO' queue: + format: uint64 type: integer type: object api.AVstreamIO: properties: packet: + format: uint64 type: integer size_kb: type: integer @@ -137,8 +143,10 @@ definitions: debug: properties: force_gc: + format: int type: integer memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -175,11 +183,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -204,6 +215,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -218,9 +230,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -230,8 +244,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -280,14 +296,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer 
max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int type: integer session_timeout_sec: + format: int type: integer type: object srt: @@ -326,10 +346,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -346,6 +369,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ -360,6 +384,7 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean @@ -385,6 +410,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.ConfigError: @@ -396,6 +422,7 @@ definitions: api.Error: properties: code: + format: int type: integer details: items: @@ -407,10 +434,12 @@ definitions: api.FileInfo: properties: last_modified: + format: int64 type: integer name: type: string size_bytes: + format: int64 type: integer type: object api.GraphQuery: @@ -465,12 +494,14 @@ definitions: api.MetricsQuery: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsQueryMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsQueryMetric: @@ -485,12 +516,14 @@ definitions: api.MetricsResponse: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsResponseMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsResponseMetric: @@ -516,15 +549,19 @@ definitions: api.PlayoutStatus: properties: aqueue: + format: uint64 type: integer debug: {} drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -537,8 +574,10 @@ definitions: output: $ref: '#/definitions/api.PlayoutStatusIO' queue: + format: uint64 
type: integer stream: + format: uint64 type: integer swap: $ref: '#/definitions/api.PlayoutStatusSwap' @@ -548,8 +587,10 @@ definitions: api.PlayoutStatusIO: properties: packet: + format: uint64 type: integer size_kb: + format: uint64 type: integer state: enum: @@ -557,6 +598,7 @@ definitions: - idle type: string time: + format: uint64 type: integer type: object api.PlayoutStatusSwap: @@ -586,6 +628,7 @@ definitions: bitrate_kbps: type: number channels: + format: uint64 type: integer codec: type: string @@ -599,8 +642,10 @@ definitions: description: video type: number height: + format: uint64 type: integer index: + format: uint64 type: integer language: type: string @@ -610,8 +655,10 @@ definitions: type: string sampling_hz: description: audio + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string @@ -619,6 +666,7 @@ definitions: description: common type: string width: + format: uint64 type: integer type: object api.Process: @@ -626,6 +674,7 @@ definitions: config: $ref: '#/definitions/api.ProcessConfig' created_at: + format: int64 type: integer id: type: string @@ -662,10 +711,12 @@ definitions: reconnect: type: boolean reconnect_delay_seconds: + format: uint64 type: integer reference: type: string stale_timeout_seconds: + format: uint64 type: integer type: enum: @@ -696,8 +747,10 @@ definitions: api.ProcessConfigIOCleanup: properties: max_file_age_seconds: + format: uint type: integer max_files: + format: uint type: integer pattern: type: string @@ -711,13 +764,16 @@ definitions: cpu_usage: type: number memory_mbytes: + format: uint64 type: integer waitfor_seconds: + format: uint64 type: integer type: object api.ProcessReport: properties: created_at: + format: int64 type: integer history: items: @@ -737,6 +793,7 @@ definitions: api.ProcessReportHistoryEntry: properties: created_at: + format: int64 type: integer log: items: @@ -762,14 +819,17 @@ definitions: last_logline: type: string memory_bytes: + format: uint64 type: integer 
order: type: string progress: $ref: '#/definitions/api.Progress' reconnect_seconds: + format: int64 type: integer runtime_seconds: + format: int64 type: integer type: object api.Progress: @@ -778,12 +838,15 @@ definitions: description: kbit/s type: number drop: + format: uint64 type: integer dup: + format: uint64 type: integer fps: type: number frame: + format: uint64 type: integer inputs: items: @@ -794,11 +857,13 @@ definitions: $ref: '#/definitions/api.ProgressIO' type: array packet: + format: uint64 type: integer q: type: number size_kb: description: kbytes + format: uint64 type: integer speed: type: number @@ -817,6 +882,7 @@ definitions: description: kbit/s type: number channels: + format: uint64 type: integer codec: type: string @@ -827,17 +893,21 @@ definitions: fps: type: number frame: + format: uint64 type: integer height: + format: uint64 type: integer id: type: string index: description: General + format: uint64 type: integer layout: type: string packet: + format: uint64 type: integer pix_fmt: description: Video @@ -848,15 +918,19 @@ definitions: type: number sampling_hz: description: Audio + format: uint64 type: integer size_kb: description: kbytes + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string width: + format: uint64 type: integer type: object api.RTMPChannel: @@ -905,34 +979,41 @@ definitions: type: string type: array ts: + format: int64 type: integer type: object api.SRTStatistics: properties: avail_recv_buf_bytes: description: The available space in the receiver's buffer, in bytes + format: uint64 type: integer avail_send_buf_bytes: description: The available space in the sender's buffer, in bytes + format: uint64 type: integer bandwidth_mbit: description: Estimated bandwidth of the network link, in Mbps type: number flight_size_pkt: description: The number of packets in flight + format: uint64 type: integer flow_window_pkt: description: The maximum number of packets that can be "in flight" + format: uint64 
type: integer max_bandwidth_mbit: description: Transmission bandwidth limit, in Mbps type: number mss_bytes: description: Maximum Segment Size (MSS), in bytes + format: uint64 type: integer pkt_recv_avg_belated_time_ms: description: Accumulated difference between the current time and the time-to-play of a packet that is received late + format: uint64 type: integer pkt_send_period_us: description: Current minimum time interval between which consecutive packets @@ -940,79 +1021,98 @@ definitions: type: number recv_ack_pkt: description: The total number of received ACK (Acknowledgement) control packets + format: uint64 type: integer recv_buf_bytes: description: Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + format: uint64 type: integer recv_buf_ms: description: The timespan (msec) of acknowledged packets in the receiver's buffer + format: uint64 type: integer recv_buf_pkt: description: The number of acknowledged packets in receiver's buffer + format: uint64 type: integer recv_bytes: description: Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_bytes: description: Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_pkt: description: The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + format: uint64 type: integer recv_km_pkt: description: The total number of received KM (Key Material) control packets + format: uint64 type: integer recv_loss_bytes: description: Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + format: uint64 type: integer recv_loss_pkt: description: The total number of SRT DATA 
packets detected as presently missing (either reordered or lost) at the receiver side + format: uint64 type: integer recv_nak_pkt: description: The total number of received NAK (Negative Acknowledgement) control packets + format: uint64 type: integer recv_pkt: description: The total number of received DATA packets, including retransmitted packets + format: uint64 type: integer recv_retran_pkts: description: The total number of retransmitted packets registered at the receiver side + format: uint64 type: integer recv_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + format: uint64 type: integer recv_undecrypt_bytes: description: Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_undecrypt_pkt: description: The total number of packets that failed to be decrypted at the receiver side + format: uint64 type: integer recv_unique_bytes: description: Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_unique_pkt: description: The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. 
+ format: uint64 type: integer reorder_tolerance_pkt: description: Instant value of the packet reorder tolerance + format: uint64 type: integer rtt_ms: description: Smoothed round-trip time (SRTT), an exponentially-weighted moving @@ -1021,71 +1121,89 @@ definitions: send_buf_bytes: description: Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + format: uint64 type: integer send_buf_ms: description: The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + format: uint64 type: integer send_buf_pkt: description: The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + format: uint64 type: integer send_drop_bytes: description: Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer send_drop_pkt: description: The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + format: uint64 type: integer send_duration_us: description: The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + format: uint64 type: integer send_km_pkt: description: The total number of sent KM (Key Material) control packets + format: uint64 type: integer send_loss_pkt: description: The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. 
+ format: uint64 type: integer send_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value of the peer + format: uint64 type: integer sent_ack_pkt: description: The total number of sent ACK (Acknowledgement) control packets + format: uint64 type: integer sent_bytes: description: Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_nak_pkt: description: The total number of sent NAK (Negative Acknowledgement) control packets + format: uint64 type: integer sent_pkt: description: The total number of sent DATA packets, including retransmitted packets + format: uint64 type: integer sent_retrans_bytes: description: Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_retrans_pkt: description: The total number of retransmitted packets sent by the SRT sender + format: uint64 type: integer sent_unique_bytes: description: Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_unique_pkt: description: The total number of unique DATA packets sent by the SRT sender + format: uint64 type: integer timestamp_ms: description: The time elapsed, in milliseconds, since the SRT socket has been created + format: uint64 type: integer type: object api.Session: @@ -1097,10 +1215,13 @@ definitions: description: kbit/s type: number bytes_rx: + format: uint64 type: integer bytes_tx: + format: uint64 type: integer created_at: + format: int64 type: integer extra: type: string @@ -1120,19 +1241,25 @@ definitions: $ref: '#/definitions/api.SessionStats' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionStats: properties: sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + 
format: uint64 type: integer type: object api.SessionSummary: @@ -1161,8 +1288,10 @@ definitions: description: mbit/s type: number max_sessions: + format: uint64 type: integer sessions: + format: uint64 type: integer type: object api.SessionSummarySummary: @@ -1180,10 +1309,13 @@ definitions: $ref: '#/definitions/api.SessionPeers' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionsActive: @@ -1265,8 +1397,10 @@ definitions: debug: properties: force_gc: + format: int type: integer memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -1303,11 +1437,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -1332,6 +1469,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -1346,9 +1484,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -1358,8 +1498,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -1408,14 +1550,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int type: integer session_timeout_sec: + format: int type: integer type: object srt: @@ -1454,10 +1600,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -1474,6 +1623,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ 
-1488,6 +1638,7 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean @@ -1513,6 +1664,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.Skills: @@ -1679,8 +1831,10 @@ definitions: api.WidgetProcess: properties: current_sessions: + format: uint64 type: integer total_sessions: + format: uint64 type: integer uptime: type: integer diff --git a/http/api/avstream.go b/http/api/avstream.go index 0a9c5044..279b3352 100644 --- a/http/api/avstream.go +++ b/http/api/avstream.go @@ -6,7 +6,7 @@ import ( type AVstreamIO struct { State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"` - Packet uint64 `json:"packet"` + Packet uint64 `json:"packet" format:"uint64"` Time uint64 `json:"time"` Size uint64 `json:"size_kb"` } @@ -25,11 +25,11 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) { type AVstream struct { Input AVstreamIO `json:"input"` Output AVstreamIO `json:"output"` - Aqueue uint64 `json:"aqueue"` - Queue uint64 `json:"queue"` - Dup uint64 `json:"dup"` - Drop uint64 `json:"drop"` - Enc uint64 `json:"enc"` + Aqueue uint64 `json:"aqueue" format:"uint64"` + Queue uint64 `json:"queue" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` + Drop uint64 `json:"drop" format:"uint64"` + Enc uint64 `json:"enc" format:"uint64"` Looping bool `json:"looping"` Duplicating bool `json:"duplicating"` GOP string `json:"gop"` diff --git a/http/api/error.go b/http/api/error.go index 07477568..a87ef95a 100644 --- a/http/api/error.go +++ b/http/api/error.go @@ -8,7 +8,7 @@ import ( // Error represents an error response of the API type Error struct { - Code int `json:"code" jsonschema:"required"` + Code int `json:"code" jsonschema:"required" format:"int"` Message string `json:"message" jsonschema:""` Details []string `json:"details" jsonschema:""` } diff --git a/http/api/fs.go b/http/api/fs.go index c7d12eb4..540670d2 100644 --- a/http/api/fs.go 
+++ b/http/api/fs.go @@ -3,6 +3,6 @@ package api // FileInfo represents informatiion about a file on a filesystem type FileInfo struct { Name string `json:"name" jsonschema:"minLength=1"` - Size int64 `json:"size_bytes" jsonschema:"minimum=0"` - LastMod int64 `json:"last_modified" jsonschema:"minimum=0"` + Size int64 `json:"size_bytes" jsonschema:"minimum=0" format:"int64"` + LastMod int64 `json:"last_modified" jsonschema:"minimum=0" format:"int64"` } diff --git a/http/api/metrics.go b/http/api/metrics.go index 49b184f9..f2476988 100644 --- a/http/api/metrics.go +++ b/http/api/metrics.go @@ -19,8 +19,8 @@ type MetricsQueryMetric struct { } type MetricsQuery struct { - Timerange int64 `json:"timerange_sec"` - Interval int64 `json:"interval_sec"` + Timerange int64 `json:"timerange_sec" format:"int64"` + Interval int64 `json:"interval_sec" format:"int64"` Metrics []MetricsQueryMetric `json:"metrics"` } @@ -51,8 +51,8 @@ func (v MetricsResponseValue) MarshalJSON() ([]byte, error) { } type MetricsResponse struct { - Timerange int64 `json:"timerange_sec"` - Interval int64 `json:"interval_sec"` + Timerange int64 `json:"timerange_sec" format:"int64"` + Interval int64 `json:"interval_sec" format:"int64"` Metrics []MetricsResponseMetric `json:"metrics"` } diff --git a/http/api/playout.go b/http/api/playout.go index 22a192d4..ae2b0b9d 100644 --- a/http/api/playout.go +++ b/http/api/playout.go @@ -4,9 +4,9 @@ import "github.com/datarhei/core/v16/playout" type PlayoutStatusIO struct { State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"` - Packet uint64 `json:"packet"` - Time uint64 `json:"time"` - Size uint64 `json:"size_kb"` + Packet uint64 `json:"packet" format:"uint64"` + Time uint64 `json:"time" format:"uint64"` + Size uint64 `json:"size_kb" format:"uint64"` } func (i *PlayoutStatusIO) Unmarshal(io playout.StatusIO) { @@ -33,12 +33,12 @@ func (s *PlayoutStatusSwap) Unmarshal(swap playout.StatusSwap) { type PlayoutStatus struct { ID string 
`json:"id"` Address string `json:"url"` - Stream uint64 `json:"stream"` - Queue uint64 `json:"queue"` - AQueue uint64 `json:"aqueue"` - Dup uint64 `json:"dup"` - Drop uint64 `json:"drop"` - Enc uint64 `json:"enc"` + Stream uint64 `json:"stream" format:"uint64"` + Queue uint64 `json:"queue" format:"uint64"` + AQueue uint64 `json:"aqueue" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` + Drop uint64 `json:"drop" format:"uint64"` + Enc uint64 `json:"enc" format:"uint64"` Looping bool `json:"looping"` Duplicating bool `json:"duplicating"` GOP string `json:"gop"` diff --git a/http/api/probe.go b/http/api/probe.go index 3c538dcc..dda8b260 100644 --- a/http/api/probe.go +++ b/http/api/probe.go @@ -11,8 +11,8 @@ type ProbeIO struct { // common Address string `json:"url"` Format string `json:"format"` - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` + Index uint64 `json:"index" format:"uint64"` + Stream uint64 `json:"stream" format:"uint64"` Language string `json:"language"` Type string `json:"type"` Codec string `json:"codec"` @@ -23,13 +23,13 @@ type ProbeIO struct { // video FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Pixfmt string `json:"pix_fmt"` - Width uint64 `json:"width"` - Height uint64 `json:"height"` + Width uint64 `json:"width" format:"uint64"` + Height uint64 `json:"height" format:"uint64"` // audio - Sampling uint64 `json:"sampling_hz"` + Sampling uint64 `json:"sampling_hz" format:"uint64"` Layout string `json:"layout"` - Channels uint64 `json:"channels"` + Channels uint64 `json:"channels" format:"uint64"` } func (i *ProbeIO) Unmarshal(io *app.ProbeIO) { diff --git a/http/api/process.go b/http/api/process.go index 7365e176..e217b455 100644 --- a/http/api/process.go +++ b/http/api/process.go @@ -13,7 +13,7 @@ type Process struct { ID string `json:"id" jsonschema:"minLength=1"` Type string `json:"type" jsonschema:"enum=ffmpeg"` Reference string `json:"reference"` - CreatedAt int64 `json:"created_at" 
jsonschema:"minimum=0"` + CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"` Config *ProcessConfig `json:"config,omitempty"` State *ProcessState `json:"state,omitempty"` Report *ProcessReport `json:"report,omitempty"` @@ -30,15 +30,15 @@ type ProcessConfigIO struct { type ProcessConfigIOCleanup struct { Pattern string `json:"pattern" validate:"required"` - MaxFiles uint `json:"max_files"` - MaxFileAge uint `json:"max_file_age_seconds"` + MaxFiles uint `json:"max_files" format:"uint"` + MaxFileAge uint `json:"max_file_age_seconds" format:"uint"` PurgeOnDelete bool `json:"purge_on_delete"` } type ProcessConfigLimits struct { CPU float64 `json:"cpu_usage" jsonschema:"minimum=0,maximum=100"` - Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0"` - WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0"` + Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"` + WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"` } // ProcessConfig represents the configuration of an ffmpeg process @@ -50,9 +50,9 @@ type ProcessConfig struct { Output []ProcessConfigIO `json:"output" validate:"required"` Options []string `json:"options"` Reconnect bool `json:"reconnect"` - ReconnectDelay uint64 `json:"reconnect_delay_seconds"` + ReconnectDelay uint64 `json:"reconnect_delay_seconds" format:"uint64"` Autostart bool `json:"autostart"` - StaleTimeout uint64 `json:"stale_timeout_seconds"` + StaleTimeout uint64 `json:"stale_timeout_seconds" format:"uint64"` Limits ProcessConfigLimits `json:"limits"` } @@ -188,7 +188,7 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) { // ProcessReportHistoryEntry represents the logs of a run of a restream process type ProcessReportHistoryEntry struct { - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Prelude []string `json:"prelude"` Log [][2]string `json:"log"` } @@ -235,11 +235,11 @@ func (report *ProcessReport) Unmarshal(l 
*app.Log) { type ProcessState struct { Order string `json:"order" jsonschema:"enum=start,enum=stop"` State string `json:"exec" jsonschema:"enum=finished,enum=starting,enum=running,enum=finishing,enum=killed,enum=failed"` - Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0"` - Reconnect int64 `json:"reconnect_seconds"` + Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0" format:"int64"` + Reconnect int64 `json:"reconnect_seconds" format:"int64"` LastLog string `json:"last_logline"` Progress *Progress `json:"progress"` - Memory uint64 `json:"memory_bytes"` + Memory uint64 `json:"memory_bytes" format:"uint64"` CPU json.Number `json:"cpu_usage" swaggertype:"number" jsonschema:"type=number"` Command []string `json:"command"` } diff --git a/http/api/progress.go b/http/api/progress.go index ed575fc7..a402d55a 100644 --- a/http/api/progress.go +++ b/http/api/progress.go @@ -13,29 +13,29 @@ type ProgressIO struct { Address string `json:"address" jsonschema:"minLength=1"` // General - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` + Index uint64 `json:"index" format:"uint64"` + Stream uint64 `json:"stream" format:"uint64"` Format string `json:"format"` Type string `json:"type"` Codec string `json:"codec"` Coder string `json:"coder"` - Frame uint64 `json:"frame"` + Frame uint64 `json:"frame" format:"uint64"` FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` - Packet uint64 `json:"packet"` + Packet uint64 `json:"packet" format:"uint64"` PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_kb" format:"uint64"` // kbytes Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s // Video Pixfmt string `json:"pix_fmt,omitempty"` Quantizer json.Number `json:"q,omitempty" swaggertype:"number" jsonschema:"type=number"` - Width uint64 `json:"width,omitempty"` - Height uint64 `json:"height,omitempty"` 
+ Width uint64 `json:"width,omitempty" format:"uint64"` + Height uint64 `json:"height,omitempty" format:"uint64"` // Audio - Sampling uint64 `json:"sampling_hz,omitempty"` + Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"` Layout string `json:"layout,omitempty"` - Channels uint64 `json:"channels,omitempty"` + Channels uint64 `json:"channels,omitempty" format:"uint64"` // avstream AVstream *AVstream `json:"avstream"` @@ -79,16 +79,16 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { type Progress struct { Input []ProgressIO `json:"inputs"` Output []ProgressIO `json:"outputs"` - Frame uint64 `json:"frame"` - Packet uint64 `json:"packet"` + Frame uint64 `json:"frame" format:"uint64"` + Packet uint64 `json:"packet" format:"uint64"` FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Quantizer json.Number `json:"q" swaggertype:"number" jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_kb" format:"uint64"` // kbytes Time json.Number `json:"time" swaggertype:"number" jsonschema:"type=number"` Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s Speed json.Number `json:"speed" swaggertype:"number" jsonschema:"type=number"` - Drop uint64 `json:"drop"` - Dup uint64 `json:"dup"` + Drop uint64 `json:"drop" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` } // Unmarshal converts a restreamer Progress to a Progress in API representation diff --git a/http/api/session.go b/http/api/session.go index 8078531a..c616121f 100644 --- a/http/api/session.go +++ b/http/api/session.go @@ -8,9 +8,9 @@ import ( // SessionStats are the accumulated numbers for the session summary type SessionStats struct { - TotalSessions uint64 `json:"sessions"` - TotalRxBytes uint64 `json:"traffic_rx_mb"` - TotalTxBytes uint64 `json:"traffic_tx_mb"` + TotalSessions uint64 `json:"sessions" format:"uint64"` + TotalRxBytes uint64 `json:"traffic_rx_mb" format:"uint64"` + 
TotalTxBytes uint64 `json:"traffic_tx_mb" format:"uint64"` } // SessionPeers is for the grouping by peers in the summary @@ -24,12 +24,12 @@ type SessionPeers struct { type Session struct { ID string `json:"id"` Reference string `json:"reference"` - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Location string `json:"local"` Peer string `json:"remote"` Extra string `json:"extra"` - RxBytes uint64 `json:"bytes_rx"` - TxBytes uint64 `json:"bytes_tx"` + RxBytes uint64 `json:"bytes_rx" format:"uint64"` + TxBytes uint64 `json:"bytes_tx" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s TxBitrate json.Number `json:"bandwidth_tx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s } @@ -50,10 +50,10 @@ func (s *Session) Unmarshal(sess session.Session) { // SessionSummaryActive represents the currently active sessions type SessionSummaryActive struct { SessionList []Session `json:"list"` - Sessions uint64 `json:"sessions"` + Sessions uint64 `json:"sessions" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s TxBitrate json.Number `json:"bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s - MaxSessions uint64 `json:"max_sessions"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` MaxRxBitrate json.Number `json:"max_bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s MaxTxBitrate json.Number `json:"max_bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s } diff --git a/http/api/srt.go b/http/api/srt.go index bb31498e..e41b2514 100644 --- a/http/api/srt.go +++ b/http/api/srt.go @@ -8,60 +8,60 @@ import ( // SRTStatistics represents the statistics of a SRT connection type SRTStatistics struct { - MsTimeStamp uint64 `json:"timestamp_ms"` // The time elapsed, in milliseconds, since the SRT socket has been 
created + MsTimeStamp uint64 `json:"timestamp_ms" format:"uint64"` // The time elapsed, in milliseconds, since the SRT socket has been created // Accumulated - PktSent uint64 `json:"sent_pkt"` // The total number of sent DATA packets, including retransmitted packets - PktRecv uint64 `json:"recv_pkt"` // The total number of received DATA packets, including retransmitted packets - PktSentUnique uint64 `json:"sent_unique_pkt"` // The total number of unique DATA packets sent by the SRT sender - PktRecvUnique uint64 `json:"recv_unique_pkt"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. - PktSndLoss uint64 `json:"send_loss_pkt"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. - PktRcvLoss uint64 `json:"recv_loss_pkt"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side - PktRetrans uint64 `json:"sent_retrans_pkt"` // The total number of retransmitted packets sent by the SRT sender - PktRcvRetrans uint64 `json:"recv_retran_pkts"` // The total number of retransmitted packets registered at the receiver side - PktSentACK uint64 `json:"sent_ack_pkt"` // The total number of sent ACK (Acknowledgement) control packets - PktRecvACK uint64 `json:"recv_ack_pkt"` // The total number of received ACK (Acknowledgement) control packets - PktSentNAK uint64 `json:"sent_nak_pkt"` // The total number of sent NAK (Negative Acknowledgement) control packets - PktRecvNAK uint64 `json:"recv_nak_pkt"` // The total number of received NAK (Negative Acknowledgement) control packets - PktSentKM uint64 `json:"send_km_pkt"` // The total number of sent KM (Key Material) control packets - PktRecvKM uint64 `json:"recv_km_pkt"` // The total 
number of received KM (Key Material) control packets - UsSndDuration uint64 `json:"send_duration_us"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged - PktSndDrop uint64 `json:"send_drop_pkt"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time - PktRcvDrop uint64 `json:"recv_drop_pkt"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets - PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt"` // The total number of packets that failed to be decrypted at the receiver side - - ByteSent uint64 `json:"sent_bytes"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecv uint64 `json:"recv_bytes"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSentUnique uint64 `json:"sent_unique_bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecvUnique uint64 `json:"recv_unique_bytes"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvLoss uint64 `json:"recv_loss_bytes"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size - ByteRetrans uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSndDrop uint64 `json:"send_drop_bytes"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvDrop uint64 `json:"recv_drop_bytes"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, 
TCP, SRT) - ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + PktSent uint64 `json:"sent_pkt" format:"uint64"` // The total number of sent DATA packets, including retransmitted packets + PktRecv uint64 `json:"recv_pkt" format:"uint64"` // The total number of received DATA packets, including retransmitted packets + PktSentUnique uint64 `json:"sent_unique_pkt" format:"uint64"` // The total number of unique DATA packets sent by the SRT sender + PktRecvUnique uint64 `json:"recv_unique_pkt" format:"uint64"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. + PktSndLoss uint64 `json:"send_loss_pkt" format:"uint64"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. 
+ PktRcvLoss uint64 `json:"recv_loss_pkt" format:"uint64"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side + PktRetrans uint64 `json:"sent_retrans_pkt" format:"uint64"` // The total number of retransmitted packets sent by the SRT sender + PktRcvRetrans uint64 `json:"recv_retran_pkts" format:"uint64"` // The total number of retransmitted packets registered at the receiver side + PktSentACK uint64 `json:"sent_ack_pkt" format:"uint64"` // The total number of sent ACK (Acknowledgement) control packets + PktRecvACK uint64 `json:"recv_ack_pkt" format:"uint64"` // The total number of received ACK (Acknowledgement) control packets + PktSentNAK uint64 `json:"sent_nak_pkt" format:"uint64"` // The total number of sent NAK (Negative Acknowledgement) control packets + PktRecvNAK uint64 `json:"recv_nak_pkt" format:"uint64"` // The total number of received NAK (Negative Acknowledgement) control packets + PktSentKM uint64 `json:"send_km_pkt" format:"uint64"` // The total number of sent KM (Key Material) control packets + PktRecvKM uint64 `json:"recv_km_pkt" format:"uint64"` // The total number of received KM (Key Material) control packets + UsSndDuration uint64 `json:"send_duration_us" format:"uint64"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + PktSndDrop uint64 `json:"send_drop_pkt" format:"uint64"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + PktRcvDrop uint64 `json:"recv_drop_pkt" format:"uint64"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt" format:"uint64"` // The total number of packets that failed to be decrypted at the receiver side + + ByteSent uint64 `json:"sent_bytes" format:"uint64"` // 
Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecv uint64 `json:"recv_bytes" format:"uint64"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSentUnique uint64 `json:"sent_unique_bytes" format:"uint64"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecvUnique uint64 `json:"recv_unique_bytes" format:"uint64"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvLoss uint64 `json:"recv_loss_bytes" format:"uint64"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + ByteRetrans uint64 `json:"sent_retrans_bytes" format:"uint64"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSndDrop uint64 `json:"send_drop_bytes" format:"uint64"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvDrop uint64 `json:"recv_drop_bytes" format:"uint64"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes" format:"uint64"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) // Instantaneous - UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds - PktFlowWindow uint64 `json:"flow_window_pkt"` // The maximum number of packets that can be "in flight" - PktFlightSize uint64 `json:"flight_size_pkt"` // The number of packets in flight - MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average 
(EWMA) of an endpoint's RTT samples, in milliseconds - MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps - ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes"` // The available space in the sender's buffer, in bytes - ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes"` // The available space in the receiver's buffer, in bytes - MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps - ByteMSS uint64 `json:"mss_bytes"` // Maximum Segment Size (MSS), in bytes - PktSndBuf uint64 `json:"send_buf_pkt"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged - ByteSndBuf uint64 `json:"send_buf_bytes"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsSndBuf uint64 `json:"send_buf_ms"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) - MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value of the peer - PktRcvBuf uint64 `json:"recv_buf_pkt"` // The number of acknowledged packets in receiver's buffer - ByteRcvBuf uint64 `json:"recv_buf_bytes"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsRcvBuf uint64 `json:"recv_buf_ms"` // The timespan (msec) of acknowledged packets in the receiver's buffer - MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY - PktReorderTolerance uint64 `json:"reorder_tolerance_pkt"` // Instant value of the packet reorder tolerance - PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms"` // Accumulated difference between the current time and the time-to-play of a packet that is received late + UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum 
time interval between which consecutive packets are sent, in microseconds + PktFlowWindow uint64 `json:"flow_window_pkt" format:"uint64"` // The maximum number of packets that can be "in flight" + PktFlightSize uint64 `json:"flight_size_pkt" format:"uint64"` // The number of packets in flight + MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds + MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps + ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes" format:"uint64"` // The available space in the sender's buffer, in bytes + ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes" format:"uint64"` // The available space in the receiver's buffer, in bytes + MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps + ByteMSS uint64 `json:"mss_bytes" format:"uint64"` // Maximum Segment Size (MSS), in bytes + PktSndBuf uint64 `json:"send_buf_pkt" format:"uint64"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + ByteSndBuf uint64 `json:"send_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsSndBuf uint64 `json:"send_buf_ms" format:"uint64"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value of the peer + PktRcvBuf uint64 `json:"recv_buf_pkt" format:"uint64"` // The number of acknowledged packets in receiver's buffer + ByteRcvBuf uint64 `json:"recv_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsRcvBuf uint64 `json:"recv_buf_ms" format:"uint64"` // The timespan 
(msec) of acknowledged packets in the receiver's buffer + MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + PktReorderTolerance uint64 `json:"reorder_tolerance_pkt" format:"uint64"` // Instant value of the packet reorder tolerance + PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms" format:"uint64"` // Accumulated difference between the current time and the time-to-play of a packet that is received late } // Unmarshal converts the SRT statistics into API representation @@ -119,7 +119,7 @@ func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) { } type SRTLog struct { - Timestamp int64 `json:"ts"` + Timestamp int64 `json:"ts" format:"int64"` Message []string `json:"msg"` } diff --git a/http/api/widget.go b/http/api/widget.go index d0f35e6c..5d91bda6 100644 --- a/http/api/widget.go +++ b/http/api/widget.go @@ -1,7 +1,7 @@ package api type WidgetProcess struct { - CurrentSessions uint64 `json:"current_sessions"` - TotalSessions uint64 `json:"total_sessions"` + CurrentSessions uint64 `json:"current_sessions" format:"uint64"` + TotalSessions uint64 `json:"total_sessions" format:"uint64"` Uptime int64 `json:"uptime"` } From f0ff3b89c144ca1fb97f1a4374909a7a9957b8b0 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Fri, 13 Jan 2023 11:12:21 +0100 Subject: [PATCH 18/39] Allow RTMP token as stream key in the path --- rtmp/rtmp.go | 107 +++++++++++++++++++++++++++++++--------------- rtmp/rtmp_test.go | 26 +++++++++++ 2 files changed, 99 insertions(+), 34 deletions(-) create mode 100644 rtmp/rtmp_test.go diff --git a/rtmp/rtmp.go b/rtmp/rtmp.go index 3c219709..4990b49d 100644 --- a/rtmp/rtmp.go +++ b/rtmp/rtmp.go @@ -6,6 +6,7 @@ import ( "crypto/tls" "fmt" "net" + "net/url" "path/filepath" "strings" "sync" @@ -326,18 +327,53 @@ func (s *server) log(who, action, path, message string, client net.Addr) { }).Log(message) } +// getToken returns 
the path and the token found in the URL. If the token +// was part of the path, the token is removed from the path. The token in +// the query string takes precedence. The token in the path is assumed to +// be the last path element. +func getToken(u *url.URL) (string, string) { + q := u.Query() + token := q.Get("token") + + if len(token) != 0 { + // The token was in the query. Return the unmodified path and the token + return u.Path, token + } + + pathElements := strings.Split(u.EscapedPath(), "/") + nPathElements := len(pathElements) + + if nPathElements == 0 { + return u.Path, "" + } + + // Return the path without the token + return strings.Join(pathElements[:nPathElements-1], "/"), pathElements[nPathElements-1] +} + // handlePlay is called when a RTMP client wants to play a stream func (s *server) handlePlay(conn *rtmp.Conn) { client := conn.NetConn().RemoteAddr() - // Check the token - q := conn.URL.Query() - token := q.Get("token") + defer conn.Close() - if len(s.token) != 0 && s.token != token { - s.log("PLAY", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", client) - conn.Close() - return + playPath := conn.URL.Path + + // Check the token in the URL if one is required + if len(s.token) != 0 { + path, token := getToken(conn.URL) + + if len(token) == 0 { + s.log("PLAY", "FORBIDDEN", path, "no streamkey provided", client) + return + } + + if s.token != token { + s.log("PLAY", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } /* @@ -361,14 +397,14 @@ func (s *server) handlePlay(conn *rtmp.Conn) { // Look for the stream s.lock.RLock() - ch := s.channels[conn.URL.Path] + ch := s.channels[playPath] s.lock.RUnlock() if ch != nil { // Set the metadata for the client conn.SetMetaData(ch.metadata) - s.log("PLAY", "START", conn.URL.Path, "", client) + s.log("PLAY", "START", playPath, "", client) // Get a cursor and apply filters cursor := ch.queue.Oldest() @@ -395,32 +431,39 @@ func (s *server) handlePlay(conn
*rtmp.Conn) { ch.RemoveSubscriber(id) - s.log("PLAY", "STOP", conn.URL.Path, "", client) + s.log("PLAY", "STOP", playPath, "", client) } else { - s.log("PLAY", "NOTFOUND", conn.URL.Path, "", client) + s.log("PLAY", "NOTFOUND", playPath, "", client) } - - conn.Close() } // handlePublish is called when a RTMP client wants to publish a stream func (s *server) handlePublish(conn *rtmp.Conn) { client := conn.NetConn().RemoteAddr() - // Check the token - q := conn.URL.Query() - token := q.Get("token") + defer conn.Close() - if len(s.token) != 0 && s.token != token { - s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", client) - conn.Close() - return + playPath := conn.URL.Path + + if len(s.token) != 0 { + path, token := getToken(conn.URL) + + if len(token) == 0 { + s.log("PUBLISH", "FORBIDDEN", path, "no streamkey provided", client) + return + } + + if s.token != token { + s.log("PUBLISH", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } // Check the app patch - if !strings.HasPrefix(conn.URL.Path, s.app) { + if !strings.HasPrefix(playPath, s.app) { s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid app", client) - conn.Close() return } @@ -428,8 +471,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { streams, _ := conn.Streams() if len(streams) == 0 { - s.log("PUBLISH", "INVALID", conn.URL.Path, "no streams available", client) - conn.Close() + s.log("PUBLISH", "INVALID", playPath, "no streams available", client) return } @@ -437,7 +479,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { ch := s.channels[conn.URL.Path] if ch == nil { - reference := strings.TrimPrefix(strings.TrimSuffix(conn.URL.Path, filepath.Ext(conn.URL.Path)), s.app+"/") + reference := strings.TrimPrefix(strings.TrimSuffix(playPath, filepath.Ext(playPath)), s.app+"/") // Create a new channel ch = newChannel(conn, reference, s.collector) @@ -456,7 +498,7 @@ func (s *server) handlePublish(conn *rtmp.Conn) { } } -
s.channels[conn.URL.Path] = ch + s.channels[playPath] = ch } else { ch = nil } @@ -464,27 +506,24 @@ func (s *server) handlePublish(conn *rtmp.Conn) { s.lock.Unlock() if ch == nil { - s.log("PUBLISH", "CONFLICT", conn.URL.Path, "already publishing", client) - conn.Close() + s.log("PUBLISH", "CONFLICT", playPath, "already publishing", client) return } - s.log("PUBLISH", "START", conn.URL.Path, "", client) + s.log("PUBLISH", "START", playPath, "", client) for _, stream := range streams { - s.log("PUBLISH", "STREAM", conn.URL.Path, stream.Type().String(), client) + s.log("PUBLISH", "STREAM", playPath, stream.Type().String(), client) } // Ingest the data avutil.CopyPackets(ch.queue, conn) s.lock.Lock() - delete(s.channels, conn.URL.Path) + delete(s.channels, playPath) s.lock.Unlock() ch.Close() - s.log("PUBLISH", "STOP", conn.URL.Path, "", client) - - conn.Close() + s.log("PUBLISH", "STOP", playPath, "", client) } diff --git a/rtmp/rtmp_test.go b/rtmp/rtmp_test.go new file mode 100644 index 00000000..20bb5274 --- /dev/null +++ b/rtmp/rtmp_test.go @@ -0,0 +1,26 @@ +package rtmp + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestToken(t *testing.T) { + data := [][]string{ + {"/foo/bar", "/foo", "bar"}, + {"/foo/bar?token=abc", "/foo/bar", "abc"}, + {"/foo/bar/abc", "/foo/bar", "abc"}, + } + + for _, d := range data { + u, err := url.Parse(d[0]) + require.NoError(t, err) + + path, token := getToken(u) + + require.Equal(t, d[1], path, "url=%s", u.String()) + require.Equal(t, d[2], token, "url=%s", u.String()) + } +} From 311defb27c87230c13e77f32531b41cc4aeda39a Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Thu, 19 Jan 2023 11:46:45 +0100 Subject: [PATCH 19/39] Fix /config/reload return type --- docs/docs.go | 4 ++-- docs/swagger.json | 4 ++-- docs/swagger.yaml | 4 ++-- http/handler/api/config.go | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/docs.go b/docs/docs.go index c44762d7..672b5d20 
100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -292,9 +292,9 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.", + "description": "Reload the currently active configuration. This will trigger a restart of the Core.", "produces": [ - "text/plain" + "application/json" ], "tags": [ "v16.7.2" diff --git a/docs/swagger.json b/docs/swagger.json index 966e1e49..2b32d525 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -284,9 +284,9 @@ "ApiKeyAuth": [] } ], - "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.", + "description": "Reload the currently active configuration. This will trigger a restart of the Core.", "produces": [ - "text/plain" + "application/json" ], "tags": [ "v16.7.2" diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 5923d296..daabe182 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -2077,10 +2077,10 @@ paths: /api/v3/config/reload: get: description: Reload the currently active configuration. This will trigger a - restart of the Restreamer. + restart of the Core. operationId: config-3-reload produces: - - text/plain + - application/json responses: "200": description: OK diff --git a/http/handler/api/config.go b/http/handler/api/config.go index 5e084e0b..cde44f79 100644 --- a/http/handler/api/config.go +++ b/http/handler/api/config.go @@ -157,15 +157,15 @@ func (p *ConfigHandler) Set(c echo.Context) error { // Reload will reload the currently active configuration // @Summary Reload the currently active configuration -// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer. +// @Description Reload the currently active configuration. This will trigger a restart of the Core. 
// @Tags v16.7.2 // @ID config-3-reload -// @Produce plain -// @Success 200 {string} string "OK" +// @Produce json +// @Success 200 {string} string // @Security ApiKeyAuth // @Router /api/v3/config/reload [get] func (p *ConfigHandler) Reload(c echo.Context) error { p.store.Reload() - return c.String(http.StatusOK, "OK") + return c.JSON(http.StatusOK, "OK") } From e374f833771fc0b5da317d4907ba1ca89609e20e Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Thu, 19 Jan 2023 16:13:53 +0100 Subject: [PATCH 20/39] Fix config timestamps created_at represents the time when the configuration has been persisted to disk. loaded_at represents the time when the configuration has actually been used. If created_at is larger than loaded_at, then the Core needs a reload in order to apply the latest configuration. if created_at is lower than loaded_at, then the Core applied the latest configuration. The value of updated_at is irrelevant and shouldn't be used. --- app/api/api.go | 2 ++ config/data.go | 6 +++--- config/store/json.go | 14 ++++---------- http/handler/api/config.go | 8 ++++++++ 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/app/api/api.go b/app/api/api.go index 170c44d6..d8722c30 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -257,6 +257,8 @@ func (a *api) Reload() error { return fmt.Errorf("not all variables are set or valid") } + cfg.LoadedAt = time.Now() + store.SetActive(cfg) a.config.store = store diff --git a/config/data.go b/config/data.go index a44cb0b2..6d9d509f 100644 --- a/config/data.go +++ b/config/data.go @@ -10,9 +10,9 @@ import ( // Data is the actual configuration data for the app type Data struct { - CreatedAt time.Time `json:"created_at"` - LoadedAt time.Time `json:"-"` - UpdatedAt time.Time `json:"-"` + CreatedAt time.Time `json:"created_at"` // When this config has been persisted + LoadedAt time.Time `json:"-"` // When this config has been actually used + UpdatedAt time.Time `json:"-"` // Irrelevant Version int64 `json:"version"
jsonschema:"minimum=3,maximum=3" format:"int64"` ID string `json:"id"` Name string `json:"name"` diff --git a/config/store/json.go b/config/store/json.go index a63ba627..a6b93bfe 100644 --- a/config/store/json.go +++ b/config/store/json.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "time" "github.com/datarhei/core/v16/config" v1 "github.com/datarhei/core/v16/config/v1" @@ -57,14 +56,10 @@ func (c *jsonStore) Set(d *config.Config) error { data := d.Clone() - data.CreatedAt = time.Now() - if err := c.store(data); err != nil { return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err) } - data.UpdatedAt = time.Now() - c.data["base"] = data return nil @@ -89,7 +84,9 @@ func (c *jsonStore) SetActive(d *config.Config) error { return fmt.Errorf("configuration data has errors after validation") } - c.data["merged"] = d.Clone() + data := d.Clone() + + c.data["merged"] = data return nil } @@ -129,15 +126,12 @@ func (c *jsonStore) load(cfg *config.Config) error { cfg.Data = *data - cfg.LoadedAt = time.Now() - cfg.UpdatedAt = cfg.LoadedAt + cfg.UpdatedAt = cfg.CreatedAt return nil } func (c *jsonStore) store(data *config.Config) error { - data.CreatedAt = time.Now() - if len(c.path) == 0 { return nil } diff --git a/http/handler/api/config.go b/http/handler/api/config.go index cde44f79..d2484de2 100644 --- a/http/handler/api/config.go +++ b/http/handler/api/config.go @@ -3,6 +3,7 @@ package api import ( "io" "net/http" + "time" cfgstore "github.com/datarhei/core/v16/config/store" cfgvars "github.com/datarhei/core/v16/config/vars" @@ -71,6 +72,10 @@ func (p *ConfigHandler) Set(c echo.Context) error { } cfg := p.store.Get() + cfgActive := p.store.GetActive() + + // Copy the timestamp of when this config has been used + cfg.LoadedAt = cfgActive.LoadedAt // For each version, set the current config as default config value. This will // allow to set a partial config without destroying the other values. 
@@ -119,6 +124,9 @@ func (p *ConfigHandler) Set(c echo.Context) error { return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version) } + cfg.CreatedAt = time.Now() + cfg.UpdatedAt = cfg.CreatedAt + // Now we make a copy from the config and merge it with the environment // variables. If this configuration is valid, we will store the un-merged // one to disk. From 0147651de6193b3a1c7649607374a2ee3dcf4593 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Fri, 20 Jan 2023 13:38:33 +0100 Subject: [PATCH 21/39] Extend placeholders 1. Allow variables in placeholders for parameter values, e.g. {rtmp,name=$processid}. The variable starts with a $ letter. The recognized variables are provided with the Replace func. 2. The template func receives the process config and the name of the section where this placeholder is located, i.e. "global", "input", or "output". --- app/api/api.go | 78 ++++++++++++++++--------- restream/app/process.go | 74 ------------------------ restream/replace/replace.go | 49 ++++++++++------ restream/replace/replace_test.go | 66 ++++++++++++++++++---- restream/restream.go | 97 +++++++++++++++++++++++++++++++- 5 files changed, 232 insertions(+), 132 deletions(-) diff --git a/app/api/api.go b/app/api/api.go index d8722c30..b1f5d76b 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -31,6 +31,7 @@ import ( "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/prometheus" "github.com/datarhei/core/v16/restream" + restreamapp "github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/replace" "github.com/datarhei/core/v16/restream/store" "github.com/datarhei/core/v16/rtmp" @@ -449,39 +450,62 @@ func (a *api) start() error { a.replacer = replace.New() { - a.replacer.RegisterTemplate("diskfs", a.diskfs.Base(), nil) - a.replacer.RegisterTemplate("memfs", a.memfs.Base(), nil) + a.replacer.RegisterTemplateFunc("diskfs", func(config *restreamapp.Config, section string) string { + return
a.diskfs.Base() + }, nil) + + a.replacer.RegisterTemplateFunc("fs:disk", func(config *restreamapp.Config, section string) string { + return a.diskfs.Base() + }, nil) + + a.replacer.RegisterTemplateFunc("memfs", func(config *restreamapp.Config, section string) string { + return a.memfs.Base() + }, nil) + + a.replacer.RegisterTemplateFunc("fs:mem", func(config *restreamapp.Config, section string) string { + return a.memfs.Base() + }, nil) + + a.replacer.RegisterTemplateFunc("rtmp", func(config *restreamapp.Config, section string) string { + host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address) + if len(host) == 0 { + host = "localhost" + } - host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address) - if len(host) == 0 { - host = "localhost" - } + template := "rtmp://" + host + ":" + port + if cfg.RTMP.App != "/" { + template += cfg.RTMP.App + } + template += "/{name}" - template := "rtmp://" + host + ":" + port - if cfg.RTMP.App != "/" { - template += cfg.RTMP.App - } - template += "/{name}" + if len(cfg.RTMP.Token) != 0 { + template += "?token=" + cfg.RTMP.Token + } - if len(cfg.RTMP.Token) != 0 { - template += "?token=" + cfg.RTMP.Token - } + return template + }, nil) - a.replacer.RegisterTemplate("rtmp", template, nil) + a.replacer.RegisterTemplateFunc("srt", func(config *restreamapp.Config, section string) string { + host, port, _ = gonet.SplitHostPort(cfg.SRT.Address) + if len(host) == 0 { + host = "localhost" + } - host, port, _ = gonet.SplitHostPort(cfg.SRT.Address) - if len(host) == 0 { - host = "localhost" - } + template := "srt://" + host + ":" + port + "?mode=caller&transtype=live&latency={latency}&streamid={name}" + if section == "output" { + template += ",mode:publish" + } else { + template += ",mode:request" + } + if len(cfg.SRT.Token) != 0 { + template += ",token:" + cfg.SRT.Token + } + if len(cfg.SRT.Passphrase) != 0 { + template += "&passphrase=" + cfg.SRT.Passphrase + } - template = "srt://" + host + ":" + port + 
"?mode=caller&transtype=live&latency={latency}&streamid={name},mode:{mode}" - if len(cfg.SRT.Token) != 0 { - template += ",token:" + cfg.SRT.Token - } - if len(cfg.SRT.Passphrase) != 0 { - template += "&passphrase=" + cfg.SRT.Passphrase - } - a.replacer.RegisterTemplate("srt", template, map[string]string{ + return template + }, map[string]string{ "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds }) } diff --git a/restream/app/process.go b/restream/app/process.go index 1d62220b..4ec6036a 100644 --- a/restream/app/process.go +++ b/restream/app/process.go @@ -2,7 +2,6 @@ package app import ( "github.com/datarhei/core/v16/process" - "github.com/datarhei/core/v16/restream/replace" ) type ConfigIOCleanup struct { @@ -80,79 +79,6 @@ func (config *Config) Clone() *Config { return clone } -// ReplacePlaceholders replaces all placeholders in the config. The config -// will be modified in place. -func (config *Config) ResolvePlaceholders(r replace.Replacer) { - for i, option := range config.Options { - // Replace any known placeholders - option = r.Replace(option, "diskfs", "") - - config.Options[i] = option - } - - // Resolving the given inputs - for i, input := range config.Input { - // Replace any known placeholders - input.ID = r.Replace(input.ID, "processid", config.ID) - input.ID = r.Replace(input.ID, "reference", config.Reference) - input.Address = r.Replace(input.Address, "inputid", input.ID) - input.Address = r.Replace(input.Address, "processid", config.ID) - input.Address = r.Replace(input.Address, "reference", config.Reference) - input.Address = r.Replace(input.Address, "diskfs", "") - input.Address = r.Replace(input.Address, "memfs", "") - input.Address = r.Replace(input.Address, "rtmp", "") - input.Address = r.Replace(input.Address, "srt", "") - - for j, option := range input.Options { - // Replace any known placeholders - option = r.Replace(option, "inputid", input.ID) - option = r.Replace(option, "processid", config.ID) - option = 
r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - input.Options[j] = option - } - - config.Input[i] = input - } - - // Resolving the given outputs - for i, output := range config.Output { - // Replace any known placeholders - output.ID = r.Replace(output.ID, "processid", config.ID) - output.Address = r.Replace(output.Address, "outputid", output.ID) - output.Address = r.Replace(output.Address, "processid", config.ID) - output.Address = r.Replace(output.Address, "reference", config.Reference) - output.Address = r.Replace(output.Address, "diskfs", "") - output.Address = r.Replace(output.Address, "memfs", "") - output.Address = r.Replace(output.Address, "rtmp", "") - output.Address = r.Replace(output.Address, "srt", "") - - for j, option := range output.Options { - // Replace any known placeholders - option = r.Replace(option, "outputid", output.ID) - option = r.Replace(option, "processid", config.ID) - option = r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - output.Options[j] = option - } - - for j, cleanup := range output.Cleanup { - // Replace any known placeholders - cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference) - - output.Cleanup[j] = cleanup - } - - config.Output[i] = output - } -} - // CreateCommand created the FFmpeg command from this config. 
func (config *Config) CreateCommand() []string { var command []string diff --git a/restream/replace/replace.go b/restream/replace/replace.go index f87757eb..83202ff1 100644 --- a/restream/replace/replace.go +++ b/restream/replace/replace.go @@ -4,8 +4,13 @@ import ( "net/url" "regexp" "strings" + + "github.com/datarhei/core/v16/glob" + "github.com/datarhei/core/v16/restream/app" ) +type TemplateFn func(config *app.Config, section string) string + type Replacer interface { // RegisterTemplate registers a template for a specific placeholder. Template // may contain placeholders as well of the form {name}. They will be replaced @@ -15,7 +20,7 @@ type Replacer interface { // RegisterTemplateFunc does the same as RegisterTemplate, but the template // is returned by the template function. - RegisterTemplateFunc(placeholder string, template func() string, defaults map[string]string) + RegisterTemplateFunc(placeholder string, template TemplateFn, defaults map[string]string) // Replace replaces all occurences of placeholder in str with value. The placeholder is of the // form {placeholder}. It is possible to escape a characters in value with \\ by appending a ^ @@ -25,12 +30,13 @@ type Replacer interface { // the value of the corresponding key in the parameters. // If the value is an empty string, the registered templates will be searched for that // placeholder. If no template is found, the placeholder will be replaced by the empty string. - // A placeholder name may consist on of the letters a-z. - Replace(str, placeholder, value string) string + // A placeholder name may consist on of the letters a-z and ':'. The placeholder may contain + // a glob pattern to find the appropriate template. 
+ Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string } type template struct { - fn func() string + fn TemplateFn defaults map[string]string } @@ -45,38 +51,38 @@ type replacer struct { func New() Replacer { r := &replacer{ templates: make(map[string]template), - re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`), - templateRe: regexp.MustCompile(`{([a-z]+)}`), + re: regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`), + templateRe: regexp.MustCompile(`{([a-z:]+)}`), } return r } func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) { - r.templates[placeholder] = template{ - fn: func() string { return tmpl }, - defaults: defaults, - } + r.RegisterTemplateFunc(placeholder, func(*app.Config, string) string { return tmpl }, defaults) } -func (r *replacer) RegisterTemplateFunc(placeholder string, tmplFn func() string, defaults map[string]string) { +func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateFn, defaults map[string]string) { r.templates[placeholder] = template{ - fn: tmplFn, + fn: templateFn, defaults: defaults, } } -func (r *replacer) Replace(str, placeholder, value string) string { +func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, kind string) string { str = r.re.ReplaceAllStringFunc(str, func(match string) string { matches := r.re.FindStringSubmatch(match) - if matches[1] != placeholder { + + if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok { return match } + placeholder := matches[1] + // We need a copy from the value v := value var tmpl template = template{ - fn: func() string { return v }, + fn: func(*app.Config, string) string { return v }, } // Check for a registered template @@ -87,8 +93,8 @@ func (r *replacer) Replace(str, placeholder, value string) string { } } - v = tmpl.fn() - v = r.compileTemplate(v, matches[3], tmpl.defaults) + v = tmpl.fn(config, kind) + v = 
r.compileTemplate(v, matches[3], vars, tmpl.defaults) if len(matches[2]) != 0 { // If there's a character to escape, we also have to escape the @@ -113,7 +119,7 @@ func (r *replacer) Replace(str, placeholder, value string) string { // placeholder name and will be replaced with the value. The resulting string is "Hello World!". // If a placeholder name is not present in the params string, it will not be replaced. The key // and values can be escaped as in net/url.QueryEscape. -func (r *replacer) compileTemplate(str, params string, defaults map[string]string) string { +func (r *replacer) compileTemplate(str, params string, vars map[string]string, defaults map[string]string) string { if len(params) == 0 && len(defaults) == 0 { return str } @@ -132,15 +138,22 @@ func (r *replacer) compileTemplate(str, params string, defaults map[string]strin if key == "" { continue } + key, value, _ := strings.Cut(key, "=") key, err := url.QueryUnescape(key) if err != nil { continue } + value, err = url.QueryUnescape(value) if err != nil { continue } + + for name, v := range vars { + value = strings.ReplaceAll(value, "$"+name, v) + } + p[key] = value } diff --git a/restream/replace/replace_test.go b/restream/replace/replace_test.go index f1ebcceb..1d9ccfe0 100644 --- a/restream/replace/replace_test.go +++ b/restream/replace/replace_test.go @@ -3,6 +3,7 @@ package replace import ( "testing" + "github.com/datarhei/core/v16/restream/app" "github.com/stretchr/testify/require" ) @@ -24,25 +25,39 @@ func TestReplace(t *testing.T) { r := New() for _, e := range samples { - replaced := r.Replace(e[0], "foobar", foobar) + replaced := r.Replace(e[0], "foobar", foobar, nil, nil, "") require.Equal(t, e[1], replaced, e[0]) } - replaced := r.Replace("{foobar}", "foobar", "") + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") require.Equal(t, "", replaced) } func TestReplaceTemplate(t *testing.T) { r := New() - r.RegisterTemplate("foobar", "Hello {who}! 
{what}?", nil) + r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", nil) - replaced := r.Replace("{foobar,who=World}", "foobar", "") + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! {what}?", replaced) - replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "") + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc^2?", replaced) - replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "") + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) +} + +func TestReplaceTemplateFunc(t *testing.T) { + r := New() + r.RegisterTemplateFunc("foo:bar", func(config *app.Config, kind string) string { return "Hello {who}! {what}?" }, nil) + + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! {what}?", replaced) + + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc^2?", replaced) + + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) } @@ -53,10 +68,10 @@ func TestReplaceTemplateDefaults(t *testing.T) { "what": "something", }) - replaced := r.Replace("{foobar}", "foobar", "") + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") require.Equal(t, "Hello someone! something?", replaced) - replaced = r.Replace("{foobar,who=World}", "foobar", "") + replaced = r.Replace("{foobar,who=World}", "foobar", "", nil, nil, "") require.Equal(t, "Hello World! 
something?", replaced) } @@ -72,7 +87,7 @@ func TestReplaceCompileTemplate(t *testing.T) { r := New().(*replacer) for _, e := range samples { - replaced := r.compileTemplate(e[0], e[1], nil) + replaced := r.compileTemplate(e[0], e[1], nil, nil) require.Equal(t, e[2], replaced, e[0]) } } @@ -89,10 +104,41 @@ func TestReplaceCompileTemplateDefaults(t *testing.T) { r := New().(*replacer) for _, e := range samples { - replaced := r.compileTemplate(e[0], e[1], map[string]string{ + replaced := r.compileTemplate(e[0], e[1], nil, map[string]string{ "who": "someone", "what": "something", }) require.Equal(t, e[2], replaced, e[0]) } } + +func TestReplaceCompileTemplateWithVars(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "who=$processid", "Hello 123456789!"}, + {"Hello {who}! {what}?", "who=$location", "Hello World! {what}?"}, + {"Hello {who}! {what}?", "who=$location,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=$location,what=$processid", "Hello World! 123456789?"}, + {"Hello {who}!", "who=$processidxxx", "Hello 123456789xxx!"}, + } + + vars := map[string]string{ + "processid": "123456789", + "location": "World", + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], vars, nil) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceGlob(t *testing.T) { + r := New() + r.RegisterTemplate("foo:bar", "Hello foobar", nil) + r.RegisterTemplate("foo:baz", "Hello foobaz", nil) + + replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "", nil, nil, "") + require.Equal(t, "Hello foobaz, Hello foobar", replaced) +} diff --git a/restream/restream.go b/restream/restream.go index 4c5f0b28..66f71b1b 100644 --- a/restream/restream.go +++ b/restream/restream.go @@ -290,7 +290,7 @@ func (r *restream) load() error { } // Replace all placeholders in the config - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) tasks[id] = t } @@ -463,7 +463,7 @@ func (r 
*restream) createTask(config *app.Config) (*task, error) { logger: r.logger.WithField("id", process.ID), } - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -1089,7 +1089,7 @@ func (r *restream) reloadProcess(id string) error { t.config = t.process.Config.Clone() - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -1437,3 +1437,94 @@ func (r *restream) GetMetadata(key string) (interface{}, error) { return data, nil } + +// resolvePlaceholders replaces all placeholders in the config. The config +// will be modified in place. +func resolvePlaceholders(config *app.Config, r replace.Replacer) { + vars := map[string]string{ + "processid": config.ID, + "reference": config.Reference, + } + + for i, option := range config.Options { + // Replace any known placeholders + option = r.Replace(option, "diskfs", "", vars, config, "global") + option = r.Replace(option, "fs:*", "", vars, config, "global") + + config.Options[i] = option + } + + // Resolving the given inputs + for i, input := range config.Input { + vars["inputid"] = input.ID + + // Replace any known placeholders + input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input") + input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input") + input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input") + input.Address = r.Replace(input.Address, "diskfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "memfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "fs:*", "", vars, config, "input") + input.Address = r.Replace(input.Address, "rtmp", "", vars, config, 
"input") + input.Address = r.Replace(input.Address, "srt", "", vars, config, "input") + + for j, option := range input.Options { + // Replace any known placeholders + option = r.Replace(option, "inputid", input.ID, nil, nil, "input") + option = r.Replace(option, "processid", config.ID, nil, nil, "input") + option = r.Replace(option, "reference", config.Reference, nil, nil, "input") + option = r.Replace(option, "diskfs", "", vars, config, "input") + option = r.Replace(option, "memfs", "", vars, config, "input") + option = r.Replace(option, "fs:*", "", vars, config, "input") + + input.Options[j] = option + } + + delete(vars, "inputid") + + config.Input[i] = input + } + + // Resolving the given outputs + for i, output := range config.Output { + vars["outputid"] = output.ID + + // Replace any known placeholders + output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output") + output.Address = r.Replace(output.Address, "diskfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "memfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "fs:*", "", vars, config, "output") + output.Address = r.Replace(output.Address, "rtmp", "", vars, config, "output") + output.Address = r.Replace(output.Address, "srt", "", vars, config, "output") + + for j, option := range output.Options { + // Replace any known placeholders + option = r.Replace(option, "outputid", output.ID, nil, nil, "output") + option = r.Replace(option, "processid", config.ID, nil, nil, "output") + option = r.Replace(option, "reference", config.Reference, nil, nil, "output") + option = r.Replace(option, "diskfs", "", vars, config, "output") + option = r.Replace(option, "memfs", "", vars, 
config, "output") + option = r.Replace(option, "fs:*", "", vars, config, "output") + + output.Options[j] = option + } + + for j, cleanup := range output.Cleanup { + // Replace any known placeholders + cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference, nil, nil, "output") + + output.Cleanup[j] = cleanup + } + + delete(vars, "outputid") + + config.Output[i] = output + } +} From 505fbff03f924be8ec73f00c75847438ab7eaa48 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 23 Jan 2023 11:42:17 +0100 Subject: [PATCH 22/39] Add tests --- restream/replace/replace.go | 4 +- restream/restream.go | 11 +- restream/restream_test.go | 321 +++++++++++++++++++++++++++++++++--- 3 files changed, 304 insertions(+), 32 deletions(-) diff --git a/restream/replace/replace.go b/restream/replace/replace.go index 83202ff1..e9b45adc 100644 --- a/restream/replace/replace.go +++ b/restream/replace/replace.go @@ -69,7 +69,7 @@ func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateF } } -func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, kind string) string { +func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string { str = r.re.ReplaceAllStringFunc(str, func(match string) string { matches := r.re.FindStringSubmatch(match) @@ -93,7 +93,7 @@ func (r *replacer) Replace(str, placeholder, value string, vars map[string]strin } } - v = tmpl.fn(config, kind) + v = tmpl.fn(config, section) v = r.compileTemplate(v, matches[3], vars, tmpl.defaults) if len(matches[2]) != 0 { diff --git a/restream/restream.go b/restream/restream.go index 66f71b1b..9f7c6ee1 100644 --- a/restream/restream.go +++ b/restream/restream.go @@ -1456,11 +1456,12 @@ func 
resolvePlaceholders(config *app.Config, r replace.Replacer) { // Resolving the given inputs for i, input := range config.Input { - vars["inputid"] = input.ID - // Replace any known placeholders input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input") input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input") + + vars["inputid"] = input.ID + input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input") input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input") input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input") @@ -1489,10 +1490,12 @@ func resolvePlaceholders(config *app.Config, r replace.Replacer) { // Resolving the given outputs for i, output := range config.Output { - vars["outputid"] = output.ID - // Replace any known placeholders output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output") + output.ID = r.Replace(output.ID, "reference", config.Reference, nil, nil, "output") + + vars["outputid"] = output.ID + output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output") output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output") output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output") diff --git a/restream/restream_test.go b/restream/restream_test.go index 18c53bf5..e4b0510d 100644 --- a/restream/restream_test.go +++ b/restream/restream_test.go @@ -9,11 +9,12 @@ import ( "github.com/datarhei/core/v16/internal/testhelper" "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/restream/app" + "github.com/datarhei/core/v16/restream/replace" "github.com/stretchr/testify/require" ) -func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator) (Restreamer, error) { +func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) 
{ binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper") if err != nil { return nil, fmt.Errorf("failed to build helper program: %w", err) @@ -30,7 +31,8 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp } rs, err := New(Config{ - FFmpeg: ffmpeg, + FFmpeg: ffmpeg, + Replace: replacer, }) if err != nil { return nil, err @@ -77,7 +79,7 @@ func getDummyProcess() *app.Config { } func TestAddProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -97,7 +99,7 @@ func TestAddProcess(t *testing.T) { } func TestAutostartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -112,7 +114,7 @@ func TestAutostartProcess(t *testing.T) { } func TestAddInvalidProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) // Invalid process ID @@ -180,7 +182,7 @@ func TestAddInvalidProcess(t *testing.T) { } func TestRemoveProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -195,24 +197,98 @@ func TestRemoveProcess(t *testing.T) { require.NotEqual(t, nil, err, "Unset process found (%s)", process.ID) } +func TestUpdateProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process1 := getDummyProcess() + require.NotNil(t, process1) + process1.ID = "process1" + + process2 := getDummyProcess() + require.NotNil(t, process2) + process2.ID = "process2" + + err = rs.AddProcess(process1) + require.Equal(t, nil, err) + + err = rs.AddProcess(process2) + require.Equal(t, nil, err) + + process3 := getDummyProcess() + require.NotNil(t, process3) 
+ process3.ID = "process2" + + err = rs.UpdateProcess("process1", process3) + require.Error(t, err) + + process3.ID = "process3" + err = rs.UpdateProcess("process1", process3) + require.NoError(t, err) + + _, err = rs.GetProcess(process1.ID) + require.Error(t, err) + + _, err = rs.GetProcess(process3.ID) + require.NoError(t, err) +} + func TestGetProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) - process := getDummyProcess() + process1 := getDummyProcess() + process1.ID = "foo_aaa_1" + process1.Reference = "foo_aaa_1" + process2 := getDummyProcess() + process2.ID = "bar_bbb_2" + process2.Reference = "bar_bbb_2" + process3 := getDummyProcess() + process3.ID = "foo_ccc_3" + process3.Reference = "foo_ccc_3" + process4 := getDummyProcess() + process4.ID = "bar_ddd_4" + process4.Reference = "bar_ddd_4" - rs.AddProcess(process) + rs.AddProcess(process1) + rs.AddProcess(process2) + rs.AddProcess(process3) + rs.AddProcess(process4) - _, err = rs.GetProcess(process.ID) - require.Equal(t, nil, err, "Process not found (%s)", process.ID) + _, err = rs.GetProcess(process1.ID) + require.Equal(t, nil, err) list := rs.GetProcessIDs("", "") - require.Len(t, list, 1, "expected 1 process") - require.Equal(t, process.ID, list[0], "expected same process ID") + require.Len(t, list, 4) + require.ElementsMatch(t, []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("foo_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list) + + list = rs.GetProcessIDs("bar_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("*_bbb_*", "") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) + + list = rs.GetProcessIDs("", "foo_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", 
"foo_ccc_3"}, list) + + list = rs.GetProcessIDs("", "bar_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("", "*_bbb_*") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) } func TestStartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -238,7 +314,7 @@ func TestStartProcess(t *testing.T) { } func TestStopProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -263,7 +339,7 @@ func TestStopProcess(t *testing.T) { } func TestRestartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -288,7 +364,7 @@ func TestRestartProcess(t *testing.T) { } func TestReloadProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -318,8 +394,8 @@ func TestReloadProcess(t *testing.T) { rs.StopProcess(process.ID) } -func TestProcessData(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) +func TestProcessMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -340,7 +416,7 @@ func TestProcessData(t *testing.T) { } func TestLog(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -373,7 +449,7 @@ func TestLog(t *testing.T) { } func TestPlayoutNoRange(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) 
process := getDummyProcess() @@ -396,7 +472,7 @@ func TestPlayoutRange(t *testing.T) { portrange, err := net.NewPortrange(3000, 3001) require.NoError(t, err) - rs, err := getDummyRestreamer(portrange, nil, nil) + rs, err := getDummyRestreamer(portrange, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -417,7 +493,7 @@ func TestPlayoutRange(t *testing.T) { } func TestAddressReference(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process1 := getDummyProcess() @@ -449,7 +525,7 @@ func TestAddressReference(t *testing.T) { } func TestConfigValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -496,7 +572,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil) require.NoError(t, err) - rsi, err := getDummyRestreamer(nil, valIn, valOut) + rsi, err := getDummyRestreamer(nil, valIn, valOut, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -522,7 +598,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { } func TestOutputAddressValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -561,3 +637,196 @@ func TestOutputAddressValidation(t *testing.T) { require.Equal(t, r.path, path) } } + +func TestMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + data, _ := rs.GetMetadata("foobar") + require.Equal(t, nil, data, "nothing should be stored under the key") + + rs.SetMetadata("foobar", process) + + data, _ = rs.GetMetadata("foobar") + require.NotEqual(t, nil, data, "there should be something stored under the key") + + p := data.(*app.Config) + + require.Equal(t, 
process.ID, p.ID, "failed to retrieve stored data") +} + +func TestReplacer(t *testing.T) { + replacer := replace.New() + + replacer.RegisterTemplateFunc("diskfs", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:disk", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("memfs", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:mem", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("rtmp", func(config *app.Config, section string) string { + return "rtmp://localhost/app/{name}?token=foobar" + }, nil) + + replacer.RegisterTemplateFunc("srt", func(config *app.Config, section string) string { + template := "srt://localhost:6000?mode=caller&transtype=live&latency={latency}&streamid={name}" + if section == "output" { + template += ",mode:publish" + } else { + template += ",mode:request" + } + template += ",token:abcfoobar&passphrase=secret" + + return template + }, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) + + rsi, err := getDummyRestreamer(nil, nil, nil, replacer) + require.NoError(t, err) + + process := &app.Config{ + ID: "314159265359", + Reference: "refref", + Input: []app.ConfigIO{ + { + ID: "in_{processid}_{reference}", + Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:{inputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$inputid.txt", + }, + }, + }, + 
Output: []app.ConfigIO{ + { + ID: "out_{processid}_{reference}", + Address: "output:{outputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=$processid}_srt:{srt,name=$reference,latency=42}_rtmp:{rtmp,name=$outputid}", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:{outputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_{outputid}_{processid}_{reference}_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "{diskfs}/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "{fs:disk}/foobar_on_disk_aswell.txt", + "{fs:mem}/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + err = rsi.AddProcess(process) + require.NoError(t, err) + + rs := rsi.(*restream) + + process = &app.Config{ + ID: "314159265359", + Reference: "refref", + FFVersion: "^4.0.2", + Input: []app.ConfigIO{ + { + ID: "in_314159265359_refref", + Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:in_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + "memfs:http://localhost/mnt/memfs/mem.txt", + 
"fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$inputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{}, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out_314159265359_refref", + Address: "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:out_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + "memfs:http://localhost/mnt/memfs/mem.txt", + "fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_out_314159265359_refref_314159265359_refref_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "/mnt/diskfs/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "/mnt/diskfs/foobar_on_disk_aswell.txt", + "http://localhost/mnt/memfs/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + require.Equal(t, process, rs.tasks["314159265359"].config) +} From b2cd8f713325d95f356873c955bf4b7b17c99d03 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Mon, 23 Jan 2023 17:09:55 +0100 Subject: [PATCH 23/39] Allow probe with individual timeout --- restream/restream.go | 55 +++++++++++++++++++++------------------ restream/restream_test.go | 13 +++++++++ 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/restream/restream.go b/restream/restream.go index 9f7c6ee1..f654cbf5 100644 --- 
a/restream/restream.go +++ b/restream/restream.go @@ -30,30 +30,31 @@ import ( // The Restreamer interface type Restreamer interface { - ID() string // ID of this instance - Name() string // Arbitrary name of this instance - CreatedAt() time.Time // Time of when this instance has been created - Start() // Start all processes that have a "start" order - Stop() // Stop all running process but keep their "start" order - AddProcess(config *app.Config) error // Add a new process - GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference - DeleteProcess(id string) error // Delete a process - UpdateProcess(id string, config *app.Config) error // Update a process - StartProcess(id string) error // Start a process - StopProcess(id string) error // Stop a process - RestartProcess(id string) error // Restart a process - ReloadProcess(id string) error // Reload a process - GetProcess(id string) (*app.Process, error) // Get a process - GetProcessState(id string) (*app.State, error) // Get the state of a process - GetProcessLog(id string) (*app.Log, error) // Get the logs of a process - GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process - Probe(id string) app.Probe // Probe a process - Skills() skills.Skills // Get the ffmpeg skills - ReloadSkills() error // Reload the ffmpeg skills - SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process - GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process - SetMetadata(key string, data interface{}) error // Set general metadata - GetMetadata(key string) (interface{}, error) // Get previously set general metadata + ID() string // ID of this instance + Name() string // Arbitrary name of this instance + CreatedAt() time.Time // Time of when this instance has been created + Start() // Start all processes that have a "start" order + Stop() // Stop all running 
process but keep their "start" order + AddProcess(config *app.Config) error // Add a new process + GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference + DeleteProcess(id string) error // Delete a process + UpdateProcess(id string, config *app.Config) error // Update a process + StartProcess(id string) error // Start a process + StopProcess(id string) error // Stop a process + RestartProcess(id string) error // Restart a process + ReloadProcess(id string) error // Reload a process + GetProcess(id string) (*app.Process, error) // Get a process + GetProcessState(id string) (*app.State, error) // Get the state of a process + GetProcessLog(id string) (*app.Log, error) // Get the logs of a process + GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process + Probe(id string) app.Probe // Probe a process + ProbeWithTimeout(id string, timeout time.Duration) app.Probe // Probe a process with specific timeout + Skills() skills.Skills // Get the ffmpeg skills + ReloadSkills() error // Reload the ffmpeg skills + SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process + GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process + SetMetadata(key string, data interface{}) error // Set general metadata + GetMetadata(key string) (interface{}, error) // Get previously set general metadata } // Config is the required configuration for a new restreamer instance. 
@@ -1251,6 +1252,10 @@ func (r *restream) GetProcessLog(id string) (*app.Log, error) { } func (r *restream) Probe(id string) app.Probe { + return r.ProbeWithTimeout(id, 20*time.Second) +} + +func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe { r.lock.RLock() appprobe := app.Probe{} @@ -1288,7 +1293,7 @@ func (r *restream) Probe(id string) app.Probe { ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{ Reconnect: false, ReconnectDelay: 0, - StaleTimeout: 20 * time.Second, + StaleTimeout: timeout, Command: command, Parser: prober, Logger: task.logger, diff --git a/restream/restream_test.go b/restream/restream_test.go index e4b0510d..11b08240 100644 --- a/restream/restream_test.go +++ b/restream/restream_test.go @@ -394,6 +394,19 @@ func TestReloadProcess(t *testing.T) { rs.StopProcess(process.ID) } +func TestProbeProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + rs.AddProcess(process) + + probe := rs.ProbeWithTimeout(process.ID, 5*time.Second) + + require.Equal(t, 3, len(probe.Streams)) +} + func TestProcessMetadata(t *testing.T) { rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) From 59b7978470eee407528357b15e08f4e2fd8e5240 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 24 Jan 2023 11:22:31 +0100 Subject: [PATCH 24/39] Add tests --- config/value/network_test.go | 127 +++++++++++++++++++++++++++ config/value/os_test.go | 27 ++++++ config/value/primitives_test.go | 147 ++++++++++++++++++++++++++++++++ config/value/time_test.go | 30 +++++++ config/value/value_test.go | 20 ----- 5 files changed, 331 insertions(+), 20 deletions(-) create mode 100644 config/value/network_test.go create mode 100644 config/value/os_test.go create mode 100644 config/value/primitives_test.go create mode 100644 config/value/time_test.go diff --git a/config/value/network_test.go b/config/value/network_test.go new file mode 100644 index 
00000000..add7190a --- /dev/null +++ b/config/value/network_test.go @@ -0,0 +1,127 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAddressValue(t *testing.T) { + var x string + + val := NewAddress(&x, ":8080") + + require.Equal(t, ":8080", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz:9090" + + require.Equal(t, "foobaz:9090", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz:7070") + + require.Equal(t, "fooboz:7070", x) +} + +func TestCIDRListValue(t *testing.T) { + var x []string + + val := NewCIDRList(&x, []string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"127.0.0.1/32", "127.0.0.2/32"} + + require.Equal(t, "127.0.0.1/32 127.0.0.2/32", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("129.0.0.1/32 129.0.0.2/32") + + require.Equal(t, []string{"129.0.0.1/32", "129.0.0.2/32"}, x) +} + +func TestCORSOriginaValue(t *testing.T) { + var x []string + + val := NewCORSOrigins(&x, []string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"*"} + + require.Equal(t, "*", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost") + + require.Equal(t, []string{"http://localhost"}, x) +} + +func TestPortValue(t *testing.T) { + var x int + + val := NewPort(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + 
+func TestURLValue(t *testing.T) { + var x string + + val := NewURL(&x, "http://localhost/foobar") + + require.Equal(t, "http://localhost/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "http://localhost:8080/foobar" + + require.Equal(t, "http://localhost:8080/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost:8080/fooboz/foobaz") + + require.Equal(t, "http://localhost:8080/fooboz/foobaz", x) +} + +func TestEmailValue(t *testing.T) { + var x string + + val := NewEmail(&x, "foobar@example.com") + + require.Equal(t, "foobar@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobar+baz@example.com" + + require.Equal(t, "foobar+baz@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("foobar@sub.example.com") + + require.Equal(t, "foobar@sub.example.com", x) +} diff --git a/config/value/os_test.go b/config/value/os_test.go new file mode 100644 index 00000000..dd01317a --- /dev/null +++ b/config/value/os_test.go @@ -0,0 +1,27 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAbsolutePathValue(t *testing.T) { + var x string + + val := NewAbsolutePath(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "/foobaz" + + require.Equal(t, "/foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("/fooboz") + + require.Equal(t, "/fooboz", x) +} diff --git a/config/value/primitives_test.go b/config/value/primitives_test.go new file mode 100644 index 00000000..4b815b90 --- /dev/null +++ b/config/value/primitives_test.go @@ -0,0 +1,147 @@ +package value + +import ( + "testing" + + 
"github.com/stretchr/testify/require" +) + +func TestStringValue(t *testing.T) { + var x string + + val := NewString(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz" + + require.Equal(t, "foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz") + + require.Equal(t, "fooboz", x) +} + +func TestStringListValue(t *testing.T) { + var x []string + + val := NewStringList(&x, []string{"foobar"}, " ") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = []string{"foobar", "foobaz"} + + require.Equal(t, "foobar foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz foobar") + + require.Equal(t, []string{"fooboz", "foobar"}, x) +} + +func TestStringMapStringValue(t *testing.T) { + var x map[string]string + + val := NewStringMapString(&x, map[string]string{"a": "foobar"}) + + require.Equal(t, "a:foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = map[string]string{"a": "foobar", "b": "foobaz"} + + require.Equal(t, "a:foobar b:foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("x:fooboz y:foobar") + + require.Equal(t, map[string]string{"x": "fooboz", "y": "foobar"}, x) +} + +func TestBoolValue(t *testing.T) { + var x bool + + val := NewBool(&x, false) + + require.Equal(t, "false", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = true + + require.Equal(t, "true", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("false") + + require.Equal(t, false, x) +} + +func TestIntValue(t *testing.T) { + var x int + + val := NewInt(&x, 
11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + +func TestInt64Value(t *testing.T) { + var x int64 + + val := NewInt64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int64(77), x) +} + +func TestUint64Value(t *testing.T) { + var x uint64 + + val := NewUint64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, uint64(77), x) +} diff --git a/config/value/time_test.go b/config/value/time_test.go new file mode 100644 index 00000000..3259d7d2 --- /dev/null +++ b/config/value/time_test.go @@ -0,0 +1,30 @@ +package value + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestTimeValue(t *testing.T) { + var x time.Time + + tm := time.Unix(1257894000, 0).UTC() + + val := NewTime(&x, tm) + + require.Equal(t, "2009-11-10T23:00:00Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = time.Unix(1257894001, 0).UTC() + + require.Equal(t, "2009-11-10T23:00:01Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("2009-11-11T23:00:00Z") + + require.Equal(t, time.Time(time.Date(2009, time.November, 11, 23, 0, 0, 0, time.UTC)), x) +} diff --git a/config/value/value_test.go 
b/config/value/value_test.go index aeb707be..3f36b17f 100644 --- a/config/value/value_test.go +++ b/config/value/value_test.go @@ -6,26 +6,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestIntValue(t *testing.T) { - var i int - - ivar := NewInt(&i, 11) - - require.Equal(t, "11", ivar.String()) - require.Equal(t, nil, ivar.Validate()) - require.Equal(t, false, ivar.IsEmpty()) - - i = 42 - - require.Equal(t, "42", ivar.String()) - require.Equal(t, nil, ivar.Validate()) - require.Equal(t, false, ivar.IsEmpty()) - - ivar.Set("77") - - require.Equal(t, int(77), i) -} - type testdata struct { value1 int value2 int From ad3538d224c6a37a9625c15c65c3a90242ef47d4 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 24 Jan 2023 13:40:36 +0100 Subject: [PATCH 25/39] Add tests --- config/vars/vars_test.go | 208 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) diff --git a/config/vars/vars_test.go b/config/vars/vars_test.go index 38e51fb4..c41dd77a 100644 --- a/config/vars/vars_test.go +++ b/config/vars/vars_test.go @@ -1,6 +1,7 @@ package vars import ( + "os" "testing" "github.com/datarhei/core/v16/config/value" @@ -38,3 +39,210 @@ func TestVars(t *testing.T) { x, _ = v1.Get("string") require.Equal(t, "foobar", x) } + +func TestSetDefault(t *testing.T) { + v := Variables{} + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + require.Equal(t, "foobar", s) + + v.Set("string", "foobaz") + + require.Equal(t, "foobaz", s) + + v.SetDefault("strong") + + require.Equal(t, "foobaz", s) + + v.SetDefault("string") + + require.Equal(t, "foobar", s) +} + +func TestGet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + value, err := v.Get("string") + require.NoError(t, err) + require.Equal(t, "foobar", value) + + value, err = v.Get("strong") + require.Error(t, err) + require.Equal(t, "", value) +} + +func 
TestSet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + err := v.Set("string", "foobaz") + require.NoError(t, err) + require.Equal(t, "foobaz", s) + + err = v.Set("strong", "fooboz") + require.Error(t, err) + require.Equal(t, "foobaz", s) +} + +func TestLog(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + v.Log("info", "strong", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, Variable{ + Value: "foobar", + Name: "string", + EnvName: "", + Description: "a string", + Merged: false, + }, v.logs[0].variable) + + v.ResetLogs() + + require.Equal(t, 0, len(v.logs)) +} + +func TestMerge(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 0, len(v.logs)) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestMergeAlt(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRUNG", []string{"CORE_TEST_STRING"}, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 1, len(v.logs)) + + require.Contains(t, v.logs[0].message, "CORE_TEST_STRUNG") + require.Equal(t, "warn", v.logs[0].level) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestNoMerge(t *testing.T) { + v := Variables{} + + s := 
"" + os.Setenv("CORE_TEST_STRONG", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobar") + require.Equal(t, false, v.IsMerged("string")) + + os.Unsetenv("CORE_TEST_STRONG") +} + +func TestValidate(t *testing.T) { + v := Variables{} + + s1 := "" + s2 := "" + + v.Register(value.NewString(&s1, ""), "string", "", nil, "a string", false, false) + v.Register(value.NewString(&s2, ""), "string", "", nil, "a string", true, false) + + require.Equal(t, s1, "") + require.Equal(t, s2, "") + + require.Equal(t, false, v.HasErrors()) + + v.Validate() + + require.Equal(t, true, v.HasErrors()) + + ninfo := 0 + nerror := 0 + v.Messages(func(level string, v Variable, message string) { + if level == "info" { + ninfo++ + } else if level == "error" { + nerror++ + } + }) + + require.Equal(t, 2, ninfo) + require.Equal(t, 1, nerror) +} + +func TestOverrides(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + v.Merge() + + overrides := v.Overrides() + + require.ElementsMatch(t, []string{"string"}, overrides) +} + +func TestDisquise(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, true) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, Variable{ + Value: "***", + Name: "string", + EnvName: "", + Description: "a string", + Merged: false, + }, v.logs[0].variable) +} From a8e86a71118a3f9795dfca35c98d66025e3a2ef6 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 24 Jan 2023 16:08:11 +0100 Subject: [PATCH 26/39] Add tests --- monitor/metric/metric.go | 32 +++++-- 
monitor/metric/metric_test.go | 151 +++++++++++++++++++++++++++++++--- 2 files changed, 167 insertions(+), 16 deletions(-) diff --git a/monitor/metric/metric.go b/monitor/metric/metric.go index a327c6d0..f2e88e42 100644 --- a/monitor/metric/metric.go +++ b/monitor/metric/metric.go @@ -12,7 +12,7 @@ type Pattern interface { Name() string // Match returns whether a map of labels with its label values - // match this pattern. + // match this pattern. All labels have to be present and need to match. Match(labels map[string]string) bool // IsValid returns whether the pattern is valid. @@ -26,7 +26,7 @@ type pattern struct { } // NewPattern creates a new pattern with the given prefix and group name. There -// has to be an even number of parameter, which is ("label", "labelvalue", "label", +// has to be an even number of labels, which is ("label", "labelvalue", "label", // "labelvalue" ...). The label value will be interpreted as regular expression. func NewPattern(name string, labels ...string) Pattern { p := &pattern{ @@ -38,7 +38,6 @@ func NewPattern(name string, labels ...string) Pattern { for i := 0; i < len(labels); i += 2 { exp, err := regexp.Compile(labels[i+1]) if err != nil { - fmt.Printf("error: %s\n", err) continue } @@ -84,19 +83,35 @@ func (p *pattern) IsValid() bool { return p.valid } +// Metrics is a collection of values type Metrics interface { + // Value returns the first value that matches the name and the labels. The labels + // are used to create a pattern and therefore must obey to the rules of NewPattern. Value(name string, labels ...string) Value + + // Values returns all values that matches the name and the labels. The labels + // are used to create a pattern and therefore must obey to the rules of NewPattern. Values(name string, labels ...string) []Value + + // Labels return a list of all values for a label. Labels(name string, label string) []string + + // All returns all values currently stored in the collection. 
All() []Value + + // Add adds a value to the collection. Add(v Value) + + // String return a string representation of all collected values. String() string } +// metrics is an implementation of the Metrics interface. type metrics struct { values []Value } +// NewMetrics returns a new metrics instance. func NewMetrics() *metrics { return &metrics{} } @@ -231,8 +246,15 @@ func (v *value) Hash() string { func (v *value) String() string { s := fmt.Sprintf("%s: %f {", v.name, v.value) - for k, v := range v.labels { - s += k + "=" + v + " " + keys := []string{} + for k := range v.labels { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, k := range keys { + s += k + "=" + v.labels[k] + " " } s += "}" diff --git a/monitor/metric/metric_test.go b/monitor/metric/metric_test.go index 743739a7..615ce7cb 100644 --- a/monitor/metric/metric_test.go +++ b/monitor/metric/metric_test.go @@ -2,25 +2,154 @@ package metric import ( "testing" + + "github.com/stretchr/testify/require" ) +func TestPattern(t *testing.T) { + p := NewPattern("bla", "label1", "value1", "label2") + require.Equal(t, false, p.IsValid()) + + p = NewPattern("bla", "label1", "value1", "label2", "valu(e2") + require.Equal(t, false, p.IsValid()) + + p = NewPattern("bla") + require.Equal(t, true, p.IsValid()) + require.Equal(t, "bla", p.Name()) + + p = NewPattern("bla", "label1", "value1", "label2", "value2") + require.Equal(t, true, p.IsValid()) +} + +func TestPatternMatch(t *testing.T) { + p := NewPattern("bla", "label1", "value1", "label2") + require.Equal(t, false, p.IsValid()) + require.Equal(t, false, p.Match(map[string]string{"label1": "value1"})) + + p0 := NewPattern("bla") + require.Equal(t, true, p0.IsValid()) + require.Equal(t, true, p0.Match(map[string]string{})) + require.Equal(t, true, p0.Match(map[string]string{"labelX": "foobar"})) + + p = NewPattern("bla", "label1", "value.", "label2", "val?ue2") + require.Equal(t, true, p.IsValid()) + require.Equal(t, false, 
p.Match(map[string]string{})) + require.Equal(t, false, p.Match(map[string]string{"label1": "value1"})) + require.Equal(t, true, p.Match(map[string]string{"label1": "value1", "label2": "value2"})) + require.Equal(t, true, p.Match(map[string]string{"label1": "value5", "label2": "vaue2"})) +} + func TestValue(t *testing.T) { - d := NewDesc("group", "", []string{"name"}) + d := NewDesc("group", "", []string{"label1", "label2"}) v := NewValue(d, 42, "foobar") + require.Nil(t, v) + + v = NewValue(d, 42, "foobar", "foobaz") + require.NotNil(t, v) + require.Equal(t, float64(42), v.Val()) + + require.Equal(t, "", v.L("labelX")) + require.Equal(t, "foobar", v.L("label1")) + require.Equal(t, "foobaz", v.L("label2")) + require.Equal(t, "group", v.Name()) + require.Equal(t, "group:label1=foobar label2=foobaz ", v.Hash()) + require.Equal(t, "group: 42.000000 {label1=foobar label2=foobaz }", v.String()) - if v.L("name") != "foobar" { - t.Fatalf("label name doesn't have the expected value") - } + require.Equal(t, map[string]string{"label1": "foobar", "label2": "foobaz"}, v.Labels()) +} + +func TestValuePattern(t *testing.T) { + d := NewDesc("group", "", []string{"label1", "label2"}) + v := NewValue(d, 42, "foobar", "foobaz") p1 := NewPattern("group") + p2 := NewPattern("group", "label1", "foobar") + p3 := NewPattern("group", "label2", "foobaz") + p4 := NewPattern("group", "label2", "foobaz", "label1", "foobar") + + require.Equal(t, true, v.Match(nil)) + require.Equal(t, true, v.Match([]Pattern{p1})) + require.Equal(t, true, v.Match([]Pattern{p2})) + require.Equal(t, true, v.Match([]Pattern{p3})) + require.Equal(t, true, v.Match([]Pattern{p4})) + require.Equal(t, true, v.Match([]Pattern{p1, p2, p3, p4})) + + p5 := NewPattern("group", "label1", "foobaz") + + require.Equal(t, false, v.Match([]Pattern{p5})) + + require.Equal(t, true, v.Match([]Pattern{p4, p5})) + require.Equal(t, true, v.Match([]Pattern{p5, p4})) +} + +func TestDescription(t *testing.T) { + d := NewDesc("name", 
"blabla", []string{"label"}) + + require.Equal(t, "name", d.Name()) + require.Equal(t, "blabla", d.Description()) + require.ElementsMatch(t, []string{"label"}, d.Labels()) + require.Equal(t, "name: blabla (label)", d.String()) +} + +func TestMetri(t *testing.T) { + m := NewMetrics() + + require.Equal(t, "", m.String()) + require.Equal(t, 0, len(m.All())) + + d := NewDesc("group", "", []string{"label1", "label2"}) + v1 := NewValue(d, 42, "foobar", "foobaz") + require.NotNil(t, v1) + + m.Add(v1) + + require.Equal(t, v1.String(), m.String()) + require.Equal(t, 1, len(m.All())) + + l := m.Labels("group", "label2") + + require.ElementsMatch(t, []string{"foobaz"}, l) + + v2 := NewValue(d, 77, "barfoo", "bazfoo") + + m.Add(v2) + + require.Equal(t, v1.String()+v2.String(), m.String()) + require.Equal(t, 2, len(m.All())) + + l = m.Labels("group", "label2") + + require.ElementsMatch(t, []string{"foobaz", "bazfoo"}, l) + + v := m.Value("bla", "label1", "foo*") + + require.Equal(t, nullValue, v) + + v = m.Value("group") + + require.NotEqual(t, nullValue, v) + + v = m.Value("group", "label1", "foo*") + + require.NotEqual(t, nullValue, v) + + v = m.Value("group", "label2", "baz") + + require.NotEqual(t, nullValue, v) + + vs := m.Values("group") + + require.Equal(t, 2, len(vs)) + + vs = m.Values("group", "label1", "foo*") + + require.Equal(t, 2, len(vs)) + + vs = m.Values("group", "label2", "*baz*") - if v.Match([]Pattern{p1}) == false { - t.Fatalf("pattern p1 should have matched") - } + require.NotEqual(t, 2, len(vs)) - p2 := NewPattern("group", "name", "foobar") + vs = m.Values("group", "label1") - if v.Match([]Pattern{p2}) == false { - t.Fatalf("pattern p2 should have matched") - } + require.Equal(t, 0, len(vs)) } From 8c0f2ebabc640e63f3e34fa71fd5edabeab9dc6b Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 24 Jan 2023 16:45:28 +0100 Subject: [PATCH 27/39] Add tests --- net/ip_test.go | 19 +++++++++++---- net/iplimit_test.go | 32 +++++++++++++++----------- 
net/port_test.go | 56 +++++++++++++++++++++++++++++++-------------- net/url/url_test.go | 32 +++++++++++++++++++++++++- 4 files changed, 103 insertions(+), 36 deletions(-) diff --git a/net/ip_test.go b/net/ip_test.go index eaca6bc3..bd9bd575 100644 --- a/net/ip_test.go +++ b/net/ip_test.go @@ -3,18 +3,27 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAnonymizeIPString(t *testing.T) { + _, err := AnonymizeIPString("127.987.475.21") + require.Error(t, err) + + _, err = AnonymizeIPString("bbd1:xxxx") + require.Error(t, err) + + _, err = AnonymizeIPString("hello-world") + require.Error(t, err) + ipv4 := "192.168.1.42" ipv6 := "bbd1:e95a:adbb:b29a:e38b:577f:6f9a:1fa7" anonymizedIPv4, err := AnonymizeIPString(ipv4) - assert.Nil(t, err) - assert.Equal(t, "192.168.1.0", anonymizedIPv4) + require.NoError(t, err) + require.Equal(t, "192.168.1.0", anonymizedIPv4) anonymizedIPv6, err := AnonymizeIPString(ipv6) - assert.Nil(t, err) - assert.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6) + require.NoError(t, err) + require.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6) } diff --git a/net/iplimit_test.go b/net/iplimit_test.go index 7016cc12..3e6a2d1c 100644 --- a/net/iplimit_test.go +++ b/net/iplimit_test.go @@ -3,57 +3,63 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIPLimiterNew(t *testing.T) { var err error _, err = NewIPLimiter([]string{}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{"::1/128", "127.0.0.1/32", ""}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{}, []string{"::1/128", "127.0.0.1/32", ""}) - assert.Nil(t, err) + require.Nil(t, err) } func TestIPLimiterError(t *testing.T) { var err error _, err = NewIPLimiter([]string{}, []string{}) - assert.Nil(t, err) + require.Nil(t, err) _, err = NewIPLimiter([]string{"::1"}, []string{}) 
- assert.NotNil(t, err, "Should not accept invalid IP") + require.NotNil(t, err, "Should not accept invalid IP") _, err = NewIPLimiter([]string{}, []string{"::1"}) - assert.NotNil(t, err, "Should not accept invalid IP") + require.NotNil(t, err, "Should not accept invalid IP") } func TestIPLimiterInvalidIPs(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{}) - assert.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed") + require.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed") } func TestIPLimiterNoIPs(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{}) - assert.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed") + require.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed") } func TestIPLimiterAllowlist(t *testing.T) { limiter, _ := NewIPLimiter([]string{}, []string{"::1/128"}) - assert.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed") - assert.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed") + require.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed") + require.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed") } func TestIPLimiterBlocklist(t *testing.T) { limiter, _ := NewIPLimiter([]string{"::1/128"}, []string{}) - assert.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed") - assert.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed") + require.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed") + require.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed") +} + +func TestNullIPLimiter(t *testing.T) { + limiter := NewNullIPLimiter() + + require.True(t, limiter.IsAllowed("foobar")) } diff --git a/net/port_test.go b/net/port_test.go index 019afcf0..dec2d5b9 100644 --- a/net/port_test.go +++ b/net/port_test.go @@ -3,19 +3,30 @@ package net import ( "testing" - "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" ) func TestNewPortrange(t *testing.T) { _, err := NewPortrange(1000, 1999) - assert.Nil(t, err, "Valid port range not accepted: %s", err) + require.Nil(t, err, "Valid port range not accepted: %s", err) } func TestInvalidPortrange(t *testing.T) { _, err := NewPortrange(1999, 1000) - assert.NotNil(t, err, "Invalid port range accepted") + require.NotNil(t, err, "Invalid port range accepted") +} + +func TestOutOfRangePortrange(t *testing.T) { + p, err := NewPortrange(-1, 70000) + + require.NoError(t, err) + + portrange := p.(*portrange) + + require.Equal(t, 1, portrange.min) + require.Equal(t, 65535, len(portrange.ports)) } func TestGetPort(t *testing.T) { @@ -23,26 +34,26 @@ func TestGetPort(t *testing.T) { port, err := portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) } func TestGetPutPort(t *testing.T) { portrange, _ := NewPortrange(1000, 1999) port, err := portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) port, err = portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1001, port) + require.Nil(t, err) + require.Equal(t, 1001, port) portrange.Put(1000) port, err = portrange.Get() - assert.Nil(t, err) - assert.Equal(t, 1000, port) + require.Nil(t, err) + require.Equal(t, 1000, port) } func TestPortUnavailable(t *testing.T) { @@ -50,12 +61,12 @@ func TestPortUnavailable(t *testing.T) { for i := 0; i < 1000; i++ { port, _ := portrange.Get() - assert.Equal(t, 1000+i, port, "at index %d", i) + require.Equal(t, 1000+i, port, "at index %d", i) } port, err := portrange.Get() - assert.NotNil(t, err) - assert.Less(t, port, 0) + require.NotNil(t, err) + require.Less(t, port, 0) } func TestPutPort(t *testing.T) { @@ -73,16 +84,27 @@ func TestClampRange(t *testing.T) { port, _ := portrange.Get() - assert.Equal(t, 65000, port) + require.Equal(t, 65000, port) portrange.Put(65000) for i := 65000; i <= 
65535; i++ { port, _ := portrange.Get() - assert.Equal(t, i, port, "at index %d", i) + require.Equal(t, i, port, "at index %d", i) } port, _ = portrange.Get() - assert.Less(t, port, 0) + require.Less(t, port, 0) +} + +func TestDummyPortranger(t *testing.T) { + portrange := NewDummyPortrange() + + port, err := portrange.Get() + + require.Error(t, err) + require.Equal(t, 0, port) + + portrange.Put(42) } diff --git a/net/url/url_test.go b/net/url/url_test.go index 977a5123..460663e7 100644 --- a/net/url/url_test.go +++ b/net/url/url_test.go @@ -7,9 +7,20 @@ import ( ) func TestLookup(t *testing.T) { - _, err := Lookup("https://www.google.com") + ip, err := Lookup("/localhost:8080/foobar") require.NoError(t, err) + require.Equal(t, "", ip) + + ip, err = Lookup("http://") + + require.NoError(t, err) + require.Equal(t, "", ip) + + ip, err = Lookup("https://www.google.com") + + require.NoError(t, err) + require.NotEmpty(t, ip) } func TestLocalhost(t *testing.T) { @@ -18,3 +29,22 @@ func TestLocalhost(t *testing.T) { require.NoError(t, err) require.Subset(t, []string{"127.0.0.1", "::1"}, []string{ip}) } + +func TestValidate(t *testing.T) { + err := Validate("http://localhost/foobar") + require.NoError(t, err) + + err = Validate("foobar") + require.NoError(t, err) +} + +func TestScheme(t *testing.T) { + r := HasScheme("http://localhost/foobar") + require.True(t, r) + + r = HasScheme("iueriherfd://localhost/foobar") + require.True(t, r) + + r = HasScheme("//localhost/foobar") + require.False(t, r) +} From c05e16b6a0f8088a6701cc03f1d0122e66209134 Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 24 Jan 2023 21:04:24 +0100 Subject: [PATCH 28/39] Add tests --- log/log.go | 9 --- log/log_test.go | 82 ++++++++++++-------- log/writer_test.go | 181 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 233 insertions(+), 39 deletions(-) create mode 100644 log/writer_test.go diff --git a/log/log.go b/log/log.go index be226028..14a78e2c 100644 --- a/log/log.go +++ 
b/log/log.go @@ -103,7 +103,6 @@ type Logger interface { type logger struct { output Writer component string - topics map[string]struct{} } // New returns an implementation of the Logger interface. @@ -121,14 +120,6 @@ func (l *logger) clone() *logger { component: l.component, } - if len(l.topics) != 0 { - clone.topics = make(map[string]struct{}) - - for topic := range l.topics { - clone.topics[topic] = struct{}{} - } - } - return clone } diff --git a/log/log_test.go b/log/log_test.go index 1a04a1f0..3ed0910c 100644 --- a/log/log_test.go +++ b/log/log_test.go @@ -5,15 +5,15 @@ import ( "bytes" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLoglevelNames(t *testing.T) { - assert.Equal(t, "DEBUG", Ldebug.String()) - assert.Equal(t, "ERROR", Lerror.String()) - assert.Equal(t, "WARN", Lwarn.String()) - assert.Equal(t, "INFO", Linfo.String()) - assert.Equal(t, `SILENT`, Lsilent.String()) + require.Equal(t, "DEBUG", Ldebug.String()) + require.Equal(t, "ERROR", Lerror.String()) + require.Equal(t, "WARN", Lwarn.String()) + require.Equal(t, "INFO", Linfo.String()) + require.Equal(t, `SILENT`, Lsilent.String()) } func TestLogColorToNotTTY(t *testing.T) { @@ -23,7 +23,7 @@ func TestLogColorToNotTTY(t *testing.T) { w := NewConsoleWriter(writer, Linfo, true).(*syncWriter) formatter := w.writer.(*consoleWriter).formatter.(*consoleFormatter) - assert.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger") + require.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger") } func TestLogContext(t *testing.T) { @@ -53,7 +53,7 @@ func TestLogContext(t *testing.T) { lenWithoutCtx := buffer.Len() buffer.Reset() - assert.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context") + require.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context") } func TestLogClone(t *testing.T) { @@ -65,7 
+65,7 @@ func TestLogClone(t *testing.T) { logger.Info().Log("info") writer.Flush() - assert.Contains(t, buffer.String(), `component="test"`) + require.Contains(t, buffer.String(), `component="test"`) buffer.Reset() @@ -74,7 +74,7 @@ func TestLogClone(t *testing.T) { logger2.Info().Log("info") writer.Flush() - assert.Contains(t, buffer.String(), `component="tset"`) + require.Contains(t, buffer.String(), `component="tset"`) } func TestLogSilent(t *testing.T) { @@ -85,22 +85,22 @@ func TestLogSilent(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() } @@ -112,22 +112,22 @@ func TestLogDebug(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + 
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -139,22 +139,22 @@ func TestLogInfo(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -166,22 +166,22 @@ func TestLogWarn(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } @@ -193,21 +193,43 @@ func TestLogError(t *testing.T) { logger.Debug().Log("debug") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Info().Log("info") writer.Flush() - 
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Warn().Log("warn") writer.Flush() - assert.Equal(t, 0, buffer.Len(), "Buffer should be empty") + require.Equal(t, 0, buffer.Len(), "Buffer should be empty") buffer.Reset() logger.Error().Log("error") writer.Flush() - assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") + require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty") buffer.Reset() } + +func TestLogWithField(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + + logger := New("test").WithOutput(bufwriter) + logger = logger.WithField("foo", "bar") + logger.Info().Log("hello") + + events := bufwriter.Events() + + require.Equal(t, 1, len(events)) + require.Empty(t, events[0].err) + require.Equal(t, "bar", events[0].Data["foo"]) + + logger = logger.WithField("func", func() bool { return true }) + logger.Info().Log("hello") + + events = bufwriter.Events() + require.Equal(t, 2, len(events)) + require.NotEmpty(t, events[1].err) + require.Equal(t, "bar", events[0].Data["foo"]) +} diff --git a/log/writer_test.go b/log/writer_test.go new file mode 100644 index 00000000..7951cf29 --- /dev/null +++ b/log/writer_test.go @@ -0,0 +1,181 @@ +package log + +import ( + "bytes" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestJSONWriter(t *testing.T) { + buffer := bytes.Buffer{} + + writer := NewJSONWriter(&buffer, Linfo) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, `{"Time":"2009-11-10T23:00:00Z","Level":"INFO","Component":"test","Caller":"me","Message":"hello world","Data":{"caller":"me","component":"test","foo":"bar","message":"hello world","ts":"2009-11-10T23:00:00Z"}}`, buffer.String()) +} + +func 
TestConsoleWriter(t *testing.T) { + buffer := bytes.Buffer{} + + writer := NewConsoleWriter(&buffer, Linfo, false) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, `ts=2009-11-10T23:00:00Z level=INFO component="test" msg="hello world" foo="bar"`+"\n", buffer.String()) +} + +func TestTopicWriter(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + writer1 := NewTopicWriter(bufwriter, []string{}) + writer2 := NewTopicWriter(bufwriter, []string{"foobar"}) + + writer1.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + writer2.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 1, len(bufwriter.Events())) + + writer1.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + writer2.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 3, len(bufwriter.Events())) +} + +func TestMultiwriter(t *testing.T) { + bufwriter1 := NewBufferWriter(Linfo, 10) + bufwriter2 := NewBufferWriter(Linfo, 10) + + writer := NewMultiWriter(bufwriter1, bufwriter2) + + 
writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + require.Equal(t, 1, len(bufwriter1.Events())) + require.Equal(t, 1, len(bufwriter2.Events())) +} + +func TestLevelRewriter(t *testing.T) { + bufwriter := NewBufferWriter(Linfo, 10) + + rule := LevelRewriteRule{ + Level: Lwarn, + Component: "foobar", + Match: map[string]string{ + "foo": "bar", + }, + } + + writer := NewLevelRewriter(bufwriter, []LevelRewriteRule{rule}) + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + events := bufwriter.Events() + + require.Equal(t, 1, len(events)) + require.Equal(t, Lwarn, events[0].Level) + + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "foobar", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"bar": "foo"}, + }) + + events = bufwriter.Events() + + require.Equal(t, 2, len(events)) + require.Equal(t, Linfo, events[1].Level) + + writer.Write(&Event{ + logger: &logger{}, + Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + Level: Linfo, + Component: "test", + Caller: "me", + Message: "hello world", + err: "", + Data: map[string]interface{}{"foo": "bar"}, + }) + + events = bufwriter.Events() + + require.Equal(t, 3, len(events)) + require.Equal(t, Linfo, events[2].Level) +} From f519acfd719fa24d3fdc87cb1a4a8deb4b657fbd Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Tue, 31 Jan 2023 14:45:58 +0100 Subject: [PATCH 29/39] Add S3 storage support --- README.md | 21 +- app/api/api.go | 175 +- config/config.go | 7 +- config/data.go | 3 + 
config/value/s3.go | 179 + docs/docs.go | 484 +- docs/swagger.json | 484 +- docs/swagger.yaml | 319 +- glob/glob.go | 3 + go.mod | 13 +- go.sum | 22 + http/api/fs.go | 7 + http/fs/fs.go | 25 + http/handler/api/diskfs.go | 215 - http/handler/api/filesystems.go | 146 + http/handler/api/memfs.go | 177 - http/handler/diskfs.go | 88 - http/handler/filesystem.go | 164 + http/handler/memfs.go | 130 - http/server.go | 235 +- io/fs/disk.go | 22 +- io/fs/dummy.go | 14 +- io/fs/fs.go | 8 +- io/fs/mem.go | 18 +- io/fs/s3.go | 389 + restream/fs/fs.go | 5 + restream/restream.go | 167 +- .../github.com/dustin/go-humanize/.travis.yml | 21 + vendor/github.com/dustin/go-humanize/LICENSE | 21 + .../dustin/go-humanize/README.markdown | 124 + vendor/github.com/dustin/go-humanize/big.go | 31 + .../github.com/dustin/go-humanize/bigbytes.go | 173 + vendor/github.com/dustin/go-humanize/bytes.go | 143 + vendor/github.com/dustin/go-humanize/comma.go | 116 + .../github.com/dustin/go-humanize/commaf.go | 40 + vendor/github.com/dustin/go-humanize/ftoa.go | 46 + .../github.com/dustin/go-humanize/humanize.go | 8 + .../github.com/dustin/go-humanize/number.go | 192 + .../github.com/dustin/go-humanize/ordinals.go | 25 + vendor/github.com/dustin/go-humanize/si.go | 123 + vendor/github.com/dustin/go-humanize/times.go | 117 + .../github.com/json-iterator/go/.codecov.yml | 3 + vendor/github.com/json-iterator/go/.gitignore | 4 + .../github.com/json-iterator/go/.travis.yml | 14 + vendor/github.com/json-iterator/go/Gopkg.lock | 21 + vendor/github.com/json-iterator/go/Gopkg.toml | 26 + vendor/github.com/json-iterator/go/LICENSE | 21 + vendor/github.com/json-iterator/go/README.md | 85 + vendor/github.com/json-iterator/go/adapter.go | 150 + vendor/github.com/json-iterator/go/any.go | 325 + .../github.com/json-iterator/go/any_array.go | 278 + .../github.com/json-iterator/go/any_bool.go | 137 + .../github.com/json-iterator/go/any_float.go | 83 + .../github.com/json-iterator/go/any_int32.go | 74 + 
.../github.com/json-iterator/go/any_int64.go | 74 + .../json-iterator/go/any_invalid.go | 82 + vendor/github.com/json-iterator/go/any_nil.go | 69 + .../github.com/json-iterator/go/any_number.go | 123 + .../github.com/json-iterator/go/any_object.go | 374 + vendor/github.com/json-iterator/go/any_str.go | 166 + .../github.com/json-iterator/go/any_uint32.go | 74 + .../github.com/json-iterator/go/any_uint64.go | 74 + vendor/github.com/json-iterator/go/build.sh | 12 + vendor/github.com/json-iterator/go/config.go | 375 + .../go/fuzzy_mode_convert_table.md | 7 + vendor/github.com/json-iterator/go/iter.go | 349 + .../github.com/json-iterator/go/iter_array.go | 64 + .../github.com/json-iterator/go/iter_float.go | 342 + .../github.com/json-iterator/go/iter_int.go | 346 + .../json-iterator/go/iter_object.go | 267 + .../github.com/json-iterator/go/iter_skip.go | 130 + .../json-iterator/go/iter_skip_sloppy.go | 163 + .../json-iterator/go/iter_skip_strict.go | 99 + .../github.com/json-iterator/go/iter_str.go | 215 + .../github.com/json-iterator/go/jsoniter.go | 18 + vendor/github.com/json-iterator/go/pool.go | 42 + vendor/github.com/json-iterator/go/reflect.go | 337 + .../json-iterator/go/reflect_array.go | 104 + .../json-iterator/go/reflect_dynamic.go | 70 + .../json-iterator/go/reflect_extension.go | 483 + .../json-iterator/go/reflect_json_number.go | 112 + .../go/reflect_json_raw_message.go | 76 + .../json-iterator/go/reflect_map.go | 346 + .../json-iterator/go/reflect_marshaler.go | 225 + .../json-iterator/go/reflect_native.go | 453 + .../json-iterator/go/reflect_optional.go | 129 + .../json-iterator/go/reflect_slice.go | 99 + .../go/reflect_struct_decoder.go | 1097 + .../go/reflect_struct_encoder.go | 211 + vendor/github.com/json-iterator/go/stream.go | 210 + .../json-iterator/go/stream_float.go | 111 + .../github.com/json-iterator/go/stream_int.go | 190 + .../github.com/json-iterator/go/stream_str.go | 372 + vendor/github.com/json-iterator/go/test.sh | 12 + 
vendor/github.com/klauspost/compress/LICENSE | 304 + .../klauspost/compress/s2/.gitignore | 15 + .../github.com/klauspost/compress/s2/LICENSE | 28 + .../klauspost/compress/s2/README.md | 965 + .../klauspost/compress/s2/decode.go | 1046 + .../klauspost/compress/s2/decode_amd64.s | 568 + .../klauspost/compress/s2/decode_arm64.s | 574 + .../klauspost/compress/s2/decode_asm.go | 17 + .../klauspost/compress/s2/decode_other.go | 267 + .../klauspost/compress/s2/encode.go | 1341 ++ .../klauspost/compress/s2/encode_all.go | 456 + .../klauspost/compress/s2/encode_amd64.go | 142 + .../klauspost/compress/s2/encode_best.go | 630 + .../klauspost/compress/s2/encode_better.go | 431 + .../klauspost/compress/s2/encode_go.go | 307 + .../compress/s2/encodeblock_amd64.go | 191 + .../klauspost/compress/s2/encodeblock_amd64.s | 17779 ++++++++++++++++ .../github.com/klauspost/compress/s2/index.go | 598 + vendor/github.com/klauspost/compress/s2/s2.go | 143 + vendor/github.com/minio/md5-simd/LICENSE | 202 + .../github.com/minio/md5-simd/LICENSE.Golang | 27 + vendor/github.com/minio/md5-simd/README.md | 198 + .../github.com/minio/md5-simd/block16_amd64.s | 228 + .../github.com/minio/md5-simd/block8_amd64.s | 281 + .../github.com/minio/md5-simd/block_amd64.go | 210 + .../minio/md5-simd/md5-digest_amd64.go | 188 + .../minio/md5-simd/md5-server_amd64.go | 397 + .../minio/md5-simd/md5-server_fallback.go | 12 + .../minio/md5-simd/md5-util_amd64.go | 85 + vendor/github.com/minio/md5-simd/md5.go | 63 + .../minio/md5-simd/md5block_amd64.go | 11 + .../minio/md5-simd/md5block_amd64.s | 714 + .../github.com/minio/minio-go/v7/.gitignore | 5 + .../minio/minio-go/v7/.golangci.yml | 27 + vendor/github.com/minio/minio-go/v7/CNAME | 1 + .../minio/minio-go/v7/CONTRIBUTING.md | 22 + vendor/github.com/minio/minio-go/v7/LICENSE | 202 + .../minio/minio-go/v7/MAINTAINERS.md | 35 + vendor/github.com/minio/minio-go/v7/Makefile | 36 + vendor/github.com/minio/minio-go/v7/NOTICE | 9 + 
vendor/github.com/minio/minio-go/v7/README.md | 246 + .../minio/minio-go/v7/README_zh_CN.md | 260 + .../minio-go/v7/api-bucket-encryption.go | 134 + .../minio/minio-go/v7/api-bucket-lifecycle.go | 147 + .../minio-go/v7/api-bucket-notification.go | 254 + .../minio/minio-go/v7/api-bucket-policy.go | 147 + .../minio-go/v7/api-bucket-replication.go | 291 + .../minio/minio-go/v7/api-bucket-tagging.go | 135 + .../minio-go/v7/api-bucket-versioning.go | 146 + .../minio/minio-go/v7/api-compose-object.go | 592 + .../minio/minio-go/v7/api-copy-object.go | 77 + .../minio/minio-go/v7/api-datatypes.go | 190 + .../minio/minio-go/v7/api-error-response.go | 295 + .../minio/minio-go/v7/api-get-object-acl.go | 152 + .../minio/minio-go/v7/api-get-object-file.go | 127 + .../minio/minio-go/v7/api-get-object.go | 690 + .../minio/minio-go/v7/api-get-options.go | 150 + .../github.com/minio/minio-go/v7/api-list.go | 973 + .../minio-go/v7/api-object-legal-hold.go | 176 + .../minio/minio-go/v7/api-object-lock.go | 241 + .../minio/minio-go/v7/api-object-retention.go | 165 + .../minio/minio-go/v7/api-object-tagging.go | 157 + .../minio/minio-go/v7/api-presigned.go | 228 + .../minio/minio-go/v7/api-put-bucket.go | 123 + .../minio-go/v7/api-put-object-common.go | 149 + .../v7/api-put-object-file-context.go | 64 + .../minio-go/v7/api-put-object-multipart.go | 435 + .../minio-go/v7/api-put-object-streaming.go | 542 + .../minio/minio-go/v7/api-put-object.go | 424 + .../minio-go/v7/api-putobject-snowball.go | 215 + .../minio/minio-go/v7/api-remove.go | 544 + .../minio/minio-go/v7/api-restore.go | 182 + .../minio/minio-go/v7/api-s3-datatypes.go | 379 + .../minio/minio-go/v7/api-select.go | 757 + .../github.com/minio/minio-go/v7/api-stat.go | 115 + vendor/github.com/minio/minio-go/v7/api.go | 931 + .../minio/minio-go/v7/bucket-cache.go | 259 + .../minio/minio-go/v7/code_of_conduct.md | 80 + .../github.com/minio/minio-go/v7/constants.go | 101 + vendor/github.com/minio/minio-go/v7/core.go | 128 + 
.../minio/minio-go/v7/functional_tests.go | 12391 +++++++++++ .../minio/minio-go/v7/hook-reader.go | 101 + .../v7/pkg/credentials/assume_role.go | 235 + .../minio-go/v7/pkg/credentials/chain.go | 88 + .../v7/pkg/credentials/config.json.sample | 17 + .../v7/pkg/credentials/credentials.go | 193 + .../v7/pkg/credentials/credentials.sample | 12 + .../minio/minio-go/v7/pkg/credentials/doc.go | 60 + .../minio-go/v7/pkg/credentials/env_aws.go | 71 + .../minio-go/v7/pkg/credentials/env_minio.go | 68 + .../v7/pkg/credentials/error_response.go | 96 + .../pkg/credentials/file_aws_credentials.go | 119 + .../v7/pkg/credentials/file_minio_client.go | 134 + .../minio-go/v7/pkg/credentials/iam_aws.go | 377 + .../v7/pkg/credentials/signature_type.go | 77 + .../minio-go/v7/pkg/credentials/static.go | 67 + .../v7/pkg/credentials/sts_client_grants.go | 182 + .../v7/pkg/credentials/sts_custom_identity.go | 146 + .../v7/pkg/credentials/sts_ldap_identity.go | 189 + .../v7/pkg/credentials/sts_tls_identity.go | 209 + .../v7/pkg/credentials/sts_web_identity.go | 205 + .../minio-go/v7/pkg/encrypt/fips_disabled.go | 24 + .../minio-go/v7/pkg/encrypt/fips_enabled.go | 24 + .../minio-go/v7/pkg/encrypt/server-side.go | 198 + .../minio-go/v7/pkg/lifecycle/lifecycle.go | 458 + .../minio-go/v7/pkg/notification/info.go | 78 + .../v7/pkg/notification/notification.go | 398 + .../v7/pkg/replication/replication.go | 746 + .../minio/minio-go/v7/pkg/s3utils/utils.go | 426 + .../minio/minio-go/v7/pkg/set/stringset.go | 200 + .../pkg/signer/request-signature-streaming.go | 306 + .../v7/pkg/signer/request-signature-v2.go | 319 + .../v7/pkg/signer/request-signature-v4.go | 331 + .../minio/minio-go/v7/pkg/signer/utils.go | 62 + .../minio/minio-go/v7/pkg/sse/sse.go | 66 + .../minio/minio-go/v7/pkg/tags/tags.go | 404 + .../minio/minio-go/v7/post-policy.go | 326 + .../minio/minio-go/v7/retry-continous.go | 69 + vendor/github.com/minio/minio-go/v7/retry.go | 148 + .../minio/minio-go/v7/s3-endpoints.go | 58 + 
.../github.com/minio/minio-go/v7/s3-error.go | 61 + .../github.com/minio/minio-go/v7/transport.go | 84 + vendor/github.com/minio/minio-go/v7/utils.go | 634 + .../github.com/minio/sha256-simd/.gitignore | 1 + vendor/github.com/minio/sha256-simd/LICENSE | 202 + vendor/github.com/minio/sha256-simd/README.md | 137 + .../minio/sha256-simd/cpuid_other.go | 46 + vendor/github.com/minio/sha256-simd/sha256.go | 399 + .../sha256-simd/sha256blockAvx512_amd64.asm | 686 + .../sha256-simd/sha256blockAvx512_amd64.go | 500 + .../sha256-simd/sha256blockAvx512_amd64.s | 267 + .../minio/sha256-simd/sha256blockSha_amd64.go | 6 + .../minio/sha256-simd/sha256blockSha_amd64.s | 266 + .../minio/sha256-simd/sha256block_amd64.go | 27 + .../minio/sha256-simd/sha256block_arm64.go | 36 + .../minio/sha256-simd/sha256block_arm64.s | 192 + .../minio/sha256-simd/sha256block_other.go | 28 + .../minio/sha256-simd/test-architectures.sh | 15 + .../modern-go/concurrent/.gitignore | 1 + .../modern-go/concurrent/.travis.yml | 14 + .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/concurrent/README.md | 49 + .../modern-go/concurrent/executor.go | 14 + .../modern-go/concurrent/go_above_19.go | 15 + .../modern-go/concurrent/go_below_19.go | 33 + vendor/github.com/modern-go/concurrent/log.go | 13 + .../github.com/modern-go/concurrent/test.sh | 12 + .../concurrent/unbounded_executor.go | 119 + .../github.com/modern-go/reflect2/.gitignore | 2 + .../github.com/modern-go/reflect2/.travis.yml | 15 + .../github.com/modern-go/reflect2/Gopkg.lock | 9 + .../github.com/modern-go/reflect2/Gopkg.toml | 31 + vendor/github.com/modern-go/reflect2/LICENSE | 201 + .../github.com/modern-go/reflect2/README.md | 71 + .../modern-go/reflect2/go_above_118.go | 23 + .../modern-go/reflect2/go_above_19.go | 17 + .../modern-go/reflect2/go_below_118.go | 21 + .../github.com/modern-go/reflect2/reflect2.go | 300 + .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 + 
.../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 + .../github.com/modern-go/reflect2/safe_map.go | 101 + .../modern-go/reflect2/safe_slice.go | 92 + .../modern-go/reflect2/safe_struct.go | 29 + .../modern-go/reflect2/safe_type.go | 78 + .../github.com/modern-go/reflect2/type_map.go | 70 + .../modern-go/reflect2/unsafe_array.go | 65 + .../modern-go/reflect2/unsafe_eface.go | 59 + .../modern-go/reflect2/unsafe_field.go | 74 + .../modern-go/reflect2/unsafe_iface.go | 64 + .../modern-go/reflect2/unsafe_link.go | 76 + .../modern-go/reflect2/unsafe_map.go | 130 + .../modern-go/reflect2/unsafe_ptr.go | 46 + .../modern-go/reflect2/unsafe_slice.go | 177 + .../modern-go/reflect2/unsafe_struct.go | 59 + .../modern-go/reflect2/unsafe_type.go | 85 + vendor/github.com/rs/xid/.appveyor.yml | 27 + vendor/github.com/rs/xid/.travis.yml | 8 + vendor/github.com/rs/xid/LICENSE | 19 + vendor/github.com/rs/xid/README.md | 116 + vendor/github.com/rs/xid/error.go | 11 + vendor/github.com/rs/xid/hostid_darwin.go | 9 + vendor/github.com/rs/xid/hostid_fallback.go | 9 + vendor/github.com/rs/xid/hostid_freebsd.go | 9 + vendor/github.com/rs/xid/hostid_linux.go | 13 + vendor/github.com/rs/xid/hostid_windows.go | 38 + vendor/github.com/rs/xid/id.go | 392 + vendor/github.com/sirupsen/logrus/.gitignore | 4 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 259 + vendor/github.com/sirupsen/logrus/LICENSE | 21 + vendor/github.com/sirupsen/logrus/README.md | 513 + vendor/github.com/sirupsen/logrus/alt_exit.go | 76 + .../github.com/sirupsen/logrus/appveyor.yml | 
14 + .../github.com/sirupsen/logrus/buffer_pool.go | 43 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 442 + vendor/github.com/sirupsen/logrus/exported.go | 270 + .../github.com/sirupsen/logrus/formatter.go | 78 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 128 + vendor/github.com/sirupsen/logrus/logger.go | 417 + vendor/github.com/sirupsen/logrus/logrus.go | 186 + .../logrus/terminal_check_appengine.go | 11 + .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 17 + .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/text_formatter.go | 339 + vendor/github.com/sirupsen/logrus/writer.go | 70 + vendor/golang.org/x/crypto/argon2/argon2.go | 283 + vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 61 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 244 + .../x/crypto/argon2/blamka_generic.go | 163 + .../golang.org/x/crypto/argon2/blamka_ref.go | 16 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 + .../x/crypto/blake2b/blake2b_amd64.go | 25 + .../x/crypto/blake2b/blake2b_amd64.s | 279 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 12 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 33 + vendor/golang.org/x/net/publicsuffix/list.go | 191 + vendor/golang.org/x/net/publicsuffix/table.go | 10586 +++++++++ vendor/gopkg.in/ini.v1/.editorconfig | 12 + vendor/gopkg.in/ini.v1/.gitignore | 7 + vendor/gopkg.in/ini.v1/.golangci.yml | 27 + vendor/gopkg.in/ini.v1/LICENSE | 191 + 
vendor/gopkg.in/ini.v1/Makefile | 15 + vendor/gopkg.in/ini.v1/README.md | 43 + vendor/gopkg.in/ini.v1/codecov.yml | 16 + vendor/gopkg.in/ini.v1/data_source.go | 76 + vendor/gopkg.in/ini.v1/deprecated.go | 25 + vendor/gopkg.in/ini.v1/error.go | 49 + vendor/gopkg.in/ini.v1/file.go | 541 + vendor/gopkg.in/ini.v1/helper.go | 24 + vendor/gopkg.in/ini.v1/ini.go | 176 + vendor/gopkg.in/ini.v1/key.go | 837 + vendor/gopkg.in/ini.v1/parser.go | 520 + vendor/gopkg.in/ini.v1/section.go | 256 + vendor/gopkg.in/ini.v1/struct.go | 747 + vendor/modules.txt | 46 + 351 files changed, 99291 insertions(+), 1330 deletions(-) create mode 100644 config/value/s3.go create mode 100644 http/fs/fs.go delete mode 100644 http/handler/api/diskfs.go create mode 100644 http/handler/api/filesystems.go delete mode 100644 http/handler/api/memfs.go delete mode 100644 http/handler/diskfs.go create mode 100644 http/handler/filesystem.go delete mode 100644 http/handler/memfs.go create mode 100644 io/fs/s3.go create mode 100644 vendor/github.com/dustin/go-humanize/.travis.yml create mode 100644 vendor/github.com/dustin/go-humanize/LICENSE create mode 100644 vendor/github.com/dustin/go-humanize/README.markdown create mode 100644 vendor/github.com/dustin/go-humanize/big.go create mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go create mode 100644 vendor/github.com/dustin/go-humanize/bytes.go create mode 100644 vendor/github.com/dustin/go-humanize/comma.go create mode 100644 vendor/github.com/dustin/go-humanize/commaf.go create mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go create mode 100644 vendor/github.com/dustin/go-humanize/humanize.go create mode 100644 vendor/github.com/dustin/go-humanize/number.go create mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go create mode 100644 vendor/github.com/dustin/go-humanize/si.go create mode 100644 vendor/github.com/dustin/go-humanize/times.go create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml create mode 100644 
vendor/github.com/json-iterator/go/.gitignore create mode 100644 vendor/github.com/json-iterator/go/.travis.yml create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml create mode 100644 vendor/github.com/json-iterator/go/LICENSE create mode 100644 vendor/github.com/json-iterator/go/README.md create mode 100644 vendor/github.com/json-iterator/go/adapter.go create mode 100644 vendor/github.com/json-iterator/go/any.go create mode 100644 vendor/github.com/json-iterator/go/any_array.go create mode 100644 vendor/github.com/json-iterator/go/any_bool.go create mode 100644 vendor/github.com/json-iterator/go/any_float.go create mode 100644 vendor/github.com/json-iterator/go/any_int32.go create mode 100644 vendor/github.com/json-iterator/go/any_int64.go create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go create mode 100644 vendor/github.com/json-iterator/go/any_nil.go create mode 100644 vendor/github.com/json-iterator/go/any_number.go create mode 100644 vendor/github.com/json-iterator/go/any_object.go create mode 100644 vendor/github.com/json-iterator/go/any_str.go create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go create mode 100644 vendor/github.com/json-iterator/go/build.sh create mode 100644 vendor/github.com/json-iterator/go/config.go create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 vendor/github.com/json-iterator/go/iter.go create mode 100644 vendor/github.com/json-iterator/go/iter_array.go create mode 100644 vendor/github.com/json-iterator/go/iter_float.go create mode 100644 vendor/github.com/json-iterator/go/iter_int.go create mode 100644 vendor/github.com/json-iterator/go/iter_object.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go create mode 
100644 vendor/github.com/json-iterator/go/iter_skip_strict.go create mode 100644 vendor/github.com/json-iterator/go/iter_str.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go create mode 100644 vendor/github.com/json-iterator/go/pool.go create mode 100644 vendor/github.com/json-iterator/go/reflect.go create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go create mode 100644 vendor/github.com/json-iterator/go/stream.go create mode 100644 vendor/github.com/json-iterator/go/stream_float.go create mode 100644 vendor/github.com/json-iterator/go/stream_int.go create mode 100644 vendor/github.com/json-iterator/go/stream_str.go create mode 100644 vendor/github.com/json-iterator/go/test.sh create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/s2/.gitignore create mode 100644 vendor/github.com/klauspost/compress/s2/LICENSE create mode 100644 vendor/github.com/klauspost/compress/s2/README.md create mode 100644 vendor/github.com/klauspost/compress/s2/decode.go create mode 100644 vendor/github.com/klauspost/compress/s2/decode_amd64.s create mode 100644 
vendor/github.com/klauspost/compress/s2/decode_arm64.s create mode 100644 vendor/github.com/klauspost/compress/s2/decode_asm.go create mode 100644 vendor/github.com/klauspost/compress/s2/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_all.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_amd64.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_best.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_better.go create mode 100644 vendor/github.com/klauspost/compress/s2/encode_go.go create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s create mode 100644 vendor/github.com/klauspost/compress/s2/index.go create mode 100644 vendor/github.com/klauspost/compress/s2/s2.go create mode 100644 vendor/github.com/minio/md5-simd/LICENSE create mode 100644 vendor/github.com/minio/md5-simd/LICENSE.Golang create mode 100644 vendor/github.com/minio/md5-simd/README.md create mode 100644 vendor/github.com/minio/md5-simd/block16_amd64.s create mode 100644 vendor/github.com/minio/md5-simd/block8_amd64.s create mode 100644 vendor/github.com/minio/md5-simd/block_amd64.go create mode 100644 vendor/github.com/minio/md5-simd/md5-digest_amd64.go create mode 100644 vendor/github.com/minio/md5-simd/md5-server_amd64.go create mode 100644 vendor/github.com/minio/md5-simd/md5-server_fallback.go create mode 100644 vendor/github.com/minio/md5-simd/md5-util_amd64.go create mode 100644 vendor/github.com/minio/md5-simd/md5.go create mode 100644 vendor/github.com/minio/md5-simd/md5block_amd64.go create mode 100644 vendor/github.com/minio/md5-simd/md5block_amd64.s create mode 100644 vendor/github.com/minio/minio-go/v7/.gitignore create mode 100644 vendor/github.com/minio/minio-go/v7/.golangci.yml create mode 100644 
vendor/github.com/minio/minio-go/v7/CNAME create mode 100644 vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md create mode 100644 vendor/github.com/minio/minio-go/v7/LICENSE create mode 100644 vendor/github.com/minio/minio-go/v7/MAINTAINERS.md create mode 100644 vendor/github.com/minio/minio-go/v7/Makefile create mode 100644 vendor/github.com/minio/minio-go/v7/NOTICE create mode 100644 vendor/github.com/minio/minio-go/v7/README.md create mode 100644 vendor/github.com/minio/minio-go/v7/README_zh_CN.md create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-notification.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-policy.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-replication.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-compose-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-copy-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-datatypes.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-error-response.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-acl.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-file.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-options.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-list.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-lock.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-retention.go create mode 100644 
vendor/github.com/minio/minio-go/v7/api-object-tagging.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-presigned.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-bucket.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-common.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-remove.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-restore.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-select.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-stat.go create mode 100644 vendor/github.com/minio/minio-go/v7/api.go create mode 100644 vendor/github.com/minio/minio-go/v7/bucket-cache.go create mode 100644 vendor/github.com/minio/minio-go/v7/code_of_conduct.md create mode 100644 vendor/github.com/minio/minio-go/v7/constants.go create mode 100644 vendor/github.com/minio/minio-go/v7/core.go create mode 100644 vendor/github.com/minio/minio-go/v7/functional_tests.go create mode 100644 vendor/github.com/minio/minio-go/v7/hook-reader.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample create mode 100644 
vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/info.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go create mode 100644 
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go create mode 100644 vendor/github.com/minio/minio-go/v7/post-policy.go create mode 100644 vendor/github.com/minio/minio-go/v7/retry-continous.go create mode 100644 vendor/github.com/minio/minio-go/v7/retry.go create mode 100644 vendor/github.com/minio/minio-go/v7/s3-endpoints.go create mode 100644 vendor/github.com/minio/minio-go/v7/s3-error.go create mode 100644 vendor/github.com/minio/minio-go/v7/transport.go create mode 100644 vendor/github.com/minio/minio-go/v7/utils.go create mode 100644 vendor/github.com/minio/sha256-simd/.gitignore create mode 100644 vendor/github.com/minio/sha256-simd/LICENSE create mode 100644 vendor/github.com/minio/sha256-simd/README.md create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_other.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_other.go create mode 
100644 vendor/github.com/minio/sha256-simd/test-architectures.sh create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 vendor/github.com/modern-go/concurrent/README.md create mode 100644 vendor/github.com/modern-go/concurrent/executor.go create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 vendor/github.com/modern-go/concurrent/log.go create mode 100644 vendor/github.com/modern-go/concurrent/test.sh create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 vendor/github.com/modern-go/reflect2/README.md create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 
vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 vendor/github.com/rs/xid/.appveyor.yml create mode 100644 vendor/github.com/rs/xid/.travis.yml create mode 100644 vendor/github.com/rs/xid/LICENSE create mode 100644 vendor/github.com/rs/xid/README.md create mode 100644 vendor/github.com/rs/xid/error.go create mode 100644 vendor/github.com/rs/xid/hostid_darwin.go create mode 100644 vendor/github.com/rs/xid/hostid_fallback.go create mode 100644 vendor/github.com/rs/xid/hostid_freebsd.go create mode 100644 vendor/github.com/rs/xid/hostid_linux.go create mode 100644 vendor/github.com/rs/xid/hostid_windows.go create mode 100644 vendor/github.com/rs/xid/id.go create mode 100644 
vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 
vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go create mode 100644 vendor/gopkg.in/ini.v1/.editorconfig create mode 100644 vendor/gopkg.in/ini.v1/.gitignore create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/gopkg.in/ini.v1/LICENSE create mode 100644 vendor/gopkg.in/ini.v1/Makefile create mode 100644 vendor/gopkg.in/ini.v1/README.md create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml create mode 100644 vendor/gopkg.in/ini.v1/data_source.go create mode 100644 vendor/gopkg.in/ini.v1/deprecated.go create mode 100644 vendor/gopkg.in/ini.v1/error.go create mode 100644 vendor/gopkg.in/ini.v1/file.go create mode 100644 vendor/gopkg.in/ini.v1/helper.go create mode 100644 vendor/gopkg.in/ini.v1/ini.go create mode 100644 vendor/gopkg.in/ini.v1/key.go create mode 100644 vendor/gopkg.in/ini.v1/parser.go create mode 100644 vendor/gopkg.in/ini.v1/section.go create mode 100644 vendor/gopkg.in/ini.v1/struct.go diff --git a/README.md b/README.md index 0c1baf20..5cf9367e 100644 --- a/README.md +++ 
b/README.md @@ -652,16 +652,17 @@ A command is defined as: Currently supported placeholders are: -| Placeholder | Description | Location | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | -| `{diskfs}` | Will be replaced by the provided `CORE_STORAGE_DISK_DIR`. | `options`, `input.address`, `input.options`, `output.address`, `output.options` | -| `{memfs}` | Will be replace by the base URL of the MemFS. | `input.address`, `input.options`, `output.address`, `output.options` | -| `{processid}` | Will be replaced by the ID of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | -| `{reference}` | Will be replaced by the reference of the process | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | -| `{inputid}` | Will be replaced by the ID of the input. | `input.address`, `input.options` | -| `{outputid}` | Will be replaced by the ID of the output. | `output.address`, `output.options`, `output.cleanup.pattern` | -| `{rtmp}` | Will be replaced by the internal address of the RTMP server. Requires parameter `name` (name of the stream). | `input.address`, `output.address` | -| `{srt}` | Will be replaced by the internal address of the SRT server. Requires parameter `name` (name of the stream) and `mode` (either `publish` or `request`). 
| `input.address`, `output.address` | +| Placeholder | Description | Location | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | +| `{diskfs}` or `{fs:disk}` | Will be replaced by the provided `CORE_STORAGE_DISK_DIR`. | `options`, `input.address`, `input.options`, `output.address`, `output.options` | +| `{memfs}` or `{fs:mem}` | Will be replaced by the base address of the MemFS. | `input.address`, `input.options`, `output.address`, `output.options` | +| `{fs:*}` | Will be replaces by the base address of the respective filesystem. | See `{memfs}` | +| `{processid}` | Will be replaced by the ID of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | +| `{reference}` | Will be replaced by the reference of the process | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | +| `{inputid}` | Will be replaced by the ID of the input. | `input.address`, `input.options` | +| `{outputid}` | Will be replaced by the ID of the output. | `output.address`, `output.options`, `output.cleanup.pattern` | +| `{rtmp}` | Will be replaced by the internal address of the RTMP server. Requires parameter `name` (name of the stream). | `input.address`, `output.address` | +| `{srt}` | Will be replaced by the internal address of the SRT server. Requires parameter `name` (name of the stream) and `mode` (either `publish` or `request`). | `input.address`, `output.address` | Before replacing the placeholders in the process config, all references (see below) will be resolved. 
diff --git a/app/api/api.go b/app/api/api.go index b1f5d76b..92f94495 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -22,6 +22,7 @@ import ( "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/http" "github.com/datarhei/core/v16/http/cache" + httpfs "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/io/fs" @@ -69,6 +70,7 @@ type api struct { ffmpeg ffmpeg.FFmpeg diskfs fs.Filesystem memfs fs.Filesystem + s3fs map[string]fs.Filesystem rtmpserver rtmp.Server srtserver srt.Server metrics monitor.HistoryMonitor @@ -118,6 +120,7 @@ var ErrConfigReload = fmt.Errorf("configuration reload") func New(configpath string, logwriter io.Writer) (API, error) { a := &api{ state: "idle", + s3fs: map[string]fs.Filesystem{}, } a.config.path = configpath @@ -372,12 +375,13 @@ func (a *api) start() error { } diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{ + Name: "disk", Dir: cfg.Storage.Disk.Dir, Size: cfg.Storage.Disk.Size * 1024 * 1024, - Logger: a.log.logger.core.WithComponent("DiskFS"), + Logger: a.log.logger.core.WithComponent("FS"), }) if err != nil { - return err + return fmt.Errorf("disk filesystem: %w", err) } a.diskfs = diskfs @@ -400,10 +404,11 @@ func (a *api) start() error { if a.memfs == nil { memfs := fs.NewMemFilesystem(fs.MemConfig{ + Name: "mem", Base: baseMemFS.String(), Size: cfg.Storage.Memory.Size * 1024 * 1024, Purge: cfg.Storage.Memory.Purge, - Logger: a.log.logger.core.WithComponent("MemFS"), + Logger: a.log.logger.core.WithComponent("FS"), }) a.memfs = memfs @@ -412,23 +417,62 @@ func (a *api) start() error { a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024) } + for _, s3 := range cfg.Storage.S3 { + baseS3FS := url.URL{ + Scheme: "http", + Path: s3.Mountpoint, + } + + host, port, _ := gonet.SplitHostPort(cfg.Address) + if len(host) == 0 { + baseS3FS.Host = "localhost:" + port + } else { + baseS3FS.Host = cfg.Address + } + + if 
s3.Auth.Enable { + baseS3FS.User = url.UserPassword(s3.Auth.Username, s3.Auth.Password) + } + + s3fs, err := fs.NewS3Filesystem(fs.S3Config{ + Name: s3.Name, + Base: baseS3FS.String(), + Endpoint: s3.Endpoint, + AccessKeyID: s3.AccessKeyID, + SecretAccessKey: s3.SecretAccessKey, + Region: s3.Region, + Bucket: s3.Bucket, + UseSSL: s3.UseSSL, + Logger: a.log.logger.core.WithComponent("FS"), + }) + if err != nil { + return fmt.Errorf("s3 filesystem (%s): %w", s3.Name, err) + } + + if _, ok := a.s3fs[s3.Name]; ok { + return fmt.Errorf("the name '%s' for a filesystem is already in use", s3.Name) + } + + a.s3fs[s3.Name] = s3fs + } + var portrange net.Portranger if cfg.Playout.Enable { portrange, err = net.NewPortrange(cfg.Playout.MinPort, cfg.Playout.MaxPort) if err != nil { - return err + return fmt.Errorf("playout port range: %w", err) } } validatorIn, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Input.Allow, cfg.FFmpeg.Access.Input.Block) if err != nil { - return err + return fmt.Errorf("input address validator: %w", err) } validatorOut, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Output.Allow, cfg.FFmpeg.Access.Output.Block) if err != nil { - return err + return fmt.Errorf("output address validator: %w", err) } ffmpeg, err := ffmpeg.New(ffmpeg.Config{ @@ -442,7 +486,7 @@ func (a *api) start() error { Collector: a.sessions.Collector("ffmpeg"), }) if err != nil { - return err + return fmt.Errorf("unable to create ffmpeg: %w", err) } a.ffmpeg = ffmpeg @@ -510,6 +554,15 @@ func (a *api) start() error { }) } + filesystems := []fs.Filesystem{ + a.diskfs, + a.memfs, + } + + for _, fs := range a.s3fs { + filesystems = append(filesystems, fs) + } + store := store.NewJSONStore(store.JSONConfig{ Filepath: cfg.DB.Dir + "/db.json", FFVersion: a.ffmpeg.Skills().FFmpeg.Version, @@ -520,8 +573,7 @@ func (a *api) start() error { ID: cfg.ID, Name: cfg.Name, Store: store, - DiskFS: a.diskfs, - MemFS: a.memfs, + Filesystems: filesystems, Replace: a.replacer, FFmpeg: a.ffmpeg, 
MaxProcesses: cfg.FFmpeg.MaxProcesses, @@ -592,6 +644,9 @@ func (a *api) start() error { metrics.Register(monitor.NewDiskCollector(a.diskfs.Base())) metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs)) metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs)) + for name, fs := range a.s3fs { + metrics.Register(monitor.NewFilesystemCollector(name, fs)) + } metrics.Register(monitor.NewRestreamCollector(a.restream)) metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg)) metrics.Register(monitor.NewSessionCollector(a.sessions, []string{})) @@ -666,7 +721,7 @@ func (a *api) start() error { } if cfg.Storage.Disk.Cache.Enable { - diskCache, err := cache.NewLRUCache(cache.LRUConfig{ + cache, err := cache.NewLRUCache(cache.LRUConfig{ TTL: time.Duration(cfg.Storage.Disk.Cache.TTL) * time.Second, MaxSize: cfg.Storage.Disk.Cache.Size * 1024 * 1024, MaxFileSize: cfg.Storage.Disk.Cache.FileSize * 1024 * 1024, @@ -676,10 +731,10 @@ func (a *api) start() error { }) if err != nil { - return fmt.Errorf("unable to create disk cache: %w", err) + return fmt.Errorf("unable to create cache: %w", err) } - a.cache = diskCache + a.cache = cache } var autocertManager *certmagic.Config @@ -867,22 +922,61 @@ func (a *api) start() error { a.log.logger.main = a.log.logger.core.WithComponent(logcontext).WithField("address", cfg.Address) - mainserverhandler, err := http.NewServer(http.Config{ + httpfilesystems := []httpfs.FS{ + { + Name: a.diskfs.Name(), + Mountpoint: "", + AllowWrite: false, + EnableAuth: false, + Username: "", + Password: "", + DefaultFile: "index.html", + DefaultContentType: "text/html", + Gzip: true, + Filesystem: a.diskfs, + Cache: a.cache, + }, + { + Name: a.memfs.Name(), + Mountpoint: "/memfs", + AllowWrite: true, + EnableAuth: cfg.Storage.Memory.Auth.Enable, + Username: cfg.Storage.Memory.Auth.Username, + Password: cfg.Storage.Memory.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.memfs, + 
Cache: nil, + }, + } + + for _, s3 := range cfg.Storage.S3 { + httpfilesystems = append(httpfilesystems, httpfs.FS{ + Name: s3.Name, + Mountpoint: s3.Mountpoint, + AllowWrite: true, + EnableAuth: s3.Auth.Enable, + Username: s3.Auth.Username, + Password: s3.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.s3fs[s3.Name], + Cache: a.cache, + }) + } + + serverConfig := http.Config{ Logger: a.log.logger.main, LogBuffer: a.log.buffer, Restream: a.restream, Metrics: a.metrics, Prometheus: a.prom, MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, + Filesystems: httpfilesystems, + IPLimiter: iplimiter, + Profiling: cfg.Debug.Profiling, Cors: http.CorsConfig{ Origins: cfg.Storage.CORS.Origins, }, @@ -890,11 +984,12 @@ func (a *api) start() error { SRT: a.srtserver, JWT: a.httpjwt, Config: a.config.store, - Cache: a.cache, Sessions: a.sessions, Router: router, ReadOnly: cfg.API.ReadOnly, - }) + } + + mainserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create server: %w", err) @@ -929,34 +1024,10 @@ func (a *api) start() error { a.log.logger.sidecar = a.log.logger.core.WithComponent("HTTP").WithField("address", cfg.Address) - sidecarserverhandler, err := http.NewServer(http.Config{ - Logger: a.log.logger.sidecar, - LogBuffer: a.log.buffer, - Restream: a.restream, - Metrics: a.metrics, - Prometheus: a.prom, - MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, - Cors: 
http.CorsConfig{ - Origins: cfg.Storage.CORS.Origins, - }, - RTMP: a.rtmpserver, - SRT: a.srtserver, - JWT: a.httpjwt, - Config: a.config.store, - Cache: a.cache, - Sessions: a.sessions, - Router: router, - ReadOnly: cfg.API.ReadOnly, - }) + serverConfig.Logger = a.log.logger.sidecar + serverConfig.IPLimiter = iplimiter + + sidecarserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create sidecar HTTP server: %w", err) diff --git a/config/config.go b/config/config.go index b8a5028e..256f5022 100644 --- a/config/config.go +++ b/config/config.go @@ -6,11 +6,12 @@ import ( "net" "time" - haikunator "github.com/atrox/haikunatorgo/v2" "github.com/datarhei/core/v16/config/copy" "github.com/datarhei/core/v16/config/value" "github.com/datarhei/core/v16/config/vars" "github.com/datarhei/core/v16/math/rand" + + haikunator "github.com/atrox/haikunatorgo/v2" "github.com/google/uuid" ) @@ -111,6 +112,7 @@ func (d *Config) Clone() *Config { data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow) data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block) + data.Storage.S3 = copy.Slice(d.Storage.S3) data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) @@ -195,6 +197,9 @@ func (d *Config) init() { d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. 
allowed megabytes for /memfs, 0 for unlimited", false, false) d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + // Storage (S3) + d.vars.Register(value.NewS3StorageListValue(&d.Storage.S3, []value.S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLS", false, false) + // Storage (CORS) d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) diff --git a/config/data.go b/config/data.go index 6d9d509f..e534347c 100644 --- a/config/data.go +++ b/config/data.go @@ -88,6 +88,7 @@ type Data struct { Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` + S3 []value.S3Storage `json:"s3"` CORS struct { Origins []string `json:"origins"` } `json:"cors"` @@ -246,6 +247,8 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types) + data.Storage.S3 = []value.S3Storage{} + data.Version = 3 return data, nil diff --git a/config/value/s3.go b/config/value/s3.go new file mode 100644 index 00000000..a85a0838 --- /dev/null +++ b/config/value/s3.go @@ -0,0 +1,179 @@ +package value + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/publicsuffix" +) + +// array of s3 storages +// https://access_key_id:secret_access_id@region.endpoint/bucket?name=aaa&mount=/abc&username=xxx&password=yyy + +type S3Storage struct { + Name string `json:"name"` + Mountpoint string `json:"mountpoint"` + Auth struct { + Enable bool `json:"enable"` + Username string `json:"username"` + Password string `json:"password"` + } `json:"auth"` + Endpoint string `json:"endpoint"` + AccessKeyID string 
`json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Bucket string `json:"bucket"` + Region string `json:"region"` + UseSSL bool `json:"use_ssl"` +} + +func (t *S3Storage) String() string { + u := url.URL{} + + if t.UseSSL { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + + u.User = url.UserPassword(t.AccessKeyID, "---") + + u.Host = t.Endpoint + + if len(t.Region) != 0 { + u.Host = t.Region + "." + u.Host + } + + if len(t.Bucket) != 0 { + u.Path = "/" + t.Bucket + } + + v := url.Values{} + v.Set("name", t.Name) + v.Set("mountpoint", t.Mountpoint) + + if t.Auth.Enable { + if len(t.Auth.Username) != 0 { + v.Set("username", t.Auth.Username) + } + + if len(t.Auth.Password) != 0 { + v.Set("password", "---") + } + } + + u.RawQuery = v.Encode() + + return u.String() +} + +type s3StorageListValue struct { + p *[]S3Storage + separator string +} + +func NewS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue { + v := &s3StorageListValue{ + p: p, + separator: separator, + } + + *p = val + return v +} + +func (s *s3StorageListValue) Set(val string) error { + list := []S3Storage{} + + for _, elm := range strings.Split(val, s.separator) { + u, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err) + } + + t := S3Storage{ + Name: u.Query().Get("name"), + Mountpoint: u.Query().Get("mountpoint"), + AccessKeyID: u.User.Username(), + } + + hostname := u.Hostname() + port := u.Port() + + domain, err := publicsuffix.EffectiveTLDPlusOne(hostname) + if err != nil { + return fmt.Errorf("invalid eTLD (%s): %w", hostname, err) + } + + t.Endpoint = domain + if len(port) != 0 { + t.Endpoint += ":" + port + } + + region := strings.TrimSuffix(hostname, domain) + if len(region) != 0 { + t.Region = strings.TrimSuffix(region, ".") + } + + secret, ok := u.User.Password() + if ok { + t.SecretAccessKey = secret + } + + t.Bucket = strings.TrimPrefix(u.Path, "/") + + if u.Scheme == 
"https" { + t.UseSSL = true + } + + if u.Query().Has("username") || u.Query().Has("password") { + t.Auth.Enable = true + t.Auth.Username = u.Query().Get("username") + t.Auth.Password = u.Query().Get("password") + } + + list = append(list, t) + } + + *s.p = list + + return nil +} + +func (s *s3StorageListValue) String() string { + if s.IsEmpty() { + return "(empty)" + } + + list := []string{} + + for _, t := range *s.p { + list = append(list, t.String()) + } + + return strings.Join(list, s.separator) +} + +func (s *s3StorageListValue) Validate() error { + for i, t := range *s.p { + if len(t.Name) == 0 { + return fmt.Errorf("the name for s3 storage %d is missing", i) + } + + if len(t.Mountpoint) == 0 { + return fmt.Errorf("the mountpoint for s3 storage %d is missing", i) + } + + if t.Auth.Enable { + if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 { + return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i) + } + } + } + + return nil +} + +func (s *s3StorageListValue) IsEmpty() bool { + return len(*s.p) == 0 +} diff --git a/docs/docs.go b/docs/docs.go index 672b5d20..9452936b 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -311,6 +311,32 @@ const docTemplate = `{ } } }, + "/api/v3/fs": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Listall registered filesystems", + "produces": [ + "application/json" + ], + "summary": "List all registered filesystems", + "operationId": "filesystem-3-list", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.FilesystemInfo" + } + } + } + } + } + }, "/api/v3/fs/disk": { "get": { "security": [ @@ -757,6 +783,219 @@ const docTemplate = `{ } } }, + "/api/v3/fs/{name}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "List all files on a filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "produces": [ + "application/json" + ], + "summary": "List all files on a filesystem", + "operationId": "filesystem-3-list-files", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "glob pattern for file names", + "name": "glob", + "in": "query" + }, + { + "type": "string", + "description": "none, name, size, lastmod", + "name": "sort", + "in": "query" + }, + { + "type": "string", + "description": "asc, desc", + "name": "order", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.FileInfo" + } + } + } + } + } + }, + "/api/v3/fs/{name}/{path}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Fetch a file from a filesystem", + "produces": [ + "application/data", + "application/json" + ], + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "file" + } + }, + "301": { + "description": "Moved Permanently", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + }, + "put": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Writes or overwrites a file on a filesystem", + "consumes": [ + "application/data" + ], + "produces": [ + "text/plain", + "application/json" + ], + "summary": "Add a file to a filesystem", + "operationId": 
"filesystem-3-put-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + }, + { + "description": "File data", + "name": "data", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "type": "string" + } + }, + "204": { + "description": "No Content", + "schema": { + "type": "string" + } + }, + "507": { + "description": "Insufficient Storage", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + }, + "delete": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Remove a file from a filesystem", + "produces": [ + "text/plain" + ], + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + } + }, "/api/v3/log": { "get": { "security": [ @@ -2132,140 +2371,6 @@ const docTemplate = `{ } } }, - "/memfs/{path}": { - "get": { - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - 
"schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2325,46 +2430,6 @@ const docTemplate = `{ } } } - }, - "/{path}": { - "get": { - "description": "Fetch a file from the filesystem. 
If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } } }, "definitions": { @@ -2569,6 +2634,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2953,6 +3019,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -3031,6 +3103,20 @@ const docTemplate = `{ } } }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, "api.GraphQuery": { "type": "object", "properties": { @@ -4330,6 +4416,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -4714,6 +4801,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -5056,6 +5149,49 @@ const docTemplate = `{ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" 
+ }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.json b/docs/swagger.json index 2b32d525..5e270188 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -303,6 +303,32 @@ } } }, + "/api/v3/fs": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Listall registered filesystems", + "produces": [ + "application/json" + ], + "summary": "List all registered filesystems", + "operationId": "filesystem-3-list", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.FilesystemInfo" + } + } + } + } + } + }, "/api/v3/fs/disk": { "get": { "security": [ @@ -749,6 +775,219 @@ } } }, + "/api/v3/fs/{name}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "List all files on a filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "produces": [ + "application/json" + ], + "summary": "List all files on a filesystem", + "operationId": "filesystem-3-list-files", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "glob pattern for file names", + "name": "glob", + "in": "query" + }, + { + "type": "string", + "description": "none, name, size, lastmod", + "name": "sort", + "in": "query" + }, + { + "type": "string", + "description": "asc, desc", + "name": "order", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.FileInfo" + } + } + } + } + } + }, + "/api/v3/fs/{name}/{path}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Fetch a file from a filesystem", + "produces": [ + "application/data", + "application/json" + ], + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "file" + } + }, + "301": { + "description": "Moved Permanently", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + }, + "put": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Writes or overwrites a file on a filesystem", + "consumes": [ + "application/data" + ], + "produces": [ + "text/plain", + "application/json" + ], + "summary": "Add a file to a filesystem", + "operationId": 
"filesystem-3-put-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + }, + { + "description": "File data", + "name": "data", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "type": "string" + } + }, + "204": { + "description": "No Content", + "schema": { + "type": "string" + } + }, + "507": { + "description": "Insufficient Storage", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + }, + "delete": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Remove a file from a filesystem", + "produces": [ + "text/plain" + ], + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", + "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path to file", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } + } + }, "/api/v3/log": { "get": { "security": [ @@ -2124,140 +2363,6 @@ } } }, - "/memfs/{path}": { - "get": { - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": 
"file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2317,46 +2422,6 @@ } } } - }, - "/{path}": { - "get": { - "description": "Fetch a file from the filesystem. 
If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } } }, "definitions": { @@ -2561,6 +2626,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2945,6 +3011,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -3023,6 +3095,20 @@ } } }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, "api.GraphQuery": { "type": "object", "properties": { @@ -4322,6 +4408,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -4706,6 +4793,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -5048,6 +5141,49 @@ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { 
+ "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index daabe182..e60eff9f 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -134,6 +134,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -391,6 +392,10 @@ definitions: type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -442,6 +447,15 @@ definitions: format: int64 type: integer type: object + api.FilesystemInfo: + properties: + mount: + type: string + name: + type: string + type: + type: string + type: object api.GraphQuery: properties: query: @@ -1388,6 +1402,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -1645,6 +1660,10 @@ definitions: type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -1867,6 +1886,34 @@ definitions: type: string type: array type: object + value.S3Storage: + properties: + access_key_id: + type: string + auth: + properties: + enable: + type: boolean + password: + type: string + username: + type: string + type: object + bucket: + type: string + endpoint: + type: string + mountpoint: + type: string + name: + type: string + region: + type: string + secret_access_key: + type: string + use_ssl: + type: boolean + type: object info: contact: email: hello@datarhei.com @@ -1879,34 +1926,6 @@ info: title: datarhei Core API version: "3.0" paths: - /{path}: - get: - description: Fetch a file from the filesystem. If the file is a directory, a - index.html is returned, if it exists. 
- operationId: diskfs-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - summary: Fetch a file from the filesystem /api: get: description: API version and build infos in case auth is valid or not required. @@ -2091,6 +2110,162 @@ paths: summary: Reload the currently active configuration tags: - v16.7.2 + /api/v3/fs: + get: + description: Listall registered filesystems + operationId: filesystem-3-list + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/api.FilesystemInfo' + type: array + security: + - ApiKeyAuth: [] + summary: List all registered filesystems + /api/v3/fs/{name}: + get: + description: List all files on a filesystem. The listing can be ordered by name, + size, or date of last modification in ascending or descending order. 
+ operationId: filesystem-3-list-files + parameters: + - description: Name of the filesystem + in: path + name: name + required: true + type: string + - description: glob pattern for file names + in: query + name: glob + type: string + - description: none, name, size, lastmod + in: query + name: sort + type: string + - description: asc, desc + in: query + name: order + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/api.FileInfo' + type: array + security: + - ApiKeyAuth: [] + summary: List all files on a filesystem + /api/v3/fs/{name}/{path}: + delete: + description: Remove a file from a filesystem + operationId: filesystem-3-delete-file + parameters: + - description: Name of the filesystem + in: path + name: name + required: true + type: string + - description: Path to file + in: path + name: path + required: true + type: string + produces: + - text/plain + responses: + "200": + description: OK + schema: + type: string + "404": + description: Not Found + schema: + $ref: '#/definitions/api.Error' + security: + - ApiKeyAuth: [] + summary: Remove a file from a filesystem + get: + description: Fetch a file from a filesystem + operationId: filesystem-3-get-file + parameters: + - description: Name of the filesystem + in: path + name: name + required: true + type: string + - description: Path to file + in: path + name: path + required: true + type: string + produces: + - application/data + - application/json + responses: + "200": + description: OK + schema: + type: file + "301": + description: Moved Permanently + schema: + type: string + "404": + description: Not Found + schema: + $ref: '#/definitions/api.Error' + security: + - ApiKeyAuth: [] + summary: Fetch a file from a filesystem + put: + consumes: + - application/data + description: Writes or overwrites a file on a filesystem + operationId: filesystem-3-put-file + parameters: + - description: Name of the filesystem + in: path + name: 
name + required: true + type: string + - description: Path to file + in: path + name: path + required: true + type: string + - description: File data + in: body + name: data + required: true + schema: + items: + type: integer + type: array + produces: + - text/plain + - application/json + responses: + "201": + description: Created + schema: + type: string + "204": + description: No Content + schema: + type: string + "507": + description: Insufficient Storage + schema: + $ref: '#/definitions/api.Error' + security: + - ApiKeyAuth: [] + summary: Add a file to a filesystem /api/v3/fs/disk: get: description: List all files on the filesystem. The listing can be ordered by @@ -3292,94 +3467,6 @@ paths: summary: Fetch minimal statistics about a process tags: - v16.7.2 - /memfs/{path}: - delete: - description: Remove a file from the memory filesystem - operationId: memfs-delete-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - text/plain - responses: - "200": - description: OK - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Remove a file from the memory filesystem - get: - description: Fetch a file from the memory filesystem - operationId: memfs-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - summary: Fetch a file from the memory filesystem - put: - consumes: - - application/data - description: Writes or overwrites a file on the memory filesystem - operationId: memfs-put-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - - description: 
File data - in: body - name: data - required: true - schema: - items: - type: integer - type: array - produces: - text/plain - application/json - responses: - "201": - description: Created - schema: - type: string - "204": - description: No Content - schema: - type: string - "507": - description: Insufficient Storage - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Add a file to the memory filesystem /metrics: get: description: Prometheus metrics diff --git a/glob/glob.go b/glob/glob.go index 690daf61..89b57f00 100644 --- a/glob/glob.go +++ b/glob/glob.go @@ -4,6 +4,9 @@ import ( "github.com/gobwas/glob" ) +// Match returns whether the name matches the glob pattern, also considering +// one or several optional separators. An error is only returned if the pattern +// is invalid. func Match(pattern, name string, separators ...rune) (bool, error) { g, err := glob.Compile(pattern, separators...) if err != nil { diff --git a/go.mod b/go.mod index c5e49d28..f286bea7 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/labstack/echo/v4 v4.9.1 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.16 + github.com/minio/minio-go/v7 v7.0.39 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 github.com/prometheus/client_golang v1.13.1 github.com/shirou/gopsutil/v3 v3.22.10 @@ -28,6 +29,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 go.uber.org/zap v1.23.0 golang.org/x/mod v0.6.0 + golang.org/x/net v0.1.0 ) require ( @@ -38,6 +40,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -51,6 +54,8 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.9 // indirect github.com/klauspost/cpuid/v2 v2.1.2 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect @@ -61,13 +66,19 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mholt/acmez v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/rs/xid v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.5.0 // indirect @@ -81,11 +92,11 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.1.0 // indirect golang.org/x/sys v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.2.0 // indirect google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 8712f523..7020c606 100644 --- a/go.sum +++ b/go.sum @@ -89,6 +89,8 @@ github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -214,6 +216,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -222,6 +225,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.2 h1:XhdX4fqAJUA0yj+kUwMavO0hHrSPAecYdYf1ZmxHvak= github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -270,13 +277,21 @@ github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80= github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.39 h1:upnbu1jCGOqEvrGSpRauSN9ZG7RCHK7VHxXS8Vmg2zk= +github.com/minio/minio-go/v7 v7.0.39/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -329,6 +344,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -340,6 +357,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus 
v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -575,6 +594,7 @@ golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= @@ -735,6 +755,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/http/api/fs.go b/http/api/fs.go index 540670d2..84535bcc 100644 --- a/http/api/fs.go +++ b/http/api/fs.go @@ -6,3 +6,10 @@ type FileInfo struct { Size int64 `json:"size_bytes" jsonschema:"minimum=0" format:"int64"` LastMod int64 `json:"last_modified" jsonschema:"minimum=0" format:"int64"` } + +// FilesystemInfo represents information about a filesystem +type FilesystemInfo struct { + Name string `json:"name"` + Type string `json:"type"` + Mount string `json:"mount"` +} diff --git a/http/fs/fs.go b/http/fs/fs.go new file mode 100644 index 00000000..500ab733 --- /dev/null +++ b/http/fs/fs.go @@ -0,0 +1,25 @@ +package fs + +import ( + "github.com/datarhei/core/v16/http/cache" + "github.com/datarhei/core/v16/io/fs" +) + +type FS struct { + Name string + Mountpoint string + + AllowWrite bool + + EnableAuth bool + Username string + Password string + + DefaultFile string + DefaultContentType string + Gzip bool + + Filesystem fs.Filesystem + + Cache cache.Cacher +} diff --git a/http/handler/api/diskfs.go b/http/handler/api/diskfs.go deleted file mode 100644 index 98156ac9..00000000 --- a/http/handler/api/diskfs.go +++ /dev/null @@ -1,215 +0,0 @@ -package api - -import ( - "net/http" - "path/filepath" - "sort" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem - handler *handler.DiskFSHandler -} - -// NewDiskFS return a new DiskFS type. 
You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. -func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - handler: handler.NewDiskFS(fs, cache), - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. The contents of that file are returned. -// @Tags v16.7.2 -// @ID diskfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [get] -func (h *DiskFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ := file.Stat() - - if stat.IsDir() { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the filesystem -// @Description Writes or overwrites a file on the filesystem -// @Tags v16.7.2 -// @ID 
diskfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [put] -func (h *DiskFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, path) - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the filesystem -// @Description Remove a file from the filesystem -// @Tags v16.7.2 -// @ID diskfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [delete] -func (h *DiskFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File not found", path) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - return c.String(http.StatusOK, "OK") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the filesystem -// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
-// @Tags v16.7.2 -// @ID diskfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk [get] -func (h *DiskFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - fileinfos := []api.FileInfo{} - - for _, f := range files { - if f.IsDir() { - continue - } - - fileinfos = append(fileinfos, api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - }) - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/api/filesystems.go b/http/handler/api/filesystems.go new file mode 100644 index 00000000..ce93812b --- /dev/null +++ b/http/handler/api/filesystems.go @@ -0,0 +1,146 @@ +package api + +import ( + "net/http" + + "github.com/datarhei/core/v16/http/api" + "github.com/datarhei/core/v16/http/handler" + "github.com/datarhei/core/v16/http/handler/util" + + "github.com/labstack/echo/v4" +) + 
+type FSConfig struct { + Type string + Mountpoint string + Handler *handler.FSHandler +} + +// The FSHandler type provides handlers for manipulating a filesystem +type FSHandler struct { + filesystems map[string]FSConfig +} + +// NewFS return a new FSHanlder type. You have to provide a filesystem to act on. +func NewFS(filesystems map[string]FSConfig) *FSHandler { + return &FSHandler{ + filesystems: filesystems, + } +} + +// GetFileAPI returns the file at the given path +// @Summary Fetch a file from a filesystem +// @Description Fetch a file from a filesystem +// @ID filesystem-3-get-file +// @Produce application/data +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Success 200 {file} byte +// @Success 301 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [get] +func (h *FSHandler) GetFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.GetFile(c) +} + +// PutFileAPI adds or overwrites a file at the given path +// @Summary Add a file to a filesystem +// @Description Writes or overwrites a file on a filesystem +// @ID filesystem-3-put-file +// @Accept application/data +// @Produce text/plain +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Param data body []byte true "File data" +// @Success 201 {string} string +// @Success 204 {string} string +// @Failure 507 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [put] +func (h *FSHandler) PutFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + 
} + + return config.Handler.PutFile(c) +} + +// DeleteFileAPI removes a file from a filesystem +// @Summary Remove a file from a filesystem +// @Description Remove a file from a filesystem +// @ID filesystem-3-delete-file +// @Produce text/plain +// @Param name path string true "Name of the filesystem" +// @Param path path string true "Path to file" +// @Success 200 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name}/{path} [delete] +func (h *FSHandler) DeleteFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.DeleteFile(c) +} + +// ListFiles lists all files on a filesystem +// @Summary List all files on a filesystem +// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
+// @ID filesystem-3-list-files +// @Produce json +// @Param name path string true "Name of the filesystem" +// @Param glob query string false "glob pattern for file names" +// @Param sort query string false "none, name, size, lastmod" +// @Param order query string false "asc, desc" +// @Success 200 {array} api.FileInfo +// @Security ApiKeyAuth +// @Router /api/v3/fs/{name} [get] +func (h *FSHandler) ListFiles(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.ListFiles(c) +} + +// List lists all registered filesystems +// @Summary List all registered filesystems +// @Description Listall registered filesystems +// @ID filesystem-3-list +// @Produce json +// @Success 200 {array} api.FilesystemInfo +// @Security ApiKeyAuth +// @Router /api/v3/fs [get] +func (h *FSHandler) List(c echo.Context) error { + fss := []api.FilesystemInfo{} + + for name, config := range h.filesystems { + fss = append(fss, api.FilesystemInfo{ + Name: name, + Type: config.Type, + Mount: config.Mountpoint, + }) + } + + return c.JSON(http.StatusOK, fss) +} diff --git a/http/handler/api/memfs.go b/http/handler/api/memfs.go deleted file mode 100644 index 2b64c4d0..00000000 --- a/http/handler/api/memfs.go +++ /dev/null @@ -1,177 +0,0 @@ -package api - -import ( - "io" - "net/http" - "net/url" - "sort" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The MemFSHandler type provides handlers for manipulating a filesystem -type MemFSHandler struct { - filesystem fs.Filesystem - handler *handler.MemFSHandler -} - -// NewMemFS return a new MemFS type. You have to provide a filesystem to act on. 
-func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - handler: handler.NewMemFS(fs), - } -} - -// GetFileAPI returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - return h.handler.GetFile(c) -} - -// PutFileAPI adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - return h.handler.PutFile(c) -} - -// DeleteFileAPI removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @Tags v16.7.2 -// @ID memfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - return h.handler.DeleteFile(c) -} - -// PatchFile creates a symbolic link to a file in the filesystem -// @Summary Create a link to a file in the memory filesystem -// @Description Create a 
link to a file in the memory filesystem. The file linked to has to exist. -// @Tags v16.7.2 -// @ID memfs-3-patch -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param url body string true "Path to the file to link to" -// @Success 201 {string} string -// @Failure 400 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [patch] -func (h *MemFSHandler) PatchFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - body, err := io.ReadAll(req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err) - } - - u, err := url.Parse(string(body)) - if err != nil { - return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err) - } - - if err := h.filesystem.Symlink(u.Path, path); err != nil { - return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - return c.String(http.StatusCreated, "") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the memory filesystem -// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
-// @Tags v16.7.2 -// @ID memfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem [get] -func (h *MemFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - var fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) - - for i, f := range files { - fileinfos[i] = api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - } - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/diskfs.go b/http/handler/diskfs.go deleted file mode 100644 index 9726c258..00000000 --- a/http/handler/diskfs.go +++ /dev/null @@ -1,88 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - 
"github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem -} - -// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. -func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists. -// @ID diskfs-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Router /{path} [get] -func (h *DiskFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ := file.Stat() - - if stat.IsDir() { - path = filepath.Join(path, "index.html") - - file.Close() - - file = h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ = file.Stat() - } - - defer file.Close() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return 
c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} diff --git a/http/handler/filesystem.go b/http/handler/filesystem.go new file mode 100644 index 00000000..254ff609 --- /dev/null +++ b/http/handler/filesystem.go @@ -0,0 +1,164 @@ +package handler + +import ( + "net/http" + "path/filepath" + "sort" + + "github.com/datarhei/core/v16/http/api" + "github.com/datarhei/core/v16/http/fs" + "github.com/datarhei/core/v16/http/handler/util" + + "github.com/labstack/echo/v4" +) + +// The FSHandler type provides handlers for manipulating a filesystem +type FSHandler struct { + fs fs.FS +} + +// NewFS return a new FSHandler type. You have to provide a filesystem to act on. +func NewFS(fs fs.FS) *FSHandler { + return &FSHandler{ + fs: fs, + } +} + +func (h *FSHandler) GetFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + mimeType := c.Response().Header().Get(echo.HeaderContentType) + c.Response().Header().Del(echo.HeaderContentType) + + file := h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ := file.Stat() + + if len(h.fs.DefaultFile) != 0 { + if stat.IsDir() { + path = filepath.Join(path, h.fs.DefaultFile) + + file.Close() + + file = h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ = file.Stat() + } + } + + defer file.Close() + + c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) + + if path, ok := stat.IsLink(); ok { + path = filepath.Clean("/" + path) + + if path[0] == '/' { + path = path[1:] + } + + return c.Redirect(http.StatusMovedPermanently, path) + } + + c.Response().Header().Set(echo.HeaderContentType, mimeType) + + if c.Request().Method == "HEAD" { + return c.Blob(http.StatusOK, "application/data", nil) + } + + return c.Stream(http.StatusOK, "application/data", file) +} + +func (h 
*FSHandler) PutFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + req := c.Request() + + _, created, err := h.fs.Filesystem.Store(path, req.Body) + if err != nil { + return api.Err(http.StatusBadRequest, "%s", err) + } + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + } + + c.Response().Header().Set("Content-Location", req.URL.RequestURI()) + + if created { + return c.String(http.StatusCreated, "") + } + + return c.NoContent(http.StatusNoContent) +} + +func (h *FSHandler) DeleteFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + size := h.fs.Filesystem.Delete(path) + + if size < 0 { + return api.Err(http.StatusNotFound, "File not found", path) + } + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + } + + return c.String(http.StatusOK, "Deleted: "+path) +} + +func (h *FSHandler) ListFiles(c echo.Context) error { + pattern := util.DefaultQuery(c, "glob", "") + sortby := util.DefaultQuery(c, "sort", "none") + order := util.DefaultQuery(c, "order", "asc") + + files := h.fs.Filesystem.List(pattern) + + var sortFunc func(i, j int) bool + + switch sortby { + case "name": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } + } else { + sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } + } + case "size": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } + } else { + sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } + } + default: + if order == "asc" { + sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } + } else { + sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } + } + } + + sort.Slice(files, sortFunc) + + var fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) + + for i, f := range files { + 
fileinfos[i] = api.FileInfo{ + Name: f.Name(), + Size: f.Size(), + LastMod: f.ModTime().Unix(), + } + } + + return c.JSON(http.StatusOK, fileinfos) +} diff --git a/http/handler/memfs.go b/http/handler/memfs.go deleted file mode 100644 index 1369a6dc..00000000 --- a/http/handler/memfs.go +++ /dev/null @@ -1,130 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The MemFSHandler type provides handlers for manipulating a filesystem -type MemFSHandler struct { - filesystem fs.Filesystem -} - -// NewMemFS return a new MemFS type. You have to provide a filesystem to act on. -func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @ID memfs-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Router /memfs/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - stat, _ := file.Stat() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - 
c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @ID memfs-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, "") - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @ID memfs-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File not found", path) - } - - return c.String(http.StatusOK, "Deleted: "+path) -} diff --git a/http/server.go b/http/server.go index 
34d51579..6fe78975 100644 --- a/http/server.go +++ b/http/server.go @@ -29,19 +29,20 @@ package http import ( + "fmt" "net/http" "strings" cfgstore "github.com/datarhei/core/v16/config/store" "github.com/datarhei/core/v16/http/cache" "github.com/datarhei/core/v16/http/errorhandler" + "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/graph/resolver" "github.com/datarhei/core/v16/http/handler" api "github.com/datarhei/core/v16/http/handler/api" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/http/validator" - "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/monitor" "github.com/datarhei/core/v16/net" @@ -79,8 +80,7 @@ type Config struct { Metrics monitor.HistoryReader Prometheus prometheus.Reader MimeTypesFile string - DiskFS fs.Filesystem - MemFS MemFSConfig + Filesystems []fs.FS IPLimiter net.IPLimiter Profiling bool Cors CorsConfig @@ -94,13 +94,6 @@ type Config struct { ReadOnly bool } -type MemFSConfig struct { - EnableAuth bool - Username string - Password string - Filesystem fs.Filesystem -} - type CorsConfig struct { Origins []string } @@ -114,8 +107,6 @@ type server struct { handler struct { about *api.AboutHandler - memfs *handler.MemFSHandler - diskfs *handler.DiskFSHandler prometheus *handler.PrometheusHandler profiling *handler.ProfilingHandler ping *handler.PingHandler @@ -127,8 +118,6 @@ type server struct { log *api.LogHandler restream *api.RestreamHandler playout *api.PlayoutHandler - memfs *api.MemFSHandler - diskfs *api.DiskFSHandler rtmp *api.RTMPHandler srt *api.SRTHandler config *api.ConfigHandler @@ -148,18 +137,12 @@ type server struct { hlsrewrite echo.MiddlewareFunc } - memfs struct { - enableAuth bool - username string - password string - } - - diskfs fs.Filesystem - gzip struct { mimetypes []string } + filesystems map[string]*filesystem + router *echo.Echo mimeTypesFile string profiling bool @@ 
-167,32 +150,62 @@ type server struct { readOnly bool } +type filesystem struct { + fs.FS + + handler *handler.FSHandler +} + func NewServer(config Config) (Server, error) { s := &server{ logger: config.Logger, mimeTypesFile: config.MimeTypesFile, profiling: config.Profiling, - diskfs: config.DiskFS, readOnly: config.ReadOnly, } - s.v3handler.diskfs = api.NewDiskFS( - config.DiskFS, - config.Cache, - ) + s.filesystems = map[string]*filesystem{} - s.handler.diskfs = handler.NewDiskFS( - config.DiskFS, - config.Cache, - ) + corsPrefixes := map[string][]string{ + "/api": {"*"}, + } - s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ - PathPrefix: config.DiskFS.Base(), - }) + for _, fs := range config.Filesystems { + if _, ok := s.filesystems[fs.Name]; ok { + return nil, fmt.Errorf("the filesystem name '%s' is already in use", fs.Name) + } - s.memfs.enableAuth = config.MemFS.EnableAuth - s.memfs.username = config.MemFS.Username - s.memfs.password = config.MemFS.Password + if !strings.HasPrefix(fs.Mountpoint, "/") { + fs.Mountpoint = "/" + fs.Mountpoint + } + + if !strings.HasSuffix(fs.Mountpoint, "/") { + fs.Mountpoint = strings.TrimSuffix(fs.Mountpoint, "/") + } + + if _, ok := corsPrefixes[fs.Mountpoint]; ok { + return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", fs.Mountpoint, fs.Name) + } + + corsPrefixes[fs.Mountpoint] = config.Cors.Origins + + filesystem := &filesystem{ + FS: fs, + handler: handler.NewFS(fs), + } + + s.filesystems[filesystem.Name] = filesystem + + if fs.Filesystem.Type() == "disk" { + s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ + PathPrefix: fs.Filesystem.Base(), + }) + } + } + + if _, ok := corsPrefixes["/"]; !ok { + return nil, fmt.Errorf("one filesystem must be mounted at /") + } if config.Logger == nil { s.logger = log.New("HTTP") @@ -224,16 +237,6 @@ func NewServer(config Config) (Server, error) { ) } - if config.MemFS.Filesystem 
!= nil { - s.v3handler.memfs = api.NewMemFS( - config.MemFS.Filesystem, - ) - - s.handler.memfs = handler.NewMemFS( - config.MemFS.Filesystem, - ) - } - if config.Prometheus != nil { s.handler.prometheus = handler.NewPrometheus( config.Prometheus.HTTPHandler(), @@ -292,12 +295,6 @@ func NewServer(config Config) (Server, error) { Logger: s.logger, }) - if config.Cache != nil { - s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{ - Cache: config.Cache, - }) - } - s.v3handler.widget = api.NewWidget(api.WidgetConfig{ Restream: config.Restream, Registry: config.Sessions, @@ -308,11 +305,7 @@ func NewServer(config Config) (Server, error) { }) if middleware, err := mwcors.NewWithConfig(mwcors.Config{ - Prefixes: map[string][]string{ - "/": config.Cors.Origins, - "/api": {"*"}, - "/memfs": config.Cors.Origins, - }, + Prefixes: corsPrefixes, }); err != nil { return nil, err } else { @@ -437,65 +430,58 @@ func (s *server) setRoutes() { doc.Use(gzipMiddleware) doc.GET("", echoSwagger.WrapHandler) - // Serve static data - fs := s.router.Group("/*") - fs.Use(mwmime.NewWithConfig(mwmime.Config{ - MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "text/html", - })) - fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), - })) - if s.middleware.cache != nil { - fs.Use(s.middleware.cache) - } - fs.Use(s.middleware.hlsrewrite) - if s.middleware.session != nil { - fs.Use(s.middleware.session) - } + // Mount filesystems + for _, filesystem := range s.filesystems { + // Define a local variable because later in the loop we have a closure + filesystem := filesystem - fs.GET("", s.handler.diskfs.GetFile) - fs.HEAD("", s.handler.diskfs.GetFile) + mountpoint := filesystem.Mountpoint + "/*" + if filesystem.Mountpoint == "/" { + mountpoint = "/*" + } - // Memory FS - if s.handler.memfs != nil { - memfs := s.router.Group("/memfs/*") - memfs.Use(mwmime.NewWithConfig(mwmime.Config{ + fs := 
s.router.Group(mountpoint) + fs.Use(mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "application/data", + DefaultContentType: filesystem.DefaultContentType, })) - memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), - })) - if s.middleware.session != nil { - memfs.Use(s.middleware.session) - } - memfs.HEAD("", s.handler.memfs.GetFile) - memfs.GET("", s.handler.memfs.GetFile) - - var authmw echo.MiddlewareFunc - - if s.memfs.enableAuth { - authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { - if username == s.memfs.username && password == s.memfs.password { - return true, nil - } + if filesystem.Gzip { + fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ + Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), + Level: mwgzip.BestSpeed, + MinLength: 1000, + })) + } - return false, nil + if filesystem.Cache != nil { + mwcache := mwcache.NewWithConfig(mwcache.Config{ + Cache: filesystem.Cache, }) + fs.Use(mwcache) + } - memfs.POST("", s.handler.memfs.PutFile, authmw) - memfs.PUT("", s.handler.memfs.PutFile, authmw) - memfs.DELETE("", s.handler.memfs.DeleteFile, authmw) - } else { - memfs.POST("", s.handler.memfs.PutFile) - memfs.PUT("", s.handler.memfs.PutFile) - memfs.DELETE("", s.handler.memfs.DeleteFile) + fs.GET("", filesystem.handler.GetFile) + fs.HEAD("", filesystem.handler.GetFile) + + if filesystem.AllowWrite { + if filesystem.EnableAuth { + authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { + if username == filesystem.Username && password == filesystem.Password { + return true, nil + } + + return false, nil + }) + + fs.POST("", filesystem.handler.PutFile, authmw) + fs.PUT("", filesystem.handler.PutFile, authmw) + fs.DELETE("", filesystem.handler.DeleteFile, authmw) + } else { + fs.POST("", filesystem.handler.PutFile) + fs.PUT("", 
filesystem.handler.PutFile) + fs.DELETE("", filesystem.handler.DeleteFile) + } } } @@ -593,32 +579,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) { } } - // v3 Memory FS - if s.v3handler.memfs != nil { - v3.GET("/fs/mem", s.v3handler.memfs.ListFiles) - v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile) - - if !s.readOnly { - v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile) - v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile) - v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile) + // v3 Filesystems + fshandlers := map[string]api.FSConfig{} + for _, fs := range s.filesystems { + fshandlers[fs.Name] = api.FSConfig{ + Type: fs.Filesystem.Type(), + Mountpoint: fs.Mountpoint, + Handler: fs.handler, } } - // v3 Disk FS - v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles) - v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + handler := api.NewFS(fshandlers) + + v3.GET("/fs", handler.List) + + v3.GET("/fs/:name", handler.ListFiles) + v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) - v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) if !s.readOnly { - v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile) - v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile) + v3.PUT("/fs/:name/*", handler.PutFile) + v3.DELETE("/fs/:name/*", handler.DeleteFile) } // v3 RTMP diff --git a/io/fs/disk.go b/io/fs/disk.go index bf9e1843..c08cb367 100644 --- a/io/fs/disk.go +++ b/io/fs/disk.go @@ -15,6 +15,9 @@ import ( // DiskConfig is the config required to create a new disk // filesystem. 
type DiskConfig struct { + // Namee is the name of the filesystem + Name string + // Dir is the path to the directory to observe Dir string @@ -109,7 +112,8 @@ func (f *diskFile) Read(p []byte) (int, error) { // diskFilesystem implements the Filesystem interface type diskFilesystem struct { - dir string + name string + dir string // Max. size of the filesystem in bytes as // given by the config @@ -127,14 +131,20 @@ type diskFilesystem struct { // that implements the Filesystem interface func NewDiskFilesystem(config DiskConfig) (Filesystem, error) { fs := &diskFilesystem{ + name: config.Name, maxSize: config.Size, logger: config.Logger, } if fs.logger == nil { - fs.logger = log.New("DiskFS") + fs.logger = log.New("") } + fs.logger = fs.logger.WithFields(log.Fields{ + "name": fs.name, + "type": "disk", + }) + if err := fs.Rebase(config.Dir); err != nil { return nil, err } @@ -142,6 +152,10 @@ func NewDiskFilesystem(config DiskConfig) (Filesystem, error) { return fs, nil } +func (fs *diskFilesystem) Name() string { + return fs.name +} + func (fs *diskFilesystem) Base() string { return fs.dir } @@ -172,6 +186,10 @@ func (fs *diskFilesystem) Rebase(base string) error { return nil } +func (fs *diskFilesystem) Type() string { + return "diskfs" +} + func (fs *diskFilesystem) Size() (int64, int64) { // This is to cache the size for some time in order not to // stress the underlying filesystem too much. 
diff --git a/io/fs/dummy.go b/io/fs/dummy.go index 442d1586..b8e36ad1 100644 --- a/io/fs/dummy.go +++ b/io/fs/dummy.go @@ -20,10 +20,15 @@ func (d *dummyFile) Close() error { return nil } func (d *dummyFile) Name() string { return "" } func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil } -type dummyFilesystem struct{} +type dummyFilesystem struct { + name string + typ string +} +func (d *dummyFilesystem) Name() string { return d.name } func (d *dummyFilesystem) Base() string { return "/" } func (d *dummyFilesystem) Rebase(string) error { return nil } +func (d *dummyFilesystem) Type() string { return d.typ } func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 } func (d *dummyFilesystem) Resize(int64) {} func (d *dummyFilesystem) Files() int64 { return 0 } @@ -35,6 +40,9 @@ func (d *dummyFilesystem) DeleteAll() int64 { return func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} } // NewDummyFilesystem return a dummy filesystem -func NewDummyFilesystem() Filesystem { - return &dummyFilesystem{} +func NewDummyFilesystem(name, typ string) Filesystem { + return &dummyFilesystem{ + name: name, + typ: typ, + } } diff --git a/io/fs/fs.go b/io/fs/fs.go index d1923c47..8db65b46 100644 --- a/io/fs/fs.go +++ b/io/fs/fs.go @@ -38,12 +38,18 @@ type File interface { // Filesystem is an interface that provides access to a filesystem. type Filesystem interface { + // Name returns the name of this filesystem + Name() string + // Base returns the base path of this filesystem Base() string // Rebase sets a new base path for this filesystem Rebase(string) error + // Type returns the type of this filesystem + Type() string + // Size returns the consumed size and capacity of the filesystem in bytes. The // capacity is negative if the filesystem can consume as much space as it can. 
Size() (int64, int64) @@ -67,7 +73,7 @@ type Filesystem interface { Store(path string, r io.Reader) (int64, bool, error) // Delete removes a file at the given path from the filesystem. Returns the size of - // the remove file in bytes. The size is negative if the file doesn't exist. + // the removed file in bytes. The size is negative if the file doesn't exist. Delete(path string) int64 // DeleteAll removes all files from the filesystem. Returns the size of the diff --git a/io/fs/mem.go b/io/fs/mem.go index d682d0a3..1b8ca87e 100644 --- a/io/fs/mem.go +++ b/io/fs/mem.go @@ -15,6 +15,9 @@ import ( // MemConfig is the config that is required for creating // a new memory filesystem. type MemConfig struct { + // Namee is the name of the filesystem + Name string + // Base is the base path to be reported for this filesystem Base string @@ -107,6 +110,7 @@ func (f *memFile) Close() error { } type memFilesystem struct { + name string base string // Mapping of path to file @@ -136,6 +140,7 @@ type memFilesystem struct { // the Filesystem interface. 
func NewMemFilesystem(config MemConfig) Filesystem { fs := &memFilesystem{ + name: config.Name, base: config.Base, maxSize: config.Size, purge: config.Purge, @@ -143,9 +148,11 @@ func NewMemFilesystem(config MemConfig) Filesystem { } if fs.logger == nil { - fs.logger = log.New("MemFS") + fs.logger = log.New("") } + fs.logger = fs.logger.WithField("type", "mem") + fs.files = make(map[string]*memFile) fs.dataPool = sync.Pool{ @@ -155,6 +162,7 @@ func NewMemFilesystem(config MemConfig) Filesystem { } fs.logger.WithFields(log.Fields{ + "name": fs.name, "size_bytes": fs.maxSize, "purge": fs.purge, }).Debug().Log("Created") @@ -162,6 +170,10 @@ func NewMemFilesystem(config MemConfig) Filesystem { return fs } +func (fs *memFilesystem) Name() string { + return fs.name +} + func (fs *memFilesystem) Base() string { return fs.base } @@ -172,6 +184,10 @@ func (fs *memFilesystem) Rebase(base string) error { return nil } +func (fs *memFilesystem) Type() string { + return "memfs" +} + func (fs *memFilesystem) Size() (int64, int64) { fs.filesLock.RLock() defer fs.filesLock.RUnlock() diff --git a/io/fs/s3.go b/io/fs/s3.go new file mode 100644 index 00000000..dff8c738 --- /dev/null +++ b/io/fs/s3.go @@ -0,0 +1,389 @@ +package fs + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/datarhei/core/v16/glob" + "github.com/datarhei/core/v16/log" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +type S3Config struct { + // Namee is the name of the filesystem + Name string + Base string + Endpoint string + AccessKeyID string + SecretAccessKey string + Region string + Bucket string + UseSSL bool + + Logger log.Logger +} + +type s3fs struct { + name string + base string + + endpoint string + accessKeyID string + secretAccessKey string + region string + bucket string + useSSL bool + + client *minio.Client + + logger log.Logger +} + +func NewS3Filesystem(config S3Config) (Filesystem, error) { + fs := &s3fs{ + name: config.Name, + base: 
config.Base, + endpoint: config.Endpoint, + accessKeyID: config.AccessKeyID, + secretAccessKey: config.SecretAccessKey, + region: config.Region, + bucket: config.Bucket, + useSSL: config.UseSSL, + logger: config.Logger, + } + + if fs.logger == nil { + fs.logger = log.New("") + } + + client, err := minio.New(fs.endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""), + Region: fs.region, + Secure: fs.useSSL, + }) + + if err != nil { + return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err) + } + + fs.logger = fs.logger.WithFields(log.Fields{ + "name": fs.name, + "type": "s3", + "bucket": fs.bucket, + "region": fs.region, + "endpoint": fs.endpoint, + }) + + fs.logger.Debug().Log("Connected") + + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second)) + defer cancel() + + exists, err := client.BucketExists(ctx, fs.bucket) + if err != nil { + fs.logger.WithError(err).Log("Can't access bucket") + return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err) + } + + if exists { + fs.logger.Debug().Log("Bucket already exists") + } else { + fs.logger.Debug().Log("Bucket doesn't exists") + err = client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region}) + if err != nil { + fs.logger.WithError(err).Log("Can't create bucket") + return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err) + } else { + fs.logger.Debug().Log("Bucket created") + } + } + + fs.client = client + + return fs, nil +} + +func (fs *s3fs) Name() string { + return fs.name +} + +func (fs *s3fs) Base() string { + return fs.base +} + +func (fs *s3fs) Rebase(base string) error { + fs.base = base + + return nil +} + +func (fs *s3fs) Type() string { + return "s3fs" +} + +func (fs *s3fs) Size() (int64, int64) { + size := int64(0) + + files := fs.List("") + + for _, file := range files { + size += file.Size() + } + + return size, -1 +} + +func (fs *s3fs) Resize(size int64) {} + 
+func (fs *s3fs) Files() int64 { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{ + Recursive: true, + }) + + nfiles := int64(0) + + for object := range ch { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + } + nfiles++ + } + + return nfiles +} + +func (fs *s3fs) Symlink(oldname, newname string) error { + return fmt.Errorf("not implemented") +} + +func (fs *s3fs) Open(path string) File { + //ctx, cancel := context.WithCancel(context.Background()) + //defer cancel() + ctx := context.Background() + + object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{}) + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Not found") + return nil + } + + stat, err := object.Stat() + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Stat failed") + return nil + } + + file := &s3File{ + data: object, + name: stat.Key, + size: stat.Size, + lastModified: stat.LastModified, + } + + fs.logger.Debug().WithField("key", stat.Key).Log("Opened") + + return file +} + +func (fs *s3fs) Store(path string, r io.Reader) (int64, bool, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + overwrite := false + + _, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{}) + if err == nil { + overwrite = true + } + + info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{ + UserMetadata: map[string]string{}, + UserTags: map[string]string{}, + Progress: nil, + ContentType: "", + ContentEncoding: "", + ContentDisposition: "", + ContentLanguage: "", + CacheControl: "", + Mode: "", + RetainUntilDate: time.Time{}, + ServerSideEncryption: nil, + NumThreads: 0, + StorageClass: "", + WebsiteRedirectLocation: "", + PartSize: 0, + LegalHold: "", + SendContentMd5: false, + DisableContentSha256: false, + DisableMultipart: false, + Internal: 
minio.AdvancedPutOptions{}, + }) + if err != nil { + fs.logger.WithError(err).WithField("key", path).Log("Failed to store file") + return -1, false, err + } + + fs.logger.Debug().WithFields(log.Fields{ + "key": path, + "overwrite": overwrite, + }).Log("Stored") + + return info.Size, overwrite, nil +} + +func (fs *s3fs) Delete(path string) int64 { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{}) + if err != nil { + fs.logger.Debug().WithField("key", path).Log("Not found") + return -1 + } + + err = fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{ + GovernanceBypass: true, + }) + if err != nil { + fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file") + return -1 + } + + fs.logger.Debug().WithField("key", stat.Key).Log("Deleted") + + return stat.Size +} + +func (fs *s3fs) DeleteAll() int64 { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + totalSize := int64(0) + + objectsCh := make(chan minio.ObjectInfo) + + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh) + + for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{ + Recursive: true, + }) { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + continue + } + totalSize += object.Size + objectsCh <- object + } + }() + + for err := range fs.client.RemoveObjects(context.Background(), fs.bucket, objectsCh, minio.RemoveObjectsOptions{ + GovernanceBypass: true, + }) { + fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed") + } + + fs.logger.Debug().Log("Deleted all files") + + return totalSize +} + +func (fs *s3fs) List(pattern string) []FileInfo { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := fs.client.ListObjects(ctx, fs.bucket, 
minio.ListObjectsOptions{ + WithVersions: false, + WithMetadata: false, + Prefix: "", + Recursive: true, + MaxKeys: 0, + StartAfter: "", + UseV1: false, + }) + + files := []FileInfo{} + + for object := range ch { + if object.Err != nil { + fs.logger.WithError(object.Err).Log("Listing object failed") + continue + } + + if len(pattern) != 0 { + if ok, _ := glob.Match(pattern, object.Key, '/'); !ok { + continue + } + } + + f := &s3FileInfo{ + name: object.Key, + size: object.Size, + lastModified: object.LastModified, + } + + files = append(files, f) + } + + return files +} + +type s3FileInfo struct { + name string + size int64 + lastModified time.Time +} + +func (f *s3FileInfo) Name() string { + return f.name +} + +func (f *s3FileInfo) Size() int64 { + return f.size +} + +func (f *s3FileInfo) ModTime() time.Time { + return f.lastModified +} + +func (f *s3FileInfo) IsLink() (string, bool) { + return "", false +} + +func (f *s3FileInfo) IsDir() bool { + return false +} + +type s3File struct { + data io.ReadCloser + name string + size int64 + lastModified time.Time +} + +func (f *s3File) Read(p []byte) (int, error) { + return f.data.Read(p) +} + +func (f *s3File) Close() error { + return f.data.Close() +} + +func (f *s3File) Name() string { + return f.name +} + +func (f *s3File) Stat() (FileInfo, error) { + return &s3FileInfo{ + name: f.name, + size: f.size, + lastModified: f.lastModified, + }, nil +} diff --git a/restream/fs/fs.go b/restream/fs/fs.go index 29216aa9..4cc82081 100644 --- a/restream/fs/fs.go +++ b/restream/fs/fs.go @@ -62,6 +62,11 @@ func New(config Config) Filesystem { rfs.logger = log.New("") } + rfs.logger = rfs.logger.WithFields(log.Fields{ + "name": config.FS.Name(), + "type": config.FS.Type(), + }) + rfs.cleanupPatterns = make(map[string][]Pattern) // already drain the stop diff --git a/restream/restream.go b/restream/restream.go index f654cbf5..74ea2d41 100644 --- a/restream/restream.go +++ b/restream/restream.go @@ -62,8 +62,7 @@ type Config struct 
{ ID string Name string Store store.Store - DiskFS fs.Filesystem - MemFS fs.Filesystem + Filesystems []fs.Filesystem Replace replace.Replacer FFmpeg ffmpeg.FFmpeg MaxProcesses int64 @@ -94,8 +93,8 @@ type restream struct { maxProc int64 nProc int64 fs struct { - diskfs rfs.Filesystem - memfs rfs.Filesystem + list []rfs.Filesystem + diskfs []rfs.Filesystem stopObserver context.CancelFunc } replace replace.Replacer @@ -128,26 +127,18 @@ func New(config Config) (Restreamer, error) { r.store = store.NewDummyStore(store.DummyConfig{}) } - if config.DiskFS != nil { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: config.DiskFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"), + for _, fs := range config.Filesystems { + fs := rfs.New(rfs.Config{ + FS: fs, + Logger: r.logger.WithComponent("Cleanup"), }) - } else { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), - }) - } - if config.MemFS != nil { - r.fs.memfs = rfs.New(rfs.Config{ - FS: config.MemFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"), - }) - } else { - r.fs.memfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), - }) + r.fs.list = append(r.fs.list, fs) + + // Add the diskfs filesystems also to a separate array. 
We need it later for input and output validation + if fs.Type() == "diskfs" { + r.fs.diskfs = append(r.fs.diskfs, fs) + } } if r.replace == nil { @@ -186,12 +177,16 @@ func (r *restream) Start() { r.setCleanup(id, t.config) } - r.fs.diskfs.Start() - r.fs.memfs.Start() - ctx, cancel := context.WithCancel(context.Background()) r.fs.stopObserver = cancel - go r.observe(ctx, 10*time.Second) + + for _, fs := range r.fs.list { + fs.Start() + + if fs.Type() == "diskfs" { + go r.observe(ctx, fs, 10*time.Second) + } + } r.stopOnce = sync.Once{} }) @@ -215,14 +210,16 @@ func (r *restream) Stop() { r.fs.stopObserver() - r.fs.diskfs.Stop() - r.fs.memfs.Stop() + // Stop the cleanup jobs + for _, fs := range r.fs.list { + fs.Stop() + } r.startOnce = sync.Once{} }) } -func (r *restream) observe(ctx context.Context, interval time.Duration) { +func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() @@ -231,14 +228,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { case <-ctx.Done(): return case <-ticker.C: - size, limit := r.fs.diskfs.Size() + size, limit := fs.Size() isFull := false if limit > 0 && size >= limit { isFull = true } if isFull { - // Stop all tasks that write to disk + // Stop all tasks that write to this filesystem r.lock.Lock() for id, t := range r.tasks { if !t.valid { @@ -253,7 +250,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { continue } - r.logger.Warn().Log("Shutting down because disk is full") + r.logger.Warn().Log("Shutting down because filesystem is full") r.stopProcess(id) } r.lock.Unlock() @@ -503,34 +500,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) { } func (r *restream) setCleanup(id string, config *app.Config) { + rePrefix := regexp.MustCompile(`^([a-z]+):`) + for _, output := range config.Output { for _, c := range output.Cleanup { - if strings.HasPrefix(c.Pattern, "memfs:") { 
- r.fs.memfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "memfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, - }) - } else if strings.HasPrefix(c.Pattern, "diskfs:") { - r.fs.diskfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "diskfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, + matches := rePrefix.FindStringSubmatch(c.Pattern) + if matches == nil { + continue + } + + name := matches[1] + + // Support legacy names + if name == "diskfs" { + name = "disk" + } else if name == "memfs" { + name = "mem" + } + + for _, fs := range r.fs.list { + if fs.Name() != name { + continue + } + + pattern := rfs.Pattern{ + Pattern: rePrefix.ReplaceAllString(c.Pattern, ""), + MaxFiles: c.MaxFiles, + MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, + PurgeOnDelete: c.PurgeOnDelete, + } + + fs.SetCleanup(id, []rfs.Pattern{ + pattern, }) + + break } } } } func (r *restream) unsetCleanup(id string) { - r.fs.diskfs.UnsetCleanup(id) - r.fs.memfs.UnsetCleanup(id) + for _, fs := range r.fs.list { + fs.UnsetCleanup(id) + } } func (r *restream) setPlayoutPorts(t *task) error { @@ -619,9 +632,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID) } - io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + io.Address, err = r.validateInputAddress(io.Address, fs.Base()) + if err != nil { + maxFails++ + } + } + + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", 
config.ID, io.ID, io.Address, err) + } + } else { + io.Address, err = r.validateInputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + } } } @@ -651,15 +678,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID) } - isFile := false + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + isFile := false + io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Base()) + if err != nil { + maxFails++ + } - io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) - } + if isFile { + hasFiles = true + } + } - if isFile { - hasFiles = true + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + } else { + isFile := false + io.Address, isFile, err = r.validateOutputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + + if isFile { + hasFiles = true + } } } diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 00000000..ba95cdd1 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go: + - 1.3.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) 
+ - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 00000000..8d9a94a9 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 00000000..91b4ae56 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. 
+ +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. 
+ +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 00000000..f49dc337 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 00000000..1a2bf617 --- /dev/null +++ 
b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,173 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": 
BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 00000000..0b498f48 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. 
+const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 00000000..520ae3e5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. 
Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. 
+func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 00000000..620690de --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,40 @@ +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 00000000..1c62b640 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,46 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' 
{ + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 00000000..a2c2da31 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). +*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 00000000..dec61865 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. 
+ +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < -math.MaxFloat64 { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." 
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for 
i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 00000000..43d88a86 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 00000000..ae659e0e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,123 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. 
ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 00000000..dd3fbf5e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. 
+type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. 
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000..955dc0be --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000..15556530 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000..449e67cd --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..c8a9fbb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..313a0f88 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000..c589addf --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,85 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +# 
Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 00000000..92d2cc4a --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. 
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. 
+func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. 
+func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 00000000..f6b8aeab --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. +type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return 
&stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + 
case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) + } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git 
a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := 
locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) 
ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 
1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + 
+func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { 
+ return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return 
any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + 
+func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000..9d1e901a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if 
iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go 
b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, 
path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) 
ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 
+} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000..1f12f661 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() 
ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i 
:= 1; i < len(any.val); i++ { + if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) 
Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000..b45ef688 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! 
-d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000..2adcdc3b --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. 
+type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg 
*frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range 
extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) 
Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, 
len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 00000000..3095662b 
--- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
negative => true
zero => false| 23.2 => 23
-32.1 => -32| 12.1 => 12
-12.1 => 0|as normal|same as origin| +| string | empty string => false
string "0" => false
other strings => true | "123.32" => 123
"-123.4" => -123
"123.23xxxw" => 123
"abcde12" => 0
"-32.1" => -32| 13.2 => 13
-1.1 => 0 |12.1 => 12.1
-12.3 => -12.3
12.4xxa => 12.4
+1.1e2 =>110 |same as origin| +| bool | true => true
false => false| true => 1
false => 0 | true => 1
false => 0 |true => 1
false => 0|true => "true"
false => "false"| +| object | true | 0 | 0 |0|originnal json| +| array | empty array => false
nonempty array => true| [] => 0
[1,2] => 1 | [] => 0
[1,2] => 1 |[] => 0
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 00000000..29b31cf7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. 
+// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func 
(iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. +func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter 
*Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() 
(success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 00000000..204fe0e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. +func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff 
--git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000..8a3d8b6f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,342 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + 
if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' 
{ + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == 
iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' 
{ + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000..d786a89f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,346 @@ +package 
jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 
+func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return 
iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + 
uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000..58ee89c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. 
+func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field 
name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if 
iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if 
!iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000..e91eefb1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. 
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", 
string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000..9303de41 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + 
switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. 
+func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000..6cf66d04 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + 
iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 
'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. 
+ switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000..c2934f91 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. +// This set of interfaces reads input as required and gives +// better performance. 
+package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000..e2389b56 --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000..39acb320 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. 
create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). +type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ == nil || typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// 
WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if 
decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range 
ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + 
+type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000..13a0b7b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + 
encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000..8b6bc8b4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := 
encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000..74a97bfe --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type 
reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. +type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) 
DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} 
+ +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = 
&funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), 
decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + 
ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? 
+ if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000..98d45c1e --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. 
+func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = 
Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 00000000..eba434f2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,76 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type 
jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000..58296713 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder 
!= nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + 
reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but 
found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder 
*mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], 
sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000..3e21f375 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder 
ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return 
encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := 
valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000..f88722d1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + 
case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return 
decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + 
return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + 
*((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + 
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = 
append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000..fa71f474 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } 
else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go 
b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000..9441d79d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder 
*sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000..92ae912d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1097 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if 
!ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 
0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if 
fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var 
fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := 
knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + 
fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } 
else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = 
*(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 
*structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + 
return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + 
fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case 
decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type 
nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 
+ fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter 
*Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000..152e3ef5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range 
orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) 
IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type 
stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000..23d8a3ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. 
+func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. 
+func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + _, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[:0] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream 
*Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000..826aa594 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000..d1059ee4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, 
digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := 
val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go 
b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000..54c2ba0b --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML