Update dependencies and replace uuid library (#100)

This commit is contained in:
Joona Hoikkala 2018-08-10 16:51:32 +03:00 committed by GitHub
parent 8aa869b2f8
commit 75d4a30c1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1771 changed files with 103836 additions and 39300 deletions

228
Gopkg.lock generated
View File

@ -2,224 +2,358 @@
[[projects]] [[projects]]
digest = "1:289dd4d7abfb3ad2b5f728fbe9b1d5c1bf7d265a3eb9ef92869af1f7baba4c7a"
name = "github.com/BurntSushi/toml" name = "github.com/BurntSushi/toml"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "b26d9c308763d68093482582cea63d69be07a0f0" revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0" version = "v0.3.0"
[[projects]] [[projects]]
digest = "1:9ceecb4271682fd824475d451b6abf02b99ad04e72f8de3408a9d8b7fd15b933"
name = "github.com/ajg/form" name = "github.com/ajg/form"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "cc2954064ec9ea8d93917f0f87456e11d7b881ad" revision = "cc2954064ec9ea8d93917f0f87456e11d7b881ad"
version = "v1.5" version = "v1.5"
[[projects]] [[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew" name = "github.com/davecgh/go-spew"
packages = ["spew"] packages = ["spew"]
pruneopts = ""
revision = "346938d642f2ec3594ed81d874461961cd0faa76" revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0" version = "v1.1.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:358f9731027e87f8ae3a7e52f0473031be47d1e041295b3c0afc84dc779c0666"
name = "github.com/erikstmartin/go-testdb" name = "github.com/erikstmartin/go-testdb"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "8d10e4a1bae52cd8b81ffdec3445890d6dccab3d" revision = "8d10e4a1bae52cd8b81ffdec3445890d6dccab3d"
[[projects]] [[projects]]
digest = "1:55848e643a99a9dfceb19e090ce67111328fbb1780f34c62a0430994ff85fb90"
name = "github.com/fatih/structs" name = "github.com/fatih/structs"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "a720dfa8df582c51dee1b36feabb906bde1588bd" revision = "a720dfa8df582c51dee1b36feabb906bde1588bd"
version = "v1.0" version = "v1.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:7a98bc5cfa8e0c82156a33edb7ed582a9e700af089994e1b89bdb8517cd2db58"
name = "github.com/gavv/httpexpect" name = "github.com/gavv/httpexpect"
packages = ["."] packages = ["."]
revision = "c44a6d7bb636b17e880a53998a7f7061a56ffacb" pruneopts = ""
revision = "bdde308713130a703436e014ff782958c251d20a"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:44e52e928b91a686e2b9518f7eea82ba729d227d94b69574416bbe2d9d418b33"
name = "github.com/gavv/monotime" name = "github.com/gavv/monotime"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "6f8212e8d10df7383609d3c377ca08884d8f3ec0" revision = "6f8212e8d10df7383609d3c377ca08884d8f3ec0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:9abc49f39e3e23e262594bb4fb70abf74c0c99e94f99153f43b143805e850719"
name = "github.com/google/go-querystring" name = "github.com/google/go-querystring"
packages = ["query"] packages = ["query"]
pruneopts = ""
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]] [[projects]]
digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = ""
revision = "064e2069ce9c359c118179501254f67d7d37ba24"
version = "0.2"
[[projects]]
digest = "1:788735d9cba7f51b7cc86e6c7ba7b40b0b7c7b374bf3a0b5fa7c929fa2af2da8"
name = "github.com/imkira/go-interpol" name = "github.com/imkira/go-interpol"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "5accad8134979a6ac504d456a6c7f1c53da237ca" revision = "5accad8134979a6ac504d456a6c7f1c53da237ca"
version = "v1.1.0" version = "v1.1.0"
[[projects]] [[projects]]
digest = "1:3c818dada3e41bdb0f509f78e6775610f1bb179449ec8c4c86a45fae35460f3f"
name = "github.com/julienschmidt/httprouter" name = "github.com/julienschmidt/httprouter"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669" revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
version = "v1.1" version = "v1.1"
[[projects]] [[projects]]
digest = "1:4d0614a5d2e5e394368521b087428b2996ae95ebc4699afabc61adac9b7cec38"
name = "github.com/klauspost/compress" name = "github.com/klauspost/compress"
packages = ["flate","gzip","zlib"] packages = [
revision = "6c8db69c4b49dd4df1fff66996cf556176d0b9bf" "flate",
version = "v1.2.1" "gzip",
"zlib",
]
pruneopts = ""
revision = "b939724e787a27c0005cabe3f78e7ed7987ac74f"
version = "v1.4.0"
[[projects]] [[projects]]
digest = "1:f0117357f14b0a625ddbbe25e23637291ac0276402fcfd25fc447422456364ce"
name = "github.com/klauspost/cpuid" name = "github.com/klauspost/cpuid"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da" revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da"
version = "v1.1" version = "v1.1"
[[projects]]
name = "github.com/klauspost/crc32"
packages = ["."]
revision = "cb6bfca970f6908083f26f39a79009d608efd5cd"
version = "v1.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:09792d732b079867772cdbabdf7dc54ef9f9d04c998a9ce6226657151fccbb94"
name = "github.com/lib/pq" name = "github.com/lib/pq"
packages = [".","oid"] packages = [
revision = "27ea5d92de30060e7121ddd543fe14e9a327e0cc" ".",
"oid",
]
pruneopts = ""
revision = "90697d60dd844d5ef6ff15135d0203f65d2f53b8"
[[projects]] [[projects]]
digest = "1:bc03901fc8f0965ccba8bc453eae21a9b04f95999eab664c7de6dc7290f4e8f4"
name = "github.com/mattn/go-sqlite3" name = "github.com/mattn/go-sqlite3"
packages = ["."] packages = ["."]
revision = "6c771bb9887719704b210e87e934f08be014bdb1" pruneopts = ""
version = "v1.6.0" revision = "25ecb14adfc7543176f7d85291ec7dba82c6f7e4"
version = "v1.9.0"
[[projects]] [[projects]]
digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890"
name = "github.com/miekg/dns" name = "github.com/miekg/dns"
packages = ["."] packages = ["."]
revision = "5ec25f2a5044291b6c8abf43ed8a201da241e69e" pruneopts = ""
version = "v1.0.3" revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
version = "v1.0.8"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:fe67641b990bdc1802f8a1e462a4924210a8762a8a17b72e09656049c906b871"
name = "github.com/moul/http2curl" name = "github.com/moul/http2curl"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d" revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d"
[[projects]] [[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
name = "github.com/pmezard/go-difflib" name = "github.com/pmezard/go-difflib"
packages = ["difflib"] packages = ["difflib"]
pruneopts = ""
revision = "792786c7400a136282c1664665ae0a8db921c6c2" revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0" version = "v1.0.0"
[[projects]] [[projects]]
digest = "1:78c9cf43ddeacd0e472f412082227a0fac2ae107ee60e9112156f9371f9912cf"
name = "github.com/rs/cors" name = "github.com/rs/cors"
packages = ["."] packages = ["."]
revision = "7af7a1e09ba336d2ea14b1ce73bf693c6837dbf6" pruneopts = ""
version = "v1.2" revision = "3fb1b69b103a84de38a19c3c6ec073dd6caa4d3f"
version = "v1.5.0"
[[projects]] [[projects]]
name = "github.com/satori/go.uuid" digest = "1:3962f553b77bf6c03fc07cd687a22dd3b00fe11aa14d31194f5505f5bb65cdc8"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/sergi/go-diff" name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"] packages = ["diffmatchpatch"]
pruneopts = ""
revision = "1744e2970ca51c86172c8190fadad617561ed6e7" revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]] [[projects]]
digest = "1:3fcbf733a8d810a21265a7f2fe08a3353db2407da052b233f8b204b5afc03d9b"
name = "github.com/sirupsen/logrus" name = "github.com/sirupsen/logrus"
packages = [".","hooks/test"] packages = [
revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba" ".",
version = "v1.0.4" "hooks/test",
]
pruneopts = ""
revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
version = "v1.0.6"
[[projects]] [[projects]]
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
packages = ["assert","require"] packages = [
revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" "assert",
version = "v1.2.0" "require",
]
pruneopts = ""
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:857a9ecd5cb13379ecc8f798f6e6b6b574c98b9355657d91e068275f1120aaf7"
name = "github.com/valyala/bytebufferpool" name = "github.com/valyala/bytebufferpool"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:ed95b0b73de30dc5507de18b0cd6a2b79dc0ef685eefeb27fd1386f7a4e04f2b"
name = "github.com/valyala/fasthttp" name = "github.com/valyala/fasthttp"
packages = [".","fasthttputil","stackless"] packages = [
".",
"fasthttputil",
"stackless",
]
pruneopts = ""
revision = "e5f51c11919d4f66400334047b897ef0a94c6f3c" revision = "e5f51c11919d4f66400334047b897ef0a94c6f3c"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:e6338f2518362ff701a556bd76afd90a2168d1c658ec5d1ea1e9c5ef30a7d157"
name = "github.com/xeipuuv/gojsonpointer" name = "github.com/xeipuuv/gojsonpointer"
packages = ["."] packages = ["."]
revision = "6fe8760cad3569743d51ddbb243b26f8456742dc" pruneopts = ""
revision = "4e3ac2762d5f479393488629ee9370b50873b3a6"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:604f98a38394d2805a78c462396a4992b93fdd5b7306130add330f1a99ac6b0a"
name = "github.com/xeipuuv/gojsonreference" name = "github.com/xeipuuv/gojsonreference"
packages = ["."] packages = ["."]
revision = "e02fc20de94c78484cd5ffb007f8af96be030a45" pruneopts = ""
revision = "bd5ef7bd5415a7ac448318e64f11a24cd21e594b"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:0dd2250939dcf18c0b1a7b0e364e13b083331f723a6853ff2950a0c4273ff6e2"
name = "github.com/xeipuuv/gojsonschema" name = "github.com/xeipuuv/gojsonschema"
packages = ["."] packages = ["."]
revision = "511d08a359d14c0dd9c4302af52ee9abb6f93c2a" pruneopts = ""
revision = "6cd6dcbc9e7514bea255a9241665a6c2d0b37fb4"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:81daf39130b8efb47ab2b841ee42adedeaaf20ab3675236f577722ae78d37728"
name = "github.com/yalp/jsonpath" name = "github.com/yalp/jsonpath"
packages = ["."] packages = ["."]
revision = "31a79c7593bb93eb10b163650d4a3e6ca190e4dc" pruneopts = ""
revision = "5cc68e5049a040829faef3a44c00ec4332f6dec7"
[[projects]] [[projects]]
digest = "1:529ed3f98838f69e13761788d0cc71b44e130058fab13bae2ce09f7a176bced4"
name = "github.com/yudai/gojsondiff" name = "github.com/yudai/gojsondiff"
packages = [".","formatter"] packages = [
".",
"formatter",
]
pruneopts = ""
revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6" revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6"
version = "1.0.0" version = "1.0.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:9857bb2293f372b2181004d8b62179bbdb4ab0982ec6f762abe6cf2bfedaff85"
name = "github.com/yudai/golcs" name = "github.com/yudai/golcs"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68" revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:4cae11053a5fc8e7b08228fcc14d161d3e60b64ba508a8b216937da472690991"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["acme","acme/autocert","bcrypt","blowfish","ed25519","ed25519/internal/edwards25519","ssh/terminal"] packages = [
revision = "a6600008915114d9c087fad9f03d75087b1a74df" "acme",
"acme/autocert",
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"ssh/terminal",
]
pruneopts = ""
revision = "de0752318171da717af4ce24d0a2e8626afaeb11"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:67c2d940f2d5c017ef88e9847709dca9b38d5fe82f1e33fb42ace515219f22f1"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["bpf","idna","internal/iana","internal/socket","ipv4","ipv6","publicsuffix"] packages = [
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec" "bpf",
"idna",
"internal/iana",
"internal/socket",
"ipv4",
"ipv6",
"publicsuffix",
]
pruneopts = ""
revision = "f9ce57c11b242f0f1599cf25c89d8cb02c45295a"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:8812fbc18f45708b5580ed61267fefe5eeb29b36773bdfaad48b0843e3810c02"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix","windows"] packages = [
revision = "af50095a40f9041b3b38960738837185c26e9419" "unix",
"windows",
]
pruneopts = ""
revision = "f0d5e33068cb57c22a181f5df0ffda885309eb5a"
[[projects]] [[projects]]
branch = "master" digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] packages = [
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" "collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = ""
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]] [[projects]]
digest = "1:4014584c076f25aaf35d9de36c79ae2d208bba32c780f405e3395dad79292e22"
name = "gopkg.in/DATA-DOG/go-sqlmock.v1" name = "gopkg.in/DATA-DOG/go-sqlmock.v1"
packages = ["."] packages = ["."]
pruneopts = ""
revision = "d76b18b42f285b792bf985118980ce9eacea9d10" revision = "d76b18b42f285b792bf985118980ce9eacea9d10"
version = "v1.3.0" version = "v1.3.0"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "991ecc43a6e9bcfe3c39169d7509ee821076b0b75bbf7cb38ec06db3041cd009" input-imports = [
"github.com/BurntSushi/toml",
"github.com/erikstmartin/go-testdb",
"github.com/gavv/httpexpect",
"github.com/google/uuid",
"github.com/julienschmidt/httprouter",
"github.com/lib/pq",
"github.com/mattn/go-sqlite3",
"github.com/miekg/dns",
"github.com/rs/cors",
"github.com/sirupsen/logrus",
"github.com/sirupsen/logrus/hooks/test",
"github.com/valyala/fasthttp",
"golang.org/x/crypto/acme/autocert",
"golang.org/x/crypto/bcrypt",
"gopkg.in/DATA-DOG/go-sqlmock.v1",
]
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View File

@ -59,10 +59,6 @@ required = ["github.com/valyala/fasthttp"]
name = "github.com/rs/cors" name = "github.com/rs/cors"
version = "1.2.0" version = "1.2.0"
[[constraint]]
name = "github.com/satori/go.uuid"
version = "1.2.0"
[[constraint]] [[constraint]]
name = "github.com/sirupsen/logrus" name = "github.com/sirupsen/logrus"
version = "1.0.4" version = "1.0.4"

View File

@ -4,7 +4,7 @@ import (
"encoding/json" "encoding/json"
"net" "net"
"github.com/satori/go.uuid" "github.com/google/uuid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -76,8 +76,8 @@ func (a ACMETxt) allowedFromList(ips []string) bool {
func newACMETxt() ACMETxt { func newACMETxt() ACMETxt {
var a = ACMETxt{} var a = ACMETxt{}
password := generatePassword(40) password := generatePassword(40)
a.Username = uuid.NewV4() a.Username = uuid.New()
a.Password = password a.Password = password
a.Subdomain = uuid.NewV4().String() a.Subdomain = uuid.New().String()
return a return a
} }

View File

@ -9,9 +9,9 @@ import (
"testing" "testing"
"github.com/gavv/httpexpect" "github.com/gavv/httpexpect"
"github.com/google/uuid"
"github.com/julienschmidt/httprouter" "github.com/julienschmidt/httprouter"
"github.com/rs/cors" "github.com/rs/cors"
"github.com/satori/go.uuid"
"gopkg.in/DATA-DOG/go-sqlmock.v1" "gopkg.in/DATA-DOG/go-sqlmock.v1"
) )
@ -25,7 +25,7 @@ func noAuth(update httprouter.Handle) httprouter.Handle {
dec := json.NewDecoder(r.Body) dec := json.NewDecoder(r.Body)
_ = dec.Decode(&postData) _ = dec.Decode(&postData)
// Set user info to the decoded ACMETxt object // Set user info to the decoded ACMETxt object
postData.Username, _ = uuid.FromString(uname) postData.Username, _ = uuid.Parse(uname)
postData.Password = passwd postData.Password = passwd
// Set the ACMETxt struct to context to pull in from update function // Set the ACMETxt struct to context to pull in from update function
ctx := r.Context() ctx := r.Context()

2
db.go
View File

@ -9,9 +9,9 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/google/uuid"
_ "github.com/lib/pq" _ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3" _ "github.com/mattn/go-sqlite3"
"github.com/satori/go.uuid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )

View File

@ -2,9 +2,10 @@ package main
import ( import (
"database/sql" "database/sql"
"github.com/miekg/dns"
"github.com/satori/go.uuid"
"sync" "sync"
"github.com/google/uuid"
"github.com/miekg/dns"
) )
// Config is global configuration struct // Config is global configuration struct

View File

@ -3,12 +3,12 @@ package main
import ( import (
"unicode/utf8" "unicode/utf8"
"github.com/satori/go.uuid" "github.com/google/uuid"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
func getValidUsername(u string) (uuid.UUID, error) { func getValidUsername(u string) (uuid.UUID, error) {
uname, err := uuid.FromString(u) uname, err := uuid.Parse(u)
if err != nil { if err != nil {
return uuid.UUID{}, err return uuid.UUID{}, err
} }
@ -25,7 +25,7 @@ func validKey(k string) bool {
} }
func validSubdomain(s string) bool { func validSubdomain(s string) bool {
_, err := uuid.FromString(s) _, err := uuid.Parse(s)
if err == nil { if err == nil {
return true return true
} }

View File

@ -1,12 +1,13 @@
package main package main
import ( import (
"github.com/satori/go.uuid"
"testing" "testing"
"github.com/google/uuid"
) )
func TestGetValidUsername(t *testing.T) { func TestGetValidUsername(t *testing.T) {
v1, _ := uuid.FromString("a097455b-52cc-4569-90c8-7a4b97c6eba8") v1, _ := uuid.Parse("a097455b-52cc-4569-90c8-7a4b97c6eba8")
for i, test := range []struct { for i, test := range []struct {
uname string uname string
output uuid.UUID output uuid.UUID

View File

@ -5,14 +5,12 @@
"gotype", "gotype",
"goimports", "goimports",
"golint", "golint",
"gosimple",
"gofmt", "gofmt",
"vet", "vet",
"lll", "lll",
"staticcheck", "megacheck",
"ineffassign", "ineffassign",
"deadcode", "deadcode",
"unused",
"unconvert", "unconvert",
"misspell", "misspell",
"test", "test",

View File

@ -2,8 +2,7 @@ language: go
sudo: false sudo: false
go: go:
- 1.7 - 1.10.x
- 1.8
before_install: before_install:
- go get golang.org/x/tools/cmd/cover - go get golang.org/x/tools/cmd/cover

View File

@ -411,6 +411,207 @@ func TestValuePathError(t *testing.T) {
} }
} }
// based on github.com/yalp/jsonpath
func TestValuePathExpressions(t *testing.T) {
data := map[string]interface{}{
"A": []interface{}{
"string",
23.3,
3.0,
true,
false,
nil,
},
"B": "value",
"C": 3.14,
"D": map[string]interface{}{
"C": 3.1415,
"V": []interface{}{
"string2a",
"string2b",
map[string]interface{}{
"C": 3.141592,
},
},
},
"E": map[string]interface{}{
"A": []interface{}{"string3"},
"D": map[string]interface{}{
"V": map[string]interface{}{
"C": 3.14159265,
},
},
},
"F": map[string]interface{}{
"V": []interface{}{
"string4a",
"string4b",
map[string]interface{}{
"CC": 3.1415926535,
},
map[string]interface{}{
"CC": "hello",
},
[]interface{}{
"string5a",
"string5b",
},
[]interface{}{
"string6a",
"string6b",
},
},
},
}
reporter := newMockReporter(t)
runTests := func(tests map[string]interface{}) {
value := NewValue(reporter, data)
value.chain.assertOK(t)
for path, expected := range tests {
actual := value.Path(path)
actual.chain.assertOK(t)
assert.Equal(t, expected, actual.Raw())
}
}
t.Run("pick", func(t *testing.T) {
runTests(map[string]interface{}{
"$": data,
"$.A[0]": "string",
`$["A"][0]`: "string",
"$.A": []interface{}{"string", 23.3, 3.0, true, false, nil},
"$.A[*]": []interface{}{"string", 23.3, 3.0, true, false, nil},
"$.A.*": []interface{}{"string", 23.3, 3.0, true, false, nil},
"$.A.*.a": []interface{}{},
})
})
t.Run("slice", func(t *testing.T) {
runTests(map[string]interface{}{
"$.A[1,4,2]": []interface{}{23.3, false, 3.0},
`$["B","C"]`: []interface{}{"value", 3.14},
`$["C","B"]`: []interface{}{3.14, "value"},
"$.A[1:4]": []interface{}{23.3, 3.0, true},
"$.A[::2]": []interface{}{"string", 3.0, false},
"$.A[-2:]": []interface{}{false, nil},
"$.A[:-1]": []interface{}{"string", 23.3, 3.0, true, false},
"$.A[::-1]": []interface{}{nil, false, true, 3.0, 23.3, "string"},
"$.F.V[4:5][0,1]": []interface{}{"string5a", "string5b"},
"$.F.V[4:6][1]": []interface{}{"string5b", "string6b"},
"$.F.V[4:6][0,1]": []interface{}{"string5a", "string5b", "string6a", "string6b"},
"$.F.V[4,5][0:2]": []interface{}{"string5a", "string5b", "string6a", "string6b"},
"$.F.V[4:6]": []interface{}{
[]interface{}{
"string5a",
"string5b",
},
[]interface{}{
"string6a",
"string6b",
},
},
})
})
t.Run("quote", func(t *testing.T) {
runTests(map[string]interface{}{
`$[A][0]`: "string",
`$["A"][0]`: "string",
`$[B,C]`: []interface{}{"value", 3.14},
`$["B","C"]`: []interface{}{"value", 3.14},
})
})
t.Run("search", func(t *testing.T) {
runTests(map[string]interface{}{
"$..C": []interface{}{3.14, 3.1415, 3.141592, 3.14159265},
`$..["C"]`: []interface{}{3.14, 3.1415, 3.141592, 3.14159265},
"$.D.V..C": []interface{}{3.141592},
"$.D.V.*.C": []interface{}{3.141592},
"$.D.V..*.C": []interface{}{3.141592},
"$.D.*..C": []interface{}{3.141592},
"$.*.V..C": []interface{}{3.141592},
"$.*.D.V.C": []interface{}{3.14159265},
"$.*.D..C": []interface{}{3.14159265},
"$.*.D.V..*": []interface{}{3.14159265},
"$..D..V..C": []interface{}{3.141592, 3.14159265},
"$.*.*.*.C": []interface{}{3.141592, 3.14159265},
"$..V..C": []interface{}{3.141592, 3.14159265},
"$.D.V..*": []interface{}{
"string2a",
"string2b",
map[string]interface{}{
"C": 3.141592,
},
3.141592,
},
"$..A": []interface{}{
[]interface{}{"string", 23.3, 3.0, true, false, nil},
[]interface{}{"string3"},
},
"$..A..*": []interface{}{"string", 23.3, 3.0, true, false, nil, "string3"},
"$.A..*": []interface{}{"string", 23.3, 3.0, true, false, nil},
"$.A.*": []interface{}{"string", 23.3, 3.0, true, false, nil},
"$..A[0,1]": []interface{}{"string", 23.3},
"$..A[0]": []interface{}{"string", "string3"},
"$.*.V[0]": []interface{}{"string2a", "string4a"},
"$.*.V[1]": []interface{}{"string2b", "string4b"},
"$.*.V[0,1]": []interface{}{"string2a", "string2b", "string4a", "string4b"},
"$.*.V[0:2]": []interface{}{"string2a", "string2b", "string4a", "string4b"},
"$.*.V[2].C": []interface{}{3.141592},
"$..V[2].C": []interface{}{3.141592},
"$..V[*].C": []interface{}{3.141592},
"$.*.V[2].*": []interface{}{3.141592, 3.1415926535},
"$.*.V[2:3].*": []interface{}{3.141592, 3.1415926535},
"$.*.V[2:4].*": []interface{}{3.141592, 3.1415926535, "hello"},
"$..V[2,3].CC": []interface{}{3.1415926535, "hello"},
"$..V[2:4].CC": []interface{}{3.1415926535, "hello"},
"$..V[*].*": []interface{}{
3.141592,
3.1415926535,
"hello",
"string5a",
"string5b",
"string6a",
"string6b",
},
"$..[0]": []interface{}{
"string",
"string2a",
"string3",
"string4a",
"string5a",
"string6a",
},
"$..ZZ": []interface{}{},
})
})
}
func TestValuePathIntFloat(t *testing.T) {
reporter := newMockReporter(t)
data := map[string]interface{}{
"A": 123,
"B": 123.0,
}
value := NewValue(reporter, data)
value.chain.assertOK(t)
a := value.Path(`$["A"]`)
a.chain.assertOK(t)
assert.Equal(t, 123.0, a.Raw())
b := value.Path(`$["B"]`)
b.chain.assertOK(t)
assert.Equal(t, 123.0, b.Raw())
}
func TestValueSchema(t *testing.T) { func TestValueSchema(t *testing.T) {
reporter := newMockReporter(t) reporter := newMockReporter(t)

9
vendor/github.com/google/uuid/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,9 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

10
vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,10 @@
# How to contribute
We definitely welcome patches and contribution to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

9
vendor/github.com/google/uuid/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

View File

@ -1,5 +1,4 @@
Copyright (c) 2012 The Go Authors. All rights reserved. Copyright (c) 2009,2014 Google Inc. All rights reserved.
Copyright (c) 2015 Klaus Post
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are

23
vendor/github.com/google/uuid/README.md generated vendored Normal file
View File

@ -0,0 +1,23 @@
**This package is currently in development and the API may not be stable.**
The API will become stable with v1.
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid

80
vendor/github.com/google/uuid/dce.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the users UID for the Person
// domain and the users GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCEPerson(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCEGroup(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}

12
vendor/github.com/google/uuid/doc.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

53
vendor/github.com/google/uuid/hash.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write([]byte(data))
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}

62
vendor/github.com/google/uuid/json_test.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/json"
"reflect"
"testing"
)
var testUUID = Must(Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479"))
func TestJSON(t *testing.T) {
type S struct {
ID1 UUID
ID2 UUID
}
s1 := S{ID1: testUUID}
data, err := json.Marshal(&s1)
if err != nil {
t.Fatal(err)
}
var s2 S
if err := json.Unmarshal(data, &s2); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(&s1, &s2) {
t.Errorf("got %#v, want %#v", s2, s1)
}
}
func BenchmarkUUID_MarshalJSON(b *testing.B) {
x := &struct {
UUID UUID `json:"uuid"`
}{}
var err error
x.UUID, err = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
js, err := json.Marshal(x)
if err != nil {
b.Fatalf("marshal json: %#v (%v)", js, err)
}
}
}
// BenchmarkUUID_UnmarshalJSON measures JSON decoding into a struct with a
// tagged UUID field.
func BenchmarkUUID_UnmarshalJSON(b *testing.B) {
	payload := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`)
	var target *struct {
		UUID UUID `json:"uuid"`
	}
	for n := 0; n < b.N; n++ {
		if err := json.Unmarshal(payload, &target); err != nil {
			b.Fatalf("marshal json: %#v (%v)", payload, err)
		}
	}
}

39
vendor/github.com/google/uuid/marshal.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
// It renders the canonical 36-byte hex-and-dash form; it never fails.
func (uuid UUID) MarshalText() ([]byte, error) {
	var buf [36]byte
	encodeHex(buf[:], uuid)
	return buf[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
// The receiver is only overwritten on a successful parse, so a failed
// unmarshal leaves the previous value intact.
func (uuid *UUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err == nil {
		*uuid = id
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
// It returns the UUID's raw 16 bytes; it never fails.
func (uuid UUID) MarshalBinary() ([]byte, error) {
	return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
// The input must be exactly 16 raw bytes; anything else is rejected.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
	if n := len(data); n != 16 {
		return fmt.Errorf("invalid UUID (got %d bytes)", n)
	}
	copy(uuid[:], data)
	return nil
}

103
vendor/github.com/google/uuid/node.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"net"
"sync"
)
var (
nodeMu sync.Mutex
interfaces []net.Interface // cached list of interfaces
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived.  The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
	// Lock first, then defer the release; the original deferred the Unlock
	// before calling Lock, which works (defer args bind at the defer
	// statement) but reads backwards.
	nodeMu.Lock()
	defer nodeMu.Unlock()
	return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated.  If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
	// Idiomatic lock ordering: acquire, then defer the release.
	nodeMu.Lock()
	defer nodeMu.Unlock()
	return setNodeInterface(name)
}
// setNodeInterface implements SetNodeInterface; callers must hold nodeMu.
func setNodeInterface(name string) bool {
	// Lazily cache the system interface list on first use.  A lookup
	// failure is only fatal when a specific interface was requested.
	if interfaces == nil {
		var err error
		interfaces, err = net.Interfaces()
		if err != nil && name != "" {
			return false
		}
	}
	for _, ifs := range interfaces {
		// Take the first interface with a hardware address of at least
		// 6 bytes, or the exact named interface when name is non-empty.
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			copy(nodeID[:], ifs.HardwareAddr)
			ifname = ifs.Name
			return true
		}
	}
	// We found no interfaces with a valid hardware address.  If name
	// does not specify a specific interface generate a random Node ID
	// (section 4.1.6)
	if name == "" {
		randomBits(nodeID[:])
		return true
	}
	return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
	// Idiomatic lock ordering: acquire, then defer the release.
	nodeMu.Lock()
	defer nodeMu.Unlock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	// Return a slice over a local copy so callers cannot mutate the
	// package-level nodeID.
	nid := nodeID
	return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
// of id are used.  If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
	if len(id) < 6 {
		return false
	}
	// Idiomatic lock ordering: acquire, then defer the release.
	nodeMu.Lock()
	defer nodeMu.Unlock()
	copy(nodeID[:], id)
	ifname = "user" // sentinel reported by NodeInterface
	return true
}
// NodeID returns the 6 byte node id encoded in uuid.  The NodeID is only well
// defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
	// The original guarded on len(uuid) != 16, but uuid is a [16]byte
	// array whose length is always 16, so that branch could never fire
	// and has been removed.
	var node [6]byte
	copy(node[:], uuid[10:])
	return node[:]
}

66
vendor/github.com/google/uuid/seq_test.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"flag"
"runtime"
"testing"
"time"
)
// This test is only run when --regressions is passed on the go test line.
var regressions = flag.Bool("regressions", false, "run uuid regression tests")

// TestClockSeqRace tests for a particular race condition of returning two
// identical Version1 UUIDs.  The duration of 1 minute was chosen as the race
// condition, before being fixed, nearly always occurred in under 30 seconds.
func TestClockSeqRace(t *testing.T) {
	if !*regressions {
		t.Skip("skipping regression tests")
	}
	duration := time.Minute
	done := make(chan struct{})
	defer close(done)
	ch := make(chan UUID, 10000)
	ncpu := runtime.NumCPU()
	switch ncpu {
	case 0, 1:
		// We can't run the test effectively.
		t.Skip("skipping race test, only one CPU detected")
		return
	default:
		runtime.GOMAXPROCS(ncpu)
	}
	// Saturate every CPU with goroutines generating Version 1 UUIDs.
	for i := 0; i < ncpu; i++ {
		go func() {
			for {
				select {
				case <-done:
					return
				case ch <- Must(NewUUID()):
				}
			}
		}()
	}
	// Collect generated UUIDs for the full duration, failing on the
	// first duplicate.
	uuids := make(map[string]bool)
	cnt := 0
	start := time.Now()
	for u := range ch {
		s := u.String()
		if uuids[s] {
			t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
			return
		}
		uuids[s] = true
		if time.Since(start) > duration {
			return
		}
		cnt++
	}
}

58
vendor/github.com/google/uuid/sql.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
	// Bind the concrete value in the type switch instead of re-asserting
	// src in every branch.
	switch src := src.(type) {
	case string:
		// if an empty UUID comes from a table, we return a null UUID
		if src == "" {
			return nil
		}
		// see Parse for required string format
		u, err := Parse(src)
		if err != nil {
			return fmt.Errorf("Scan: %v", err)
		}
		*uuid = u
	case []byte:
		// if an empty UUID comes from a table, we return a null UUID
		if len(src) == 0 {
			return nil
		}
		// assumes a simple slice of bytes if 16 bytes
		// otherwise attempts to parse
		if len(src) != 16 {
			return uuid.Scan(string(src))
		}
		copy((*uuid)[:], src)
	default:
		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
	}
	return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
// It always succeeds: String on the array-backed UUID cannot fail.
func (uuid UUID) Value() (driver.Value, error) {
	return uuid.String(), nil
}

102
vendor/github.com/google/uuid/sql_test.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"strings"
"testing"
)
// TestScan exercises UUID.Scan with string, []byte, wrong-type, invalid
// and empty inputs.
func TestScan(t *testing.T) {
	// `x := v` replaces the redundant `var x T = v` declarations.
	stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479"
	badTypeTest := 6
	invalidTest := "f47ac10b-58cc-0372-8567-0e02b2c3d4"
	byteTest := make([]byte, 16)
	byteTestUUID := Must(Parse(stringTest))
	copy(byteTest, byteTestUUID[:])
	// sunny day tests
	var uuid UUID
	err := (&uuid).Scan(stringTest)
	if err != nil {
		t.Fatal(err)
	}
	err = (&uuid).Scan([]byte(stringTest))
	if err != nil {
		t.Fatal(err)
	}
	err = (&uuid).Scan(byteTest)
	if err != nil {
		t.Fatal(err)
	}
	// bad type tests
	err = (&uuid).Scan(badTypeTest)
	if err == nil {
		t.Error("int correctly parsed and shouldn't have")
	}
	if !strings.Contains(err.Error(), "unable to scan type") {
		t.Error("attempting to parse an int returned an incorrect error message")
	}
	// invalid/incomplete uuids
	err = (&uuid).Scan(invalidTest)
	if err == nil {
		t.Error("invalid uuid was parsed without error")
	}
	if !strings.Contains(err.Error(), "invalid UUID") {
		t.Error("attempting to parse an invalid UUID returned an incorrect error message")
	}
	err = (&uuid).Scan(byteTest[:len(byteTest)-2])
	if err == nil {
		t.Error("invalid byte uuid was parsed without error")
	}
	if !strings.Contains(err.Error(), "invalid UUID") {
		t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
	}
	// empty tests
	uuid = UUID{}
	var emptySlice []byte
	err = (&uuid).Scan(emptySlice)
	if err != nil {
		t.Fatal(err)
	}
	for _, v := range uuid {
		if v != 0 {
			t.Error("UUID was not nil after scanning empty byte slice")
		}
	}
	uuid = UUID{}
	var emptyString string
	err = (&uuid).Scan(emptyString)
	if err != nil {
		t.Fatal(err)
	}
	for _, v := range uuid {
		if v != 0 {
			// Fixed copy-paste failure message: this branch scans an
			// empty string, not an empty byte slice.
			t.Error("UUID was not nil after scanning empty string")
		}
	}
}
// TestValue verifies Value returns the canonical string form of the UUID.
func TestValue(t *testing.T) {
	const want = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
	id := Must(Parse(want))
	got, _ := id.Value()
	if got != want {
		t.Error("Value() did not return expected string")
	}
}

123
vendor/github.com/google/uuid/time.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
	// Shift from the Gregorian epoch (15 Oct 1582) to the Unix epoch,
	// still in 100ns ticks.
	sec = int64(t - g1582ns100)
	// Sub-second remainder, converted from 100ns ticks to nanoseconds.
	nsec = (sec % 10000000) * 100
	sec /= 10000000
	return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed.  An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
	// Idiomatic lock ordering: acquire, then defer the release.
	timeMu.Lock()
	defer timeMu.Unlock()
	return getTime()
}
// getTime implements GetTime; callers must hold timeMu.
func getTime() (Time, uint16, error) {
	t := timeNow()
	// If we don't have a clock sequence already, set one.
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	// Convert Unix nanoseconds to 100ns ticks since 15 Oct 1582.
	now := uint64(t.UnixNano()/100) + g1582ns100
	// If time has gone backwards with this clock sequence then we
	// increment the clock sequence
	// (note: <= also bumps the sequence when the tick has not advanced).
	if now <= lasttime {
		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
	}
	lasttime = now
	return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set.  The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated.  Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
func ClockSequence() int {
	// Idiomatic lock ordering: acquire, then defer the release.
	timeMu.Lock()
	defer timeMu.Unlock()
	return clockSequence()
}
// clockSequence implements ClockSequence; callers must hold timeMu.
func clockSequence() int {
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	// Mask off the variant bits kept in the top two bits of clockSeq.
	return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq.
// Setting to -1 causes a new sequence to be generated.
// (The doc comment previously named the function "SetClockSeq".)
func SetClockSequence(seq int) {
	// Idiomatic lock ordering: acquire, then defer the release.
	timeMu.Lock()
	defer timeMu.Unlock()
	setClockSequence(seq)
}
// setClockSequence implements SetClockSequence; callers must hold timeMu.
func setClockSequence(seq int) {
	if seq == -1 {
		var b [2]byte
		randomBits(b[:]) // clock sequence
		seq = int(b[0])<<8 | int(b[1])
	}
	oldSeq := clockSeq // renamed from old_seq: Go uses camelCase, not snake_case
	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
	if oldSeq != clockSeq {
		// A new sequence invalidates the last-time monotonicity guard.
		lasttime = 0
	}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid.  The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
	// Reassemble time_low, time_mid and time_hi (minus the version
	// nibble).  The local was renamed from "time", which shadowed the
	// imported time package.
	t := int64(binary.BigEndian.Uint32(uuid[0:4]))
	t |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
	t |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
	return Time(t)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
	// Bytes 8-9 hold the variant bits plus the 14-bit clock sequence.
	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}

43
vendor/github.com/google/uuid/util.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data read from the
// package-level rander source.  It panics if the source fails.
func randomBits(b []byte) {
	if _, err := io.ReadFull(rander, b); err != nil {
		panic(err.Error()) // rand should never fail
	}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
// The bool result is false when either character is not a hex digit
// (i.e. maps to 255 in xvalues).
func xtob(x1, x2 byte) (byte, bool) {
	hi, lo := xvalues[x1], xvalues[x2]
	return hi<<4 | lo, hi != 255 && lo != 255
}

191
vendor/github.com/google/uuid/uuid.go generated vendored Normal file
View File

@ -0,0 +1,191 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error.  Both the UUID form of
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
func Parse(s string) (UUID, error) {
	var uuid UUID
	if len(s) != 36 {
		// The only other accepted length is the urn:uuid:-prefixed form.
		if len(s) != 36+9 {
			return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
		}
		if strings.ToLower(s[:9]) != "urn:uuid:" {
			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]
	}
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	// Offsets of the 16 hex byte pairs within the canonical text form.
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34} {
		// Dropped the else-after-return flagged by golint.
		v, ok := xtob(s[x], s[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
	var uuid UUID
	if len(b) != 36 {
		// The only other accepted length is the urn:uuid:-prefixed form.
		if len(b) != 36+9 {
			return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
		}
		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
		}
		b = b[9:]
	}
	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	// Offsets of the 16 hex byte pairs within the canonical text form.
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34} {
		// Dropped the else-after-return flagged by golint.
		v, ok := xtob(b[x], b[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}
// Must returns uuid if err is nil and panics otherwise.
// It wraps calls that return (UUID, error), e.g. Must(Parse(s)), for use
// in variable initializations.
func Must(uuid UUID, err error) UUID {
	if err != nil {
		panic(err)
	}
	return uuid
}
// String returns the string form of uuid,
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.  Because UUID is a byte array,
// every value has a string form; the "" case mentioned by earlier docs
// cannot occur here.
func (uuid UUID) String() string {
	var buf [36]byte
	encodeHex(buf[:], uuid)
	return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.  Because UUID is a byte
// array the 45-byte form is always produced; "" is never returned here.
func (uuid UUID) URN() string {
	var buf [36 + 9]byte
	copy(buf[:], "urn:uuid:")
	encodeHex(buf[9:], uuid)
	return string(buf[:])
}
// encodeHex writes the canonical 36-byte hex-and-dash textual form of uuid
// into dst, which must be at least 36 bytes long.
func encodeHex(dst []byte, uuid UUID) {
	hex.Encode(dst, uuid[:4]) // dst[:] was a redundant re-slice of a slice
	dst[8] = '-'
	hex.Encode(dst[9:13], uuid[4:6])
	dst[13] = '-'
	hex.Encode(dst[14:18], uuid[6:8])
	dst[18] = '-'
	hex.Encode(dst[19:23], uuid[8:10])
	dst[23] = '-'
	hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
	// The variant lives in the top bits of byte 8:
	// 10xxxxxx = RFC4122, 110xxxxx = Microsoft, 111xxxxx = Future,
	// anything else = Reserved.
	switch {
	case (uuid[8] & 0xc0) == 0x80:
		return RFC4122
	case (uuid[8] & 0xe0) == 0xc0:
		return Microsoft
	case (uuid[8] & 0xe0) == 0xe0:
		return Future
	default:
		return Reserved
	}
}
// Version returns the version of uuid,
// encoded in the high nibble of byte 6.
func (uuid UUID) Version() Version {
	return Version(uuid[6] >> 4)
}
// String renders v as VERSION_n, or BAD_VERSION_n for out-of-range values.
func (v Version) String() string {
	if v <= 15 {
		return fmt.Sprintf("VERSION_%d", v)
	}
	return fmt.Sprintf("BAD_VERSION_%d", v)
}
// String returns the symbolic name of the variant, or BadVariant%d for
// values outside the defined constants.
func (v Variant) String() string {
	switch v {
	case Invalid:
		return "Invalid"
	case RFC4122:
		return "RFC4122"
	case Reserved:
		return "Reserved"
	case Microsoft:
		return "Microsoft"
	case Future:
		return "Future"
	}
	return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// ("implents" typo fixed.)  If r.Read returns an error when the package
// requests random data then a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
	if r == nil {
		rander = rand.Reader
		return
	}
	rander = r
}

526
vendor/github.com/google/uuid/uuid_test.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
"time"
"unsafe"
)
type test struct {
in string
version Version
variant Variant
isuuid bool
}
var tests = []test{
{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
}
var constants = []struct {
c interface{}
name string
}{
{Person, "Person"},
{Group, "Group"},
{Org, "Org"},
{Invalid, "Invalid"},
{RFC4122, "RFC4122"},
{Reserved, "Reserved"},
{Microsoft, "Microsoft"},
{Future, "Future"},
{Domain(17), "Domain17"},
{Variant(42), "BadVariant42"},
}
func testTest(t *testing.T, in string, tt test) {
uuid, err := Parse(in)
if ok := (err == nil); ok != tt.isuuid {
t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid)
}
if err != nil {
return
}
if v := uuid.Variant(); v != tt.variant {
t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant)
}
if v := uuid.Version(); v != tt.version {
t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version)
}
}
func testBytes(t *testing.T, in []byte, tt test) {
uuid, err := ParseBytes(in)
if ok := (err == nil); ok != tt.isuuid {
t.Errorf("ParseBytes(%s) got %v expected %v\b", in, ok, tt.isuuid)
}
if err != nil {
return
}
suuid, _ := Parse(string(in))
if uuid != suuid {
t.Errorf("ParseBytes(%s) got %v expected %v\b", in, uuid, suuid)
}
}
func TestUUID(t *testing.T) {
for _, tt := range tests {
testTest(t, tt.in, tt)
testTest(t, strings.ToUpper(tt.in), tt)
testBytes(t, []byte(tt.in), tt)
}
}
func TestConstants(t *testing.T) {
for x, tt := range constants {
v, ok := tt.c.(fmt.Stringer)
if !ok {
t.Errorf("%x: %v: not a stringer", x, v)
} else if s := v.String(); s != tt.name {
v, _ := tt.c.(int)
t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name)
}
}
}
func TestRandomUUID(t *testing.T) {
m := make(map[string]bool)
for x := 1; x < 32; x++ {
uuid := New()
s := uuid.String()
if m[s] {
t.Errorf("NewRandom returned duplicated UUID %s", s)
}
m[s] = true
if v := uuid.Version(); v != 4 {
t.Errorf("Random UUID of version %s", v)
}
if uuid.Variant() != RFC4122 {
t.Errorf("Random UUID is variant %d", uuid.Variant())
}
}
}
func TestNew(t *testing.T) {
m := make(map[UUID]bool)
for x := 1; x < 32; x++ {
s := New()
if m[s] {
t.Errorf("New returned duplicated UUID %s", s)
}
m[s] = true
uuid, err := Parse(s.String())
if err != nil {
t.Errorf("New.String() returned %q which does not decode", s)
continue
}
if v := uuid.Version(); v != 4 {
t.Errorf("Random UUID of version %s", v)
}
if uuid.Variant() != RFC4122 {
t.Errorf("Random UUID is variant %d", uuid.Variant())
}
}
}
func TestClockSeq(t *testing.T) {
// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
defer func(orig func() time.Time) { timeNow = orig }(timeNow)
monTime := time.Now()
timeNow = func() time.Time {
monTime = monTime.Add(1 * time.Second)
return monTime
}
SetClockSequence(-1)
uuid1, err := NewUUID()
if err != nil {
t.Fatalf("could not create UUID: %v", err)
}
uuid2, err := NewUUID()
if err != nil {
t.Fatalf("could not create UUID: %v", err)
}
if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 != s2 {
t.Errorf("clock sequence %d != %d", s1, s2)
}
SetClockSequence(-1)
uuid2, err = NewUUID()
if err != nil {
t.Fatalf("could not create UUID: %v", err)
}
// Just on the very off chance we generated the same sequence
// two times we try again.
if uuid1.ClockSequence() == uuid2.ClockSequence() {
SetClockSequence(-1)
uuid2, err = NewUUID()
if err != nil {
t.Fatalf("could not create UUID: %v", err)
}
}
if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 == s2 {
t.Errorf("Duplicate clock sequence %d", s1)
}
SetClockSequence(0x1234)
uuid1, err = NewUUID()
if err != nil {
t.Fatalf("could not create UUID: %v", err)
}
if seq := uuid1.ClockSequence(); seq != 0x1234 {
t.Errorf("%s: expected seq 0x1234 got 0x%04x", uuid1, seq)
}
}
// TestCoding round-trips a known UUID through String, URN and Parse.
func TestCoding(t *testing.T) {
	text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
	urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
	data := UUID{
		0x7d, 0x44, 0x48, 0x40,
		0x9d, 0xc0,
		0x11, 0xd1,
		0xb2, 0x45,
		0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
	}
	if v := data.String(); v != text {
		t.Errorf("%x: encoded to %s, expected %s", data, v, text)
	}
	if v := data.URN(); v != urn {
		t.Errorf("%x: urn is %s, expected %s", data, v, urn)
	}
	uuid, err := Parse(text)
	if err != nil {
		t.Errorf("Parse returned unexpected error %v", err)
	}
	// BUG FIX: the original compared `data != data`, which is always
	// false, so a bad Parse result was never detected.
	if uuid != data {
		t.Errorf("%s: decoded to %s, expected %s", text, uuid, data)
	}
}
// TestVersion1 checks that successive Version 1 UUIDs are distinct, carry
// version 1, share a node ID, and have consistent time/clock-sequence fields.
func TestVersion1(t *testing.T) {
	uuid1, err := NewUUID()
	if err != nil {
		t.Fatalf("could not create UUID: %v", err)
	}
	uuid2, err := NewUUID()
	if err != nil {
		t.Fatalf("could not create UUID: %v", err)
	}
	if uuid1 == uuid2 {
		t.Errorf("%s:duplicate uuid", uuid1)
	}
	if v := uuid1.Version(); v != 1 {
		t.Errorf("%s: version %s expected 1", uuid1, v)
	}
	if v := uuid2.Version(); v != 1 {
		t.Errorf("%s: version %s expected 1", uuid2, v)
	}
	n1 := uuid1.NodeID()
	n2 := uuid2.NodeID()
	if !bytes.Equal(n1, n2) {
		t.Errorf("Different nodes %x != %x", n1, n2)
	}
	t1 := uuid1.Time()
	t2 := uuid2.Time()
	q1 := uuid1.ClockSequence()
	q2 := uuid2.ClockSequence()
	switch {
	case t1 == t2 && q1 == q2:
		t.Error("time stopped")
	case t1 > t2 && q1 == q2:
		t.Error("time reversed")
	case t1 < t2 && q1 != q2:
		// Fixed typo in the failure message ("chaned" -> "changed").
		t.Error("clock sequence changed unexpectedly")
	}
}
func TestNode(t *testing.T) {
// This test is mostly to make sure we don't leave nodeMu locked.
ifname = ""
if ni := NodeInterface(); ni != "" {
t.Errorf("NodeInterface got %q, want %q", ni, "")
}
if SetNodeInterface("xyzzy") {
t.Error("SetNodeInterface succeeded on a bad interface name")
}
if !SetNodeInterface("") {
t.Error("SetNodeInterface failed")
}
if ni := NodeInterface(); ni == "" {
t.Error("NodeInterface returned an empty string")
}
ni := NodeID()
if len(ni) != 6 {
t.Errorf("ni got %d bytes, want 6", len(ni))
}
hasData := false
for _, b := range ni {
if b != 0 {
hasData = true
}
}
if !hasData {
t.Error("nodeid is all zeros")
}
id := []byte{1, 2, 3, 4, 5, 6, 7, 8}
SetNodeID(id)
ni = NodeID()
if !bytes.Equal(ni, id[:6]) {
t.Errorf("got nodeid %v, want %v", ni, id[:6])
}
if ni := NodeInterface(); ni != "user" {
t.Errorf("got inteface %q, want %q", ni, "user")
}
}
func TestNodeAndTime(t *testing.T) {
// Time is February 5, 1998 12:30:23.136364800 AM GMT
uuid, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
if err != nil {
t.Fatalf("Parser returned unexpected error %v", err)
}
node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
ts := uuid.Time()
c := time.Unix(ts.UnixTime())
want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
if !c.Equal(want) {
t.Errorf("Got time %v, want %v", c, want)
}
if !bytes.Equal(node, uuid.NodeID()) {
t.Errorf("Expected node %v got %v", node, uuid.NodeID())
}
}
// TestMD5 checks NewMD5 against a known-good Version 3 value for
// "python.org" in the DNS namespace.
func TestMD5(t *testing.T) {
	uuid := NewMD5(NameSpaceDNS, []byte("python.org")).String()
	want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
	if uuid != want {
		t.Errorf("MD5: got %q expected %q", uuid, want)
	}
}
// TestSHA1 checks NewSHA1 against a known-good Version 5 value for
// "python.org" in the DNS namespace.
func TestSHA1(t *testing.T) {
	uuid := NewSHA1(NameSpaceDNS, []byte("python.org")).String()
	want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
	if uuid != want {
		t.Errorf("SHA1: got %q expected %q", uuid, want)
	}
}
func TestNodeID(t *testing.T) {
nid := []byte{1, 2, 3, 4, 5, 6}
SetNodeInterface("")
s := NodeInterface()
if s == "" || s == "user" {
t.Errorf("NodeInterface %q after SetInteface", s)
}
node1 := NodeID()
if node1 == nil {
t.Error("NodeID nil after SetNodeInterface", s)
}
SetNodeID(nid)
s = NodeInterface()
if s != "user" {
t.Errorf("Expected NodeInterface %q got %q", "user", s)
}
node2 := NodeID()
if node2 == nil {
t.Error("NodeID nil after SetNodeID", s)
}
if bytes.Equal(node1, node2) {
t.Error("NodeID not changed after SetNodeID", s)
} else if !bytes.Equal(nid, node2) {
t.Errorf("NodeID is %x, expected %x", node2, nid)
}
}
func testDCE(t *testing.T, name string, uuid UUID, err error, domain Domain, id uint32) {
if err != nil {
t.Errorf("%s failed: %v", name, err)
return
}
if v := uuid.Version(); v != 2 {
t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v)
return
}
if v := uuid.Domain(); v != domain {
t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v)
}
if v := uuid.ID(); v != id {
t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v)
}
}
func TestDCE(t *testing.T) {
uuid, err := NewDCESecurity(42, 12345678)
testDCE(t, "NewDCESecurity", uuid, err, 42, 12345678)
uuid, err = NewDCEPerson()
testDCE(t, "NewDCEPerson", uuid, err, Person, uint32(os.Getuid()))
uuid, err = NewDCEGroup()
testDCE(t, "NewDCEGroup", uuid, err, Group, uint32(os.Getgid()))
}
// badRand is a deterministic "random" source used to prove that SetRand
// takes effect: it always yields the same byte pattern.
type badRand struct{}

// Read fills buf with the fixed pattern 0, 1, 2, ... and never fails.
func (r badRand) Read(buf []byte) (int, error) {
	for i := range buf { // `for i, _ := range` was a redundant blank identifier
		buf[i] = byte(i)
	}
	return len(buf), nil
}
func TestBadRand(t *testing.T) {
SetRand(badRand{})
uuid1 := New()
uuid2 := New()
if uuid1 != uuid2 {
t.Errorf("execpted duplicates, got %q and %q", uuid1, uuid2)
}
SetRand(nil)
uuid1 = New()
uuid2 = New()
if uuid1 == uuid2 {
t.Errorf("unexecpted duplicates, got %q", uuid1)
}
}
var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
var asBytes = []byte(asString)
func BenchmarkParse(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := Parse(asString)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkParseBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := ParseBytes(asBytes)
if err != nil {
b.Fatal(err)
}
}
}
// parseBytesUnsafe is to benchmark using unsafe.
func parseBytesUnsafe(b []byte) (UUID, error) {
return Parse(*(*string)(unsafe.Pointer(&b)))
}
func BenchmarkParseBytesUnsafe(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := parseBytesUnsafe(asBytes)
if err != nil {
b.Fatal(err)
}
}
}
// parseBytesCopy is to benchmark not using unsafe.
func parseBytesCopy(b []byte) (UUID, error) {
return Parse(string(b))
}
func BenchmarkParseBytesCopy(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := parseBytesCopy(asBytes)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkNew(b *testing.B) {
for i := 0; i < b.N; i++ {
New()
}
}
func BenchmarkUUID_String(b *testing.B) {
uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
if uuid.String() == "" {
b.Fatal("invalid uuid")
}
}
}
// BenchmarkUUID_URN measures formatting a parsed UUID as a urn:uuid URN.
func BenchmarkUUID_URN(b *testing.B) {
	uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
	if err != nil {
		b.Fatal(err)
	}
	for n := 0; n < b.N; n++ {
		if uuid.URN() == "" {
			b.Fatal("invalid uuid")
		}
	}
}

44
vendor/github.com/google/uuid/version1.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns Nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns Nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	// Pack the 60-bit timestamp into the time_low/time_mid/time_hi fields,
	// with the version number (1) in the top nibble of time_hi.
	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now >> 48) & 0x0fff)
	timeHi |= 0x1000 // Version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq)

	// Lazily initialize and read the node ID while holding nodeMu for the
	// whole operation. The previous version released the lock before the
	// copy, leaving a window where a concurrent SetNodeID/SetNodeInterface
	// could race the unsynchronized read of nodeID.
	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}

38
vendor/github.com/google/uuid/version4.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics.  New is equivalent to
// the expression
//
//    uuid.Must(uuid.NewRandom())
func New() UUID {
	return Must(NewRandom())
}
// NewRandom returns a Random (Version 4) UUID, or Nil and an error if
// the random source fails.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//  Randomly generated UUIDs have 122 random bits.  One's annual risk of
//  being hit by a meteorite is estimated to be one chance in 17 billion,
//  that means the probability is about 0.00000000006 (6 × 10^-11),
//  equivalent to the odds of creating a few tens of trillions of UUIDs in
//  a year and having one duplicate.
func NewRandom() (UUID, error) {
	var uuid UUID
	// Fill all 16 bytes from the package's random source (replaceable in
	// tests via SetRand); a short or failed read is returned as an error.
	_, err := io.ReadFull(rander, uuid[:])
	if err != nil {
		return Nil, err
	}
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

1
vendor/github.com/klauspost/compress/.gitattributes generated vendored Normal file
View File

@ -0,0 +1 @@
*.bin -text -diff

View File

@ -7,11 +7,9 @@ os:
- osx - osx
go: go:
- 1.4 - 1.9.x
- 1.5 - 1.10.x
- 1.6 - master
- 1.7
- tip
install: install:
- go get -t ./... - go get -t ./...
@ -22,3 +20,8 @@ script:
- go test -cpu=2 -tags=noasm ./... - go test -cpu=2 -tags=noasm ./...
- go test -cpu=1,2,4 -short -race ./... - go test -cpu=1,2,4 -short -race ./...
- go test -cpu=2,4 -short -race -tags=noasm ./... - go test -cpu=2,4 -short -race -tags=noasm ./...
matrix:
allow_failures:
- go: 'master'
fast_finish: true

View File

@ -10,9 +10,18 @@ It offers slightly better compression at lower compression settings, and up to 3
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) * [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress) [![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# changelog # changelog
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
* May 28, 2017: Reduce allocations when resetting decoder.
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. * Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before.
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. * Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. * Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
@ -50,7 +59,7 @@ The packages are drop-in replacements for standard libraries. Simply replace the
| `compress/gzip` | `github.com/klauspost/compress/gzip` | | `compress/gzip` | `github.com/klauspost/compress/gzip` |
| `compress/zlib` | `github.com/klauspost/compress/zlib` | | `compress/zlib` | `github.com/klauspost/compress/zlib` |
| `archive/zip` | `github.com/klauspost/compress/zip` | | `archive/zip` | `github.com/klauspost/compress/zip` |
| `compress/deflate` | `github.com/klauspost/compress/deflate` | | `compress/flate` | `github.com/klauspost/compress/flate` |
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
@ -58,151 +67,72 @@ The packages contains the same as the standard library, so you can use the godoc
Currently there is only minor speedup on decompression (mostly CRC32 calculation). Currently there is only minor speedup on decompression (mostly CRC32 calculation).
# deflate optimizations # Performance Update 2018
* Minimum matches are 4 bytes, this leads to fewer searches and better compression. (In Go 1.7) It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
* Stronger hash (iSCSI CRC32) for matches on x64 with SSE 4.2 support. This leads to fewer hash collisions. (Go 1.7 also has improved hashes)
* Literal byte matching using SSE 4.2 for faster match comparisons. (not in Go)
* Bulk hashing on matches. (In Go 1.7)
* Much faster dictionary indexing with `NewWriterDict()`/`Reset()`. (In Go 1.7)
* Make Bit Coder faster by assuming we are on a 64 bit CPU. (In Go 1.7)
* Level 1 compression replaced by converted "Snappy" algorithm. (In Go 1.7)
* Uncompressible content is detected and skipped faster. (Only in BestSpeed in Go)
* A lot of branching eliminated by having two encoders for levels 4-6 and 7-9. (not in Go)
* All heap memory allocations eliminated. (In Go 1.7)
``` The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
benchmark old ns/op new ns/op delta
BenchmarkEncodeDigitsSpeed1e4-4 554029 265175 -52.14%
BenchmarkEncodeDigitsSpeed1e5-4 3908558 2416595 -38.17%
BenchmarkEncodeDigitsSpeed1e6-4 37546692 24875330 -33.75%
BenchmarkEncodeDigitsDefault1e4-4 781510 486322 -37.77%
BenchmarkEncodeDigitsDefault1e5-4 15530248 6740175 -56.60%
BenchmarkEncodeDigitsDefault1e6-4 174915710 76498625 -56.27%
BenchmarkEncodeDigitsCompress1e4-4 769995 485652 -36.93%
BenchmarkEncodeDigitsCompress1e5-4 15450113 6929589 -55.15%
BenchmarkEncodeDigitsCompress1e6-4 175114660 73348495 -58.11%
BenchmarkEncodeTwainSpeed1e4-4 560122 275977 -50.73%
BenchmarkEncodeTwainSpeed1e5-4 3740978 2506095 -33.01%
BenchmarkEncodeTwainSpeed1e6-4 35542802 21904440 -38.37%
BenchmarkEncodeTwainDefault1e4-4 828534 549026 -33.74%
BenchmarkEncodeTwainDefault1e5-4 13667153 7528455 -44.92%
BenchmarkEncodeTwainDefault1e6-4 141191770 79952170 -43.37%
BenchmarkEncodeTwainCompress1e4-4 830050 545694 -34.26%
BenchmarkEncodeTwainCompress1e5-4 16620852 8460600 -49.10%
BenchmarkEncodeTwainCompress1e6-4 193326820 90808750 -53.03%
benchmark old MB/s new MB/s speedup The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
BenchmarkEncodeDigitsSpeed1e4-4 18.05 37.71 2.09x
BenchmarkEncodeDigitsSpeed1e5-4 25.58 41.38 1.62x
BenchmarkEncodeDigitsSpeed1e6-4 26.63 40.20 1.51x
BenchmarkEncodeDigitsDefault1e4-4 12.80 20.56 1.61x
BenchmarkEncodeDigitsDefault1e5-4 6.44 14.84 2.30x
BenchmarkEncodeDigitsDefault1e6-4 5.72 13.07 2.28x
BenchmarkEncodeDigitsCompress1e4-4 12.99 20.59 1.59x
BenchmarkEncodeDigitsCompress1e5-4 6.47 14.43 2.23x
BenchmarkEncodeDigitsCompress1e6-4 5.71 13.63 2.39x
BenchmarkEncodeTwainSpeed1e4-4 17.85 36.23 2.03x
BenchmarkEncodeTwainSpeed1e5-4 26.73 39.90 1.49x
BenchmarkEncodeTwainSpeed1e6-4 28.14 45.65 1.62x
BenchmarkEncodeTwainDefault1e4-4 12.07 18.21 1.51x
BenchmarkEncodeTwainDefault1e5-4 7.32 13.28 1.81x
BenchmarkEncodeTwainDefault1e6-4 7.08 12.51 1.77x
BenchmarkEncodeTwainCompress1e4-4 12.05 18.33 1.52x
BenchmarkEncodeTwainCompress1e5-4 6.02 11.82 1.96x
BenchmarkEncodeTwainCompress1e6-4 5.17 11.01 2.13x
```
* "Speed" is compression level 1
* "Default" is compression level 6
* "Compress" is compression level 9
* Test files are [Digits](https://github.com/klauspost/compress/blob/master/testdata/e.txt) (no matches) and [Twain](https://github.com/klauspost/compress/blob/master/testdata/Mark.Twain-Tom.Sawyer.txt) (plain text) .
As can be seen it shows a very good speedup all across the line. The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
`Twain` is a much more realistic benchmark, and will be closer to JSON/HTML performance. Here speed is equivalent or faster, up to 2 times.
**Without assembly**. This is what you can expect on systems that does not have amd64 and SSE 4: ## Overall differences.
```
benchmark old ns/op new ns/op delta
BenchmarkEncodeDigitsSpeed1e4-4 554029 249558 -54.96%
BenchmarkEncodeDigitsSpeed1e5-4 3908558 2295216 -41.28%
BenchmarkEncodeDigitsSpeed1e6-4 37546692 22594905 -39.82%
BenchmarkEncodeDigitsDefault1e4-4 781510 579850 -25.80%
BenchmarkEncodeDigitsDefault1e5-4 15530248 10096561 -34.99%
BenchmarkEncodeDigitsDefault1e6-4 174915710 111470780 -36.27%
BenchmarkEncodeDigitsCompress1e4-4 769995 579708 -24.71%
BenchmarkEncodeDigitsCompress1e5-4 15450113 10266373 -33.55%
BenchmarkEncodeDigitsCompress1e6-4 175114660 110170120 -37.09%
BenchmarkEncodeTwainSpeed1e4-4 560122 260679 -53.46%
BenchmarkEncodeTwainSpeed1e5-4 3740978 2097372 -43.94%
BenchmarkEncodeTwainSpeed1e6-4 35542802 20353449 -42.74%
BenchmarkEncodeTwainDefault1e4-4 828534 646016 -22.03%
BenchmarkEncodeTwainDefault1e5-4 13667153 10056369 -26.42%
BenchmarkEncodeTwainDefault1e6-4 141191770 105268770 -25.44%
BenchmarkEncodeTwainCompress1e4-4 830050 642401 -22.61%
BenchmarkEncodeTwainCompress1e5-4 16620852 11157081 -32.87%
BenchmarkEncodeTwainCompress1e6-4 193326820 121780770 -37.01%
benchmark old MB/s new MB/s speedup There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
BenchmarkEncodeDigitsSpeed1e4-4 18.05 40.07 2.22x
BenchmarkEncodeDigitsSpeed1e5-4 25.58 43.57 1.70x
BenchmarkEncodeDigitsSpeed1e6-4 26.63 44.26 1.66x
BenchmarkEncodeDigitsDefault1e4-4 12.80 17.25 1.35x
BenchmarkEncodeDigitsDefault1e5-4 6.44 9.90 1.54x
BenchmarkEncodeDigitsDefault1e6-4 5.72 8.97 1.57x
BenchmarkEncodeDigitsCompress1e4-4 12.99 17.25 1.33x
BenchmarkEncodeDigitsCompress1e5-4 6.47 9.74 1.51x
BenchmarkEncodeDigitsCompress1e6-4 5.71 9.08 1.59x
BenchmarkEncodeTwainSpeed1e4-4 17.85 38.36 2.15x
BenchmarkEncodeTwainSpeed1e5-4 26.73 47.68 1.78x
BenchmarkEncodeTwainSpeed1e6-4 28.14 49.13 1.75x
BenchmarkEncodeTwainDefault1e4-4 12.07 15.48 1.28x
BenchmarkEncodeTwainDefault1e5-4 7.32 9.94 1.36x
BenchmarkEncodeTwainDefault1e6-4 7.08 9.50 1.34x
BenchmarkEncodeTwainCompress1e4-4 12.05 15.57 1.29x
BenchmarkEncodeTwainCompress1e5-4 6.02 8.96 1.49x
BenchmarkEncodeTwainCompress1e6-4 5.17 8.21 1.59x
```
So even without the assembly optimizations there is a general speedup across the board.
## level 1-3 "snappy" compression The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
Levels 1 "Best Speed", 2 and 3 are completely replaced by a converted version of the algorithm found in Snappy, modified to be fully This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
compatible with the deflate bitstream (and thus still compatible with all existing zlib/gzip libraries and tools).
This version is considerably faster than the "old" deflate at level 1. It does however come at a compression loss, usually in the order of 3-4% compared to the old level 1. However, the speed is usually 1.75 times that of the fastest deflate mode.
In my previous experiments the most common case for "level 1" was that it provided no significant speedup, only lower compression compared to level 2 and sometimes even 3. However, the modified Snappy algorithm provides a very good sweet spot. Usually about 75% faster and with only little compression loss. Therefore I decided to *replace* level 1 with this mode entirely. There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
Input is split into blocks of 64kb of, and they are encoded independently (no backreferences across blocks) for the best speed. Contrary to Snappy the output is entropy-encoded, so you will almost always see better compression than Snappy. But Snappy is still about twice as fast as Snappy in deflate mode. ## Web Content
Level 2 and 3 have also been replaced. Level 2 is capable is matching between blocks and level 3 checks up to two hashes for matches it will try. This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
## compression levels Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
This table shows the compression at each level, and the percentage of the output size compared to output Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
at the similar level with the standard library. Compression data is `Twain`, see above.
(Not up-to-date after rebalancing) ## Object files
| Level | Bytes | % size | This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
|-------|--------|--------|
| 1 | 194622 | 103.7% |
| 2 | 174684 | 96.85% |
| 3 | 170301 | 98.45% |
| 4 | 165253 | 97.69% |
| 5 | 161274 | 98.65% |
| 6 | 160464 | 99.71% |
| 7 | 160304 | 99.87% |
| 8 | 160279 | 99.99% |
| 9 | 160279 | 99.99% |
To interpret and example, this version of deflate compresses input of 407287 bytes to 161274 bytes at level 5, which is 98.6% of the size of what the standard library produces; 161274 bytes. The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
This means that from level 4 you can expect a compression level increase of a few percent. Level 1 is about 3% worse, as described above.
# linear time compression (huffman only) # linear time compression (huffman only)
This compression library adds a special compression level, named `ConstantCompression`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
@ -217,66 +147,6 @@ For more information see my blog post on [Fast Linear Time Compression](http://b
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# gzip/zip optimizations
* Uses the faster deflate
* Uses SSE 4.2 CRC32 calculations.
Speed increase is up to 3x of the standard library, but usually around 2x.
This is close to a real world benchmark as you will get. A 2.3MB JSON file. (NOTE: not up-to-date)
```
benchmark old ns/op new ns/op delta
BenchmarkGzipL1-4 95212470 59938275 -37.05%
BenchmarkGzipL2-4 102069730 76349195 -25.20%
BenchmarkGzipL3-4 115472770 82492215 -28.56%
BenchmarkGzipL4-4 153197780 107570890 -29.78%
BenchmarkGzipL5-4 203930260 134387930 -34.10%
BenchmarkGzipL6-4 233172100 145495400 -37.60%
BenchmarkGzipL7-4 297190260 197926950 -33.40%
BenchmarkGzipL8-4 512819750 376244733 -26.63%
BenchmarkGzipL9-4 563366800 403266833 -28.42%
benchmark old MB/s new MB/s speedup
BenchmarkGzipL1-4 52.11 82.78 1.59x
BenchmarkGzipL2-4 48.61 64.99 1.34x
BenchmarkGzipL3-4 42.97 60.15 1.40x
BenchmarkGzipL4-4 32.39 46.13 1.42x
BenchmarkGzipL5-4 24.33 36.92 1.52x
BenchmarkGzipL6-4 21.28 34.10 1.60x
BenchmarkGzipL7-4 16.70 25.07 1.50x
BenchmarkGzipL8-4 9.68 13.19 1.36x
BenchmarkGzipL9-4 8.81 12.30 1.40x
```
Multithreaded compression using [pgzip](https://github.com/klauspost/pgzip) comparison, Quadcore, CPU = 8:
(Not updated, old numbers)
```
benchmark old ns/op new ns/op delta
BenchmarkGzipL1 96155500 25981486 -72.98%
BenchmarkGzipL2 101905830 24601408 -75.86%
BenchmarkGzipL3 113506490 26321506 -76.81%
BenchmarkGzipL4 143708220 31761818 -77.90%
BenchmarkGzipL5 188210770 39602266 -78.96%
BenchmarkGzipL6 209812000 40402313 -80.74%
BenchmarkGzipL7 270015440 56103210 -79.22%
BenchmarkGzipL8 461359700 91255220 -80.22%
BenchmarkGzipL9 498361833 88755075 -82.19%
benchmark old MB/s new MB/s speedup
BenchmarkGzipL1 51.60 190.97 3.70x
BenchmarkGzipL2 48.69 201.69 4.14x
BenchmarkGzipL3 43.71 188.51 4.31x
BenchmarkGzipL4 34.53 156.22 4.52x
BenchmarkGzipL5 26.36 125.29 4.75x
BenchmarkGzipL6 23.65 122.81 5.19x
BenchmarkGzipL7 18.38 88.44 4.81x
BenchmarkGzipL8 10.75 54.37 5.06x
BenchmarkGzipL9 9.96 55.90 5.61x
```
# snappy package # snappy package
The standard snappy package has now been improved. This repo contains a copy of the snappy repo. The standard snappy package has now been improved. This repo contains a copy of the snappy repo.

63
vendor/github.com/klauspost/compress/compressible.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package compress
import "math"
// Estimate returns a normalized compressibility estimate of block b.
// Values close to zero are likely uncompressible.
// Values above 0.1 are likely to be compressible.
// Values above 0.5 are very compressible.
// Very small lengths will return 0.
func Estimate(b []byte) float64 {
	if len(b) < 16 {
		return 0
	}

	// Order-1 prediction: remember the byte that last followed each byte
	// value, and count positions where that prediction held twice in a
	// row. A byte-value histogram is collected in the same pass.
	var (
		follower  [256]byte // last byte observed after each byte value
		histogram [256]int
		prev      byte
		hits      int
		streak    bool
	)
	for _, cur := range b {
		if cur == follower[prev] {
			// Only count a hit when two consecutive predictions were correct.
			if streak {
				hits++
			}
			streak = true
		} else {
			streak = false
		}
		follower[prev] = cur
		prev = cur
		histogram[cur]++
	}

	n := float64(len(b))

	// x^0.6 spreads the prediction ratio more evenly across [0, 1].
	prediction := math.Pow(float64(hits)/n, 0.6)

	// Histogram skew: variance of the byte counts around a perfectly
	// uniform distribution.
	mean := n / 256
	var variance float64
	for _, count := range histogram {
		d := float64(count) - mean
		variance += d * d
	}
	stddev := math.Sqrt(variance) / n

	// Subtract the deviation expected from uniformly random input of this
	// length, clamping at zero, then re-scale.
	expected := math.Sqrt(1 / n)
	stddev -= expected
	if stddev < 0 {
		stddev = 0
	}
	stddev *= 1 + expected

	// x^0.4 spreads the entropy estimate more evenly.
	entropy := math.Pow(stddev, 0.4)

	// 50/50 weight between prediction accuracy and histogram skew.
	return math.Pow((prediction+entropy)/2, 0.9)
}

View File

@ -0,0 +1,121 @@
package compress
import (
"crypto/rand"
"encoding/base32"
"testing"
)
// BenchmarkEstimate exercises Estimate on inputs with different mixes of
// predictability and byte-distribution entropy, reporting throughput and
// logging the estimate produced for each input.
func BenchmarkEstimate(b *testing.B) {
	b.ReportAllocs()

	// run registers one sub-benchmark. gen is invoked inside the
	// sub-benchmark, before the timer reset, so input construction is
	// never measured and randomized inputs are rebuilt per invocation,
	// exactly as the inline version did.
	run := func(name string, gen func() []byte) {
		b.Run(name, func(b *testing.B) {
			testData := gen()
			b.SetBytes(int64(len(testData)))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				Estimate(testData)
			}
			b.Log(Estimate(testData))
		})
	}

	// (predictable, low entropy distribution)
	run("zeroes-5k", func() []byte {
		return make([]byte, 5000)
	})

	// (predictable, high entropy distribution)
	run("predictable-5k", func() []byte {
		data := make([]byte, 5000)
		for i := range data {
			data[i] = byte(float64(i) / float64(len(data)) * 256)
		}
		return data
	})

	// (not predictable, high entropy distribution)
	run("random-500b", func() []byte {
		data := make([]byte, 500)
		rand.Read(data)
		return data
	})

	// (not predictable, high entropy distribution)
	run("random-5k", func() []byte {
		data := make([]byte, 5000)
		rand.Read(data)
		return data
	})

	// (not predictable, high entropy distribution)
	run("random-50k", func() []byte {
		data := make([]byte, 50000)
		rand.Read(data)
		return data
	})

	// (not predictable, high entropy distribution)
	run("random-500k", func() []byte {
		data := make([]byte, 500000)
		rand.Read(data)
		return data
	})

	// (not predictable, medium entropy distribution)
	run("base-32-5k", func() []byte {
		data := make([]byte, 5000)
		rand.Read(data)
		encoded := []byte(base32.StdEncoding.EncodeToString(data))
		return encoded[:5000]
	})

	// (medium predictable, medium entropy distribution)
	run("text", func() []byte {
		data := []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks.
This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process:
With "naive" CDC, a "known plaintext" file can be verified to exist within the backup if the size of individual blocks can be observed by an attacker, by using CDC on the file in parallel and comparing the resulting amount of chunks and individual chunk lengths.
As discussed earlier, this can be somewhat mitigated by salting the CDC algorithm with a secret value, as done in attic.
With salted CDC, I assume compression would happen on each individual chunk, after splitting the problematic file into chunks. Restic chunks are in the range of 512 KB to 8 MB (but not evenly distributed - right?).
Attacker knows that the CDC algorithm uses a secret salt, so the attacker generates a range of chunks consisting of the first 512 KB to 8 MB of the file, one for each valid chunk length. The attacker is also able to determine the lengths of compressed chunks.
The attacker then compresses that chunk using the compression algorithm.
The attacker compares the lengths of the resulting chunks to the first chunk in the restic backup sets.
IF a matching block length is found, the attacker repeats the exercise with the next chunk, and the next chunk, and the next chunk, ... and the next chunk.
It is my belief that with sufficiently large files, and considering the fact that the CDC algorithm is "biased" (in lack of better of words) towards generating blocks of about 1 MB, this would be sufficient to ascertain whether or not a certain large file exists in the backup.
AS always, a paranoid and highly unscientific stream of consciousness.
Thoughts?`)
		data = append(data, data...)
		data = append(data, data...)
		return data
	})
}

View File

@ -10,12 +10,14 @@ package flate
import ( import (
"bufio" "bufio"
"io" "io"
"math/bits"
"strconv" "strconv"
"sync" "sync"
) )
const ( const (
maxCodeLen = 16 // max length of Huffman code maxCodeLen = 16 // max length of Huffman code
maxCodeLenMask = 15 // mask for max length of Huffman code
// The next three numbers come from the RFC section 3.2.7, with the // The next three numbers come from the RFC section 3.2.7, with the
// additional proviso in section 3.2.5 which implies that distance codes // additional proviso in section 3.2.5 which implies that distance codes
// 30 and 31 should never occur in compressed data. // 30 and 31 should never occur in compressed data.
@ -102,7 +104,7 @@ const (
type huffmanDecoder struct { type huffmanDecoder struct {
min int // the minimum code length min int // the minimum code length
chunks [huffmanNumChunks]uint32 // chunks as described above chunks *[huffmanNumChunks]uint32 // chunks as described above
links [][]uint32 // overflow links links [][]uint32 // overflow links
linkMask uint32 // mask the width of the link table linkMask uint32 // mask the width of the link table
} }
@ -112,21 +114,24 @@ type huffmanDecoder struct {
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a // tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty // degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted. // trees are permitted.
func (h *huffmanDecoder) init(bits []int) bool { func (h *huffmanDecoder) init(lengths []int) bool {
// Sanity enables additional runtime tests during Huffman // Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during // table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests. // development to supplement the currently ad-hoc unit tests.
const sanity = false const sanity = false
if h.chunks == nil {
h.chunks = &[huffmanNumChunks]uint32{}
}
if h.min != 0 { if h.min != 0 {
*h = huffmanDecoder{} *h = huffmanDecoder{chunks: h.chunks, links: h.links}
} }
// Count number of codes of each length, // Count number of codes of each length,
// compute min and max length. // compute min and max length.
var count [maxCodeLen]int var count [maxCodeLen]int
var min, max int var min, max int
for _, n := range bits { for _, n := range lengths {
if n == 0 { if n == 0 {
continue continue
} }
@ -136,7 +141,7 @@ func (h *huffmanDecoder) init(bits []int) bool {
if n > max { if n > max {
max = n max = n
} }
count[n]++ count[n&maxCodeLenMask]++
} }
// Empty tree. The decompressor.huffSym function will fail later if the tree // Empty tree. The decompressor.huffSym function will fail later if the tree
@ -154,8 +159,8 @@ func (h *huffmanDecoder) init(bits []int) bool {
var nextcode [maxCodeLen]int var nextcode [maxCodeLen]int
for i := min; i <= max; i++ { for i := min; i <= max; i++ {
code <<= 1 code <<= 1
nextcode[i] = code nextcode[i&maxCodeLenMask] = code
code += count[i] code += count[i&maxCodeLenMask]
} }
// Check that the coding is complete (i.e., that we've // Check that the coding is complete (i.e., that we've
@ -168,33 +173,49 @@ func (h *huffmanDecoder) init(bits []int) bool {
} }
h.min = min h.min = min
chunks := h.chunks[:]
for i := range chunks {
chunks[i] = 0
}
if max > huffmanChunkBits { if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits) numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1) h.linkMask = uint32(numLinks - 1)
// create link tables // create link tables
link := nextcode[huffmanChunkBits+1] >> 1 link := nextcode[huffmanChunkBits+1] >> 1
if cap(h.links) < huffmanNumChunks-link {
h.links = make([][]uint32, huffmanNumChunks-link) h.links = make([][]uint32, huffmanNumChunks-link)
} else {
h.links = h.links[:huffmanNumChunks-link]
}
for j := uint(link); j < huffmanNumChunks; j++ { for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 reverse := int(bits.Reverse16(uint16(j)))
reverse >>= uint(16 - huffmanChunkBits) reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link) off := j - uint(link)
if sanity && h.chunks[reverse] != 0 { if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk") panic("impossible: overwriting existing chunk")
} }
h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1)) h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
if cap(h.links[off]) < numLinks {
h.links[off] = make([]uint32, numLinks) h.links[off] = make([]uint32, numLinks)
} else {
links := h.links[off][:0]
h.links[off] = links[:numLinks]
} }
} }
} else {
h.links = h.links[:0]
}
for i, n := range bits { for i, n := range lengths {
if n == 0 { if n == 0 {
continue continue
} }
code := nextcode[n] code := nextcode[n]
nextcode[n]++ nextcode[n]++
chunk := uint32(i<<huffmanValueShift | n) chunk := uint32(i<<huffmanValueShift | n)
reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8 reverse := int(bits.Reverse16(uint16(code)))
reverse >>= uint(16 - n) reverse >>= uint(16 - n)
if n <= huffmanChunkBits { if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) { for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
@ -589,7 +610,7 @@ readLiteral:
return return
} }
} }
dist = int(reverseByte[(f.b&0x1F)<<3]) dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5 f.b >>= 5
f.nb -= 5 f.nb -= 5
} else { } else {
@ -661,10 +682,7 @@ func (f *decompressor) dataBlock() {
nr, err := io.ReadFull(f.r, f.buf[0:4]) nr, err := io.ReadFull(f.r, f.buf[0:4])
f.roffset += int64(nr) f.roffset += int64(nr)
if err != nil { if err != nil {
if err == io.EOF { f.err = noEOF(err)
err = io.ErrUnexpectedEOF
}
f.err = err
return return
} }
n := int(f.buf[0]) | int(f.buf[1])<<8 n := int(f.buf[0]) | int(f.buf[1])<<8
@ -697,10 +715,7 @@ func (f *decompressor) copyData() {
f.copyLen -= cnt f.copyLen -= cnt
f.dict.writeMark(cnt) f.dict.writeMark(cnt)
if err != nil { if err != nil {
if err == io.EOF { f.err = noEOF(err)
err = io.ErrUnexpectedEOF
}
f.err = err
return return
} }
@ -722,13 +737,18 @@ func (f *decompressor) finishBlock() {
f.step = (*decompressor).nextBlock f.step = (*decompressor).nextBlock
} }
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
func (f *decompressor) moreBits() error { func (f *decompressor) moreBits() error {
c, err := f.r.ReadByte() c, err := f.r.ReadByte()
if err != nil { if err != nil {
if err == io.EOF { return noEOF(err)
err = io.ErrUnexpectedEOF
}
return err
} }
f.roffset++ f.roffset++
f.b |= uint32(c) << f.nb f.b |= uint32(c) << f.nb
@ -743,25 +763,37 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
// cases, the chunks slice will be 0 for the invalid sequence, leading it // cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below. // satisfy the n == 0 check below.
n := uint(h.min) n := uint(h.min)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for { for {
for f.nb < n { for nb < n {
if err := f.moreBits(); err != nil { c, err := f.r.ReadByte()
return 0, err if err != nil {
f.b = b
f.nb = nb
return 0, noEOF(err)
} }
f.roffset++
b |= uint32(c) << (nb & 31)
nb += 8
} }
chunk := h.chunks[f.b&(huffmanNumChunks-1)] chunk := h.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask) n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits { if n > huffmanChunkBits {
chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask] chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
n = uint(chunk & huffmanCountMask) n = uint(chunk & huffmanCountMask)
} }
if n <= f.nb { if n <= nb {
if n == 0 { if n == 0 {
f.b = b
f.nb = nb
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return 0, f.err return 0, f.err
} }
f.b >>= n f.b = b >> (n & 31)
f.nb -= n f.nb = nb - n
return int(chunk >> huffmanValueShift), nil return int(chunk >> huffmanValueShift), nil
} }
} }
@ -799,6 +831,8 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
r: makeReader(r), r: makeReader(r),
bits: f.bits, bits: f.bits,
codebits: f.codebits, codebits: f.codebits,
h1: f.h1,
h2: f.h2,
dict: f.dict, dict: f.dict,
step: (*decompressor).nextBlock, step: (*decompressor).nextBlock,
} }

View File

@ -25,6 +25,7 @@ func TestNlitOutOfRange(t *testing.T) {
const ( const (
digits = iota digits = iota
twain twain
random
) )
var testfiles = []string{ var testfiles = []string{
@ -34,6 +35,8 @@ var testfiles = []string{
digits: "../testdata/e.txt", digits: "../testdata/e.txt",
// Twain is Project Gutenberg's edition of Mark Twain's classic English novel. // Twain is Project Gutenberg's edition of Mark Twain's classic English novel.
twain: "../testdata/Mark.Twain-Tom.Sawyer.txt", twain: "../testdata/Mark.Twain-Tom.Sawyer.txt",
// Random bytes
random: "../testdata/sharnd.out",
} }
func benchmarkDecode(b *testing.B, testfile, level, n int) { func benchmarkDecode(b *testing.B, testfile, level, n int) {
@ -63,8 +66,11 @@ func benchmarkDecode(b *testing.B, testfile, level, n int) {
buf0, compressed, w = nil, nil, nil buf0, compressed, w = nil, nil, nil
runtime.GC() runtime.GC()
b.StartTimer() b.StartTimer()
r := NewReader(bytes.NewReader(buf1))
res := r.(Resetter)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1))) res.Reset(bytes.NewReader(buf1), nil)
io.Copy(ioutil.Discard, r)
} }
} }
@ -95,3 +101,6 @@ func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain,
func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) } func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) }
func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) } func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) }
func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) } func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) }
func BenchmarkDecodeRandomSpeed1e4(b *testing.B) { benchmarkDecode(b, random, speed, 1e4) }
func BenchmarkDecodeRandomSpeed1e5(b *testing.B) { benchmarkDecode(b, random, speed, 1e5) }
func BenchmarkDecodeRandomSpeed1e6(b *testing.B) { benchmarkDecode(b, random, speed, 1e6) }

79
vendor/github.com/klauspost/compress/fse/README.md generated vendored Normal file
View File

@ -0,0 +1,79 @@
# Finite State Entropy
This package provides Finite State Entropy encoding and decoding.
Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
encoding provides a fast near-optimal symbol encoding/decoding
for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
## News
* Feb 2018: First implementation released. Consider this beta software for now.
# Usage
This package provides a low-level interface for compressing single independent blocks.
Each block is separate, and there are no built-in integrity checks.
This means that the caller should keep track of block sizes and also do checksums if needed.
Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
You must provide input and will receive the output and maybe an error.
These error values can be returned:
| Error | Description |
|---------------------|-----------------------------------------------------------------------------|
| `<nil>` | Everything ok, output is returned |
| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
| `(error)` | An internal error occurred. |
As can be seen above there are errors that will be returned even under normal operation so it is important to handle these.
To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same
object can be used for both.
Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this
you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
your input was likely corrupted.
It is important to note that a successful decoding does *not* mean your output matches your original input.
There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
# Performance
A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors.
All compression functions are currently only running on the calling goroutine so only one core will be used per block.
The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
beneficial to transpose all your input values down by 64.
With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
around 300MB/s for decompression.
The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
# Plans
At one point, more internals will be exposed to facilitate more "expert" usage of the components.
A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
# Contributing
Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
changes will likely not be accepted. If in doubt open an issue before writing the PR.

107
vendor/github.com/klauspost/compress/fse/bitreader.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import (
"errors"
"io"
)
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte // input buffer, consumed back-to-front
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit container, refilled from in
	bitsRead uint8  // number of bits already consumed from value
}
// init initializes and resets the bit reader.
// It validates the input, locates the end-of-stream marker (the highest
// set bit of the final byte), and pre-fills the bit container.
// Returns an error if the stream is empty or the marker byte is zero.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	b.fill()
	b.fill()
	// Skip the padding bits up to and including the marker bit itself.
	b.bitsRead += 8 - uint8(highBits(uint32(v)))
	return nil
}
// getBits will return n bits. n can be 0.
// A fully-drained container also yields 0.
func (b *bitReader) getBits(n uint8) uint16 {
	if n != 0 && b.bitsRead < 64 {
		return b.getBitsFast(n)
	}
	// Nothing requested, or no bits left in the container.
	return 0
}
// getBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) getBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	// Shift the already-consumed bits out of the top, then bring the
	// requested n bits down to the low end of the register.
	up := b.bitsRead & regMask
	down := (regMask + 1 - n) & regMask
	b.bitsRead += n
	return uint16((b.value << up) >> down)
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Single re-slice keeps the compiler to one bounds check.
	chunk := b.in[b.off-4 : b.off]
	word := uint32(chunk[0]) | uint32(chunk[1])<<8 | uint32(chunk[2])<<16 | uint32(chunk[3])<<24
	b.value = b.value<<32 | uint64(word)
	b.bitsRead -= 32
	b.off -= 4
}
// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		// Fast path: pull in four bytes at once.
		chunk := b.in[b.off-4 : b.off]
		word := uint32(chunk[0]) | uint32(chunk[1])<<8 | uint32(chunk[2])<<16 | uint32(chunk[3])<<24
		b.value = b.value<<32 | uint64(word)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Tail of the buffer: consume the remaining bytes one at a time.
	for b.off > 0 {
		b.off--
		b.value = b.value<<8 | uint64(b.in[b.off])
		b.bitsRead -= 8
	}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
	if b.off != 0 {
		return false
	}
	return b.bitsRead >= 64
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	b.in = nil // release reference to the input buffer
	if b.bitsRead > 64 {
		// More bits were consumed than the container ever held.
		return io.ErrUnexpectedEOF
	}
	return nil
}

168
vendor/github.com/klauspost/compress/fse/bitwriter.go generated vendored Normal file
View File

@ -0,0 +1,168 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import "fmt"
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64 // pending bits, LSB-first
	nBits        uint8  // number of valid bits in bitContainer
	out          []byte // accumulated output
}
// bitMask16 holds the low-bit masks for 0..16 bits.
// The array has extra entries so an index masked with &31 cannot
// go out of bounds, avoiding a bounds check at the call sites.
// NOTE(review): entries 26-31 are implicitly zero — callers appear to
// only index with bits <= 16; confirm against addBits16NC usage.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	// Mask off any stray bits above `bits` before merging.
	masked := value & bitMask16[bits&31]
	b.bitContainer |= uint64(masked) << (b.nBits & 63)
	b.nBits += bits
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	shift := b.nBits & 63
	b.nBits += bits
	b.bitContainer |= uint64(value) << shift
}
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	// Clear any bits of value above `bits` by shifting up and back down.
	keep := (16 - bits) & 15
	value = (value << keep) >> keep
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	// Number of complete bytes currently held in the container.
	full := b.nBits >> 3
	if full > 8 {
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	// Emit the full bytes, least significant first.
	for i := uint8(0); i < full; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	// Keep only the trailing partial byte (0-7 bits).
	b.bitContainer >>= full << 3
	b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	// Emit the low four bytes, least significant first.
	for shift := uint(0); shift < 32; shift += 8 {
		b.out = append(b.out, byte(b.bitContainer>>shift))
	}
	b.nBits -= 32
	b.bitContainer >>= 32
}
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
	// Round the pending bit count up to whole bytes and emit them all.
	pending := (b.nBits + 7) >> 3
	for i := uint8(0); i < pending; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.nBits = 0
	b.bitContainer = 0
}
// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
	b.addBits16Clean(1, 1) // end-of-stream marker bit
	b.flushAlign()         // pad out to the next byte boundary
	return nil
}
// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.out = out
	b.nBits = 0
	b.bitContainer = 0
}

56
vendor/github.com/klauspost/compress/fse/bytereader.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte // underlying input
	off int    // current read offset into b
}
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
	b.off = 0
	b.b = in
}
// advance the stream b n bytes.
// No bounds checking is performed; the caller must stay within the input.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
	// Full-slice expression pins the capacity so the compiler
	// can prove the four reads are in bounds.
	src := b.b[b.off : b.off+4 : b.off+4]
	v := int32(src[0])
	v |= int32(src[1]) << 8
	v |= int32(src[2]) << 16
	v |= int32(src[3]) << 24
	return v
}
// Uint32 returns a little endian uint32 starting at current offset.
func (b byteReader) Uint32() uint32 {
	// Full-slice expression pins the capacity so the compiler
	// can prove the four reads are in bounds.
	src := b.b[b.off : b.off+4 : b.off+4]
	v := uint32(src[0])
	v |= uint32(src[1]) << 8
	v |= uint32(src[2]) << 16
	v |= uint32(src[3]) << 24
	return v
}
// unread returns the unread portion of the input,
// i.e. everything from the current offset to the end.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}

684
vendor/github.com/klauspost/compress/fse/compress.go generated vendored Normal file
View File

@ -0,0 +1,684 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import (
"errors"
"fmt"
)
// Compress the input bytes. Input must be < 2GB.
// Provide a Scratch buffer to avoid memory allocations.
// Note that the output is also kept in the scratch buffer.
// If input is too hard to compress, ErrIncompressible is returned.
// If input is a single byte value repeated ErrUseRLE is returned.
//
// Pipeline: histogram -> table-log selection -> count normalization ->
// header (writeCount) -> compression table -> entropy coding.
func Compress(in []byte, s *Scratch) ([]byte, error) {
	if len(in) <= 1 {
		return nil, ErrIncompressible
	}
	if len(in) >= 2<<30 {
		return nil, errors.New("input too big, must be < 2GB")
	}
	// prepare may allocate/reset scratch state; always use its return value.
	s, err := s.prepare(in)
	if err != nil {
		return nil, err
	}
	// Create histogram, if none was provided.
	maxCount := s.maxCount
	if maxCount == 0 {
		maxCount = s.countSimple(in)
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount == len(in) {
		// One symbol, use RLE
		return nil, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, ErrIncompressible
	}
	s.optimalTableLog()
	err = s.normalizeCount()
	if err != nil {
		return nil, err
	}
	// Emit the normalized-count header; readNCount reverses this.
	err = s.writeCount()
	if err != nil {
		return nil, err
	}
	// Disabled debug check of the normalized counts.
	if false {
		err = s.validateNorm()
		if err != nil {
			return nil, err
		}
	}
	err = s.buildCTable()
	if err != nil {
		return nil, err
	}
	err = s.compress(in)
	if err != nil {
		return nil, err
	}
	s.Out = s.bw.out
	// Check if we compressed.
	if len(s.Out) >= len(in) {
		return nil, ErrIncompressible
	}
	return s.Out, nil
}
// cState contains the compression state of a stream.
type cState struct {
	bw         *bitWriter // shared output bitstream
	stateTable []uint16   // next-state table from the cTable
	state      uint16     // current FSE state
}
// init will initialize the compression state to the first symbol of the stream.
// tableLog is accepted for symmetry with the reference implementation but is
// not used here.
func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
	c.bw = bw
	c.stateTable = ct.stateTable
	// Derive the starting state from the first symbol's transform.
	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
	delta := int32((nbBitsOut << 16) - first.deltaNbBits)
	idx := (delta >> nbBitsOut) + first.deltaFindState
	c.state = c.stateTable[idx]
}
// encode the output symbol provided and write it to the bitstream.
// Assumes the symbol always produces at least one output bit
// (see encodeZero for the zero-bit-tolerant variant).
func (c *cState) encode(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// encodeZero encodes the output symbol provided and writes it to the
// bitstream. Unlike encode, it tolerates symbols that produce zero
// output bits (it uses addBits16ZeroNC, which handles bits == 0).
func (c *cState) encodeZero(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
// The final state value is what the decoder reads first to initialize itself.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
	c.bw.addBits16NC(c.state, tableLog)
	c.bw.flush()
}
// compress is the main compression loop that will encode the input from the last byte to the first.
// Two interleaved states (c1, c2) alternate bytes; the prologue trims the
// input so the main loop can always consume four bytes per iteration.
// Returns an error for inputs of 2 bytes or fewer.
func (s *Scratch) compress(src []byte) error {
	if len(src) <= 2 {
		return errors.New("compress: src too small")
	}
	tt := s.ct.symbolTT[:256]
	s.bw.reset(s.Out)

	// Our two states each encodes every second byte.
	// Last byte encoded (first byte decoded) will always be encoded by c1.
	var c1, c2 cState

	// Encode so remaining size is divisible by 4.
	ip := len(src)
	if ip&1 == 1 {
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		c1.encodeZero(tt[src[ip-3]])
		ip -= 3
	} else {
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		ip -= 2
	}
	if ip&2 != 0 {
		c2.encodeZero(tt[src[ip-1]])
		c1.encodeZero(tt[src[ip-2]])
		ip -= 2
	}

	// Main compression loop. The four variants trade flush frequency and
	// zero-bit checks depending on the table log and zeroBits flag:
	// with actualTableLog <= 8, four symbols fit without an extra flush.
	switch {
	case !s.zeroBits && s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush.
		// We do not need to check if any output is 0 bits.
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			c2.encode(tt[v2])
			c1.encode(tt[v3])
			ip -= 4
		}
	case !s.zeroBits:
		// We do not need to check if any output is 0 bits.
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			s.bw.flush32()
			c2.encode(tt[v2])
			c1.encode(tt[v3])
			ip -= 4
		}
	case s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
			ip -= 4
		}
	default:
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			s.bw.flush32()
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
			ip -= 4
		}
	}

	// Flush final state.
	// Used to initialize state when decoding.
	c2.flush(s.actualTableLog)
	c1.flush(s.actualTableLog)
	return s.bw.close()
}
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
// The counts are emitted as a variable-length bitstream; runs of zero
// counts are run-length encoded (previous0 path). The precision
// (nbBits/threshold) shrinks as the remaining probability mass shrinks.
func (s *Scratch) writeCount() error {
	var (
		tableLog  = s.actualTableLog
		tableSize = 1 << tableLog
		previous0 bool
		charnum   uint16

		// Worst-case header size for the pre-sized output buffer.
		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3

		// Write Table Size
		bitStream = uint32(tableLog - minTablelog)
		bitCount  = uint(4)
		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
		threshold = int16(tableSize)
		nbBits    = uint(tableLog + 1)
	)
	if cap(s.Out) < maxHeaderSize {
		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
	}
	outP := uint(0)
	out := s.Out[:maxHeaderSize]

	// stops at 1
	for remaining > 1 {
		if previous0 {
			// Run-length encode a run of zero counts.
			start := charnum
			for s.norm[charnum] == 0 {
				charnum++
			}
			// Emit full 24-symbol zero runs as 0xFFFF markers.
			for charnum >= start+24 {
				start += 24
				bitStream += uint32(0xFFFF) << bitCount
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
			}
			// Emit 3-symbol zero runs, then the 0-2 symbol remainder.
			for charnum >= start+3 {
				start += 3
				bitStream += 3 << bitCount
				bitCount += 2
			}
			bitStream += uint32(charnum-start) << bitCount
			bitCount += 2
			if bitCount > 16 {
				// Flush two bytes of the accumulated bitstream.
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
				bitCount -= 16
			}
		}

		count := s.norm[charnum]
		charnum++
		max := (2*threshold - 1) - remaining
		// A count of -1 denotes a low-probability symbol costing 1 slot.
		if count < 0 {
			remaining += count
		} else {
			remaining -= count
		}
		count++ // +1 for extra accuracy
		if count >= threshold {
			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
		}
		bitStream += uint32(count) << bitCount
		bitCount += nbBits
		if count < max {
			bitCount--
		}
		previous0 = count == 1
		if remaining < 1 {
			return errors.New("internal error: remaining<1")
		}
		// Shrink precision as the remaining mass decreases.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}

		if bitCount > 16 {
			// Flush two bytes of the accumulated bitstream.
			out[outP] = byte(bitStream)
			out[outP+1] = byte(bitStream >> 8)
			outP += 2
			bitStream >>= 16
			bitCount -= 16
		}
	}

	// Flush any remaining bits, rounded up to whole bytes.
	out[outP] = byte(bitStream)
	out[outP+1] = byte(bitStream >> 8)
	outP += (bitCount + 7) / 8

	if uint16(charnum) > s.symbolLen {
		return errors.New("internal error: charnum > s.symbolLen")
	}
	s.Out = out[:outP]
	return nil
}
// symbolTransform contains the state transform for a symbol.
type symbolTransform struct {
	deltaFindState int32  // offset into the state table for this symbol
	deltaNbBits    uint32 // packed bit-cost delta (high 16 bits: nbBits)
}
// String prints values as a human readable string.
func (s symbolTransform) String() string {
	const layout = "dnbits: %08x, fs:%d"
	return fmt.Sprintf(layout, s.deltaNbBits, s.deltaFindState)
}
// cTable contains tables used for compression.
type cTable struct {
	tableSymbol []byte            // symbol assigned to each state
	stateTable  []uint16          // next-state values, sorted by symbol
	symbolTT    []symbolTransform // per-symbol transforms, indexed by byte value
}
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *Scratch) allocCtable() {
	tableSize := 1 << s.actualTableLog

	// Symbol table: one entry per state.
	if cap(s.ct.tableSymbol) < tableSize {
		s.ct.tableSymbol = make([]byte, tableSize)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]

	// Next-state table: one entry per state.
	ctSize := tableSize
	if cap(s.ct.stateTable) < ctSize {
		s.ct.stateTable = make([]uint16, ctSize)
	}
	s.ct.stateTable = s.ct.stateTable[:ctSize]

	// Symbol transforms are always re-sliced to the full byte range below,
	// so the capacity check must be against 256, not s.symbolLen: a smaller
	// non-zero capacity would make the [:256] re-slice panic.
	if cap(s.ct.symbolTT) < 256 {
		s.ct.symbolTT = make([]symbolTransform, 256)
	}
	s.ct.symbolTT = s.ct.symbolTT[:256]
}
// buildCTable will populate the compression table so it is ready to be used.
// Steps: compute cumulative symbol start positions, spread symbols over the
// state table, build the next-state table, then the per-symbol transforms.
func (s *Scratch) buildCTable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	var cumul [maxSymbolValue + 2]int16

	s.allocCtable()
	tableSymbol := s.ct.tableSymbol[:tableSize]
	// symbol start positions
	{
		cumul[0] = 0
		for ui, v := range s.norm[:s.symbolLen-1] {
			u := byte(ui) // one less than reference
			if v == -1 {
				// Low proba symbol: gets a single state at the table's end.
				cumul[u+1] = cumul[u] + 1
				tableSymbol[highThreshold] = u
				highThreshold--
			} else {
				cumul[u+1] = cumul[u] + v
			}
		}
		// Encode last symbol separately to avoid overflowing u
		u := int(s.symbolLen - 1)
		v := s.norm[s.symbolLen-1]
		if v == -1 {
			// Low proba symbol
			cumul[u+1] = cumul[u] + 1
			tableSymbol[highThreshold] = byte(u)
			highThreshold--
		} else {
			cumul[u+1] = cumul[u] + v
		}
		if uint32(cumul[s.symbolLen]) != tableSize {
			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
		}
		cumul[s.symbolLen] = int16(tableSize) + 1
	}
	// Spread symbols
	s.zeroBits = false
	{
		step := tableStep(tableSize)
		tableMask := tableSize - 1
		var position uint32
		// if any symbol > largeLimit, we may have 0 bits output.
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for ui, v := range s.norm[:s.symbolLen] {
			symbol := byte(ui)
			if v > largeLimit {
				s.zeroBits = true
			}
			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
				tableSymbol[position] = symbol
				position = (position + step) & tableMask
				for position > highThreshold {
					position = (position + step) & tableMask
				} /* Low proba area */
			}
		}

		// Check if we have gone through all positions
		if position != 0 {
			return errors.New("position!=0")
		}
	}

	// Build table
	table := s.ct.stateTable
	{
		tsi := int(tableSize)
		for u, v := range tableSymbol {
			// TableU16 : sorted by symbol order; gives next state value
			table[cumul[v]] = uint16(tsi + u)
			cumul[v]++
		}
	}

	// Build Symbol Transformation Table
	{
		total := int16(0)
		symbolTT := s.ct.symbolTT[:s.symbolLen]
		tableLog := s.actualTableLog
		tl := (uint32(tableLog) << 16) - (1 << tableLog)
		for i, v := range s.norm[:s.symbolLen] {
			switch v {
			case 0:
				// Absent symbol: no transform needed.
			case -1, 1:
				symbolTT[i].deltaNbBits = tl
				symbolTT[i].deltaFindState = int32(total - 1)
				total++
			default:
				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
				minStatePlus := uint32(v) << maxBitsOut
				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
				symbolTT[i].deltaFindState = int32(total - v)
				total += v
			}
		}
		if total != int16(tableSize) {
			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
		}
	}
	return nil
}
// countSimple will create a simple histogram in s.count.
// Returns the biggest count.
// Does not update s.clearCount.
// It also sets s.symbolLen to one past the highest symbol seen.
func (s *Scratch) countSimple(in []byte) (max int) {
	for _, b := range in {
		s.count[b]++
	}
	var biggest uint32
	for sym, cnt := range s.count[:] {
		if cnt > biggest {
			biggest = cnt
		}
		if cnt != 0 {
			s.symbolLen = uint16(sym) + 1
		}
	}
	return int(biggest)
}
// minTableLog provides the minimum logSize to safely represent a distribution.
// It is bounded both by the remaining input size and by the symbol count.
func (s *Scratch) minTableLog() uint8 {
	fromSrc := highBits(uint32(s.br.remain()-1)) + 1
	fromSymbols := highBits(uint32(s.symbolLen-1)) + 2
	// Return the smaller of the two bounds.
	minBits := fromSymbols
	if fromSrc < fromSymbols {
		minBits = fromSrc
	}
	return uint8(minBits)
}
// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
func (s *Scratch) optimalTableLog() {
	logSize := s.TableLog
	// Cap accuracy by what the remaining source size can usefully exploit.
	if srcCap := uint8(highBits(uint32(s.br.remain()-1))) - 2; srcCap < logSize {
		logSize = srcCap
	}
	// Raise to the minimum needed to represent the distribution.
	if low := s.minTableLog(); low > logSize {
		logSize = low
	}
	// Clamp to the supported [minTablelog, maxTableLog] range.
	switch {
	case logSize < minTablelog:
		logSize = minTablelog
	case logSize > maxTableLog:
		logSize = maxTableLog
	}
	s.actualTableLog = logSize
}
// rtbTable is the "rest to beat" rounding table used by normalizeCount:
// it is indexed by a provisional probability (< 8) to decide whether the
// fixed-point remainder is large enough to round that probability up.
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
//
// Very low counts get probability -1 (a "less than one" marker); any
// remaining surplus or deficit is absorbed by the most probable symbol.
// If that correction would be too large, the slower normalizeCount2
// fallback is used instead.
func (s *Scratch) normalizeCount() error {
	var (
		tableLog          = s.actualTableLog
		scale             = 62 - uint64(tableLog)
		step              = (1 << 62) / uint64(s.br.remain())
		vStep             = uint64(1) << (scale - 20)
		stillToDistribute = int16(1 << tableLog)
		largest           int
		largestP          int16
		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
	)
	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0; /* rle special case */
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Low-count symbol: assign the minimum probability marker.
			s.norm[i] = -1
			stillToDistribute--
		} else {
			// Fixed-point probability: (cnt * step) >> scale.
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				// Small probabilities round up when the fixed-point
				// remainder beats the threshold from rtbTable.
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}
	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		return s.normalizeCount2()
	}
	// Absorb the remaining points into the most probable symbol.
	s.norm[largest] += stillToDistribute
	return nil
}
// Secondary normalization method.
// To be used when primary method fails.
//
// It first pins clearly-low symbols to probability 1 (or -1), then
// distributes the remaining table slots over the unassigned symbols
// proportionally to their counts.
func (s *Scratch) normalizeCount2() error {
	const notYetAssigned = -2
	var (
		distributed  uint32
		total        = uint32(s.br.remain())
		tableLog     = s.actualTableLog
		lowThreshold = uint32(total >> tableLog)
		lowOne       = uint32((total * 3) >> (tableLog + 1))
	)
	// First pass: classify symbols as zero, low (-1), one, or unassigned.
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			s.norm[i] = -1
			distributed++
			total -= cnt
			continue
		}
		if cnt <= lowOne {
			s.norm[i] = 1
			distributed++
			total -= cnt
			continue
		}
		s.norm[i] = notYetAssigned
	}
	toDistribute := (1 << tableLog) - distributed
	if (total / toDistribute) > lowOne {
		// risk of rounding to zero
		lowOne = uint32((total * 3) / (toDistribute * 2))
		for i, cnt := range s.count[:s.symbolLen] {
			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
				s.norm[i] = 1
				distributed++
				total -= cnt
				continue
			}
		}
		toDistribute = (1 << tableLog) - distributed
	}
	if distributed == uint32(s.symbolLen)+1 {
		// all values are pretty poor;
		// probably incompressible data (should have already been detected);
		// find max, then give all remaining points to max
		var maxV int
		var maxC uint32
		for i, cnt := range s.count[:s.symbolLen] {
			if cnt > maxC {
				maxV = i
				maxC = cnt
			}
		}
		s.norm[maxV] += int16(toDistribute)
		return nil
	}
	if total == 0 {
		// all of the symbols were low enough for the lowOne or lowThreshold
		// Spread leftover points round-robin over symbols with prob > 0.
		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
			if s.norm[i] > 0 {
				toDistribute--
				s.norm[i]++
			}
		}
		return nil
	}
	// Proportional distribution of the remaining slots using 62-bit
	// fixed-point arithmetic.
	var (
		vStepLog = 62 - uint64(tableLog)
		mid      = uint64((1 << (vStepLog - 1)) - 1)
		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
		tmpTotal = mid
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if s.norm[i] == notYetAssigned {
			var (
				end    = tmpTotal + uint64(cnt)*rStep
				sStart = uint32(tmpTotal >> vStepLog)
				sEnd   = uint32(end >> vStepLog)
				weight = sEnd - sStart
			)
			if weight < 1 {
				return errors.New("weight < 1")
			}
			s.norm[i] = int16(weight)
			tmpTotal = end
		}
	}
	return nil
}
// validateNorm validates the normalized histogram table: the absolute
// values in s.norm must sum to exactly 1<<actualTableLog, and no symbol
// beyond symbolLen may have a nonzero count. On failure the table is
// dumped to stdout for debugging before the error is returned.
func (s *Scratch) validateNorm() (err error) {
	var total int
	for _, n := range s.norm[:s.symbolLen] {
		if n < 0 {
			total -= int(n)
		} else {
			total += int(n)
		}
	}
	// Dump parameters and table contents whenever an error is returned.
	defer func() {
		if err == nil {
			return
		}
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
		for i, v := range s.norm[:s.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
		}
	}()
	if total != (1 << s.actualTableLog) {
		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
	}
	for i, v := range s.count[s.symbolLen:] {
		if v != 0 {
			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
		}
	}
	return nil
}

370
vendor/github.com/klauspost/compress/fse/decompress.go generated vendored Normal file
View File

@ -0,0 +1,370 @@
package fse
import (
"errors"
"fmt"
)
const (
	// tablelogAbsoluteMax is the largest tablelog the decoder will accept
	// from a stream header; readNCount rejects larger values as corrupt.
	tablelogAbsoluteMax = 15
)
// Decompress a block of data.
// You can provide a scratch buffer to avoid allocations.
// If nil is provided a temporary one will be allocated.
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
	s, err := s.prepare(b)
	if err != nil {
		return nil, err
	}
	s.Out = s.Out[:0]
	// Read the normalized symbol distribution from the stream header.
	err = s.readNCount()
	if err != nil {
		return nil, err
	}
	// Construct the decoding table from the distribution.
	err = s.buildDtable()
	if err != nil {
		return nil, err
	}
	// Decode the bitstream into s.Out.
	err = s.decompress()
	if err != nil {
		return nil, err
	}
	return s.Out, nil
}
// readNCount will read the symbol distribution so decoding tables can be constructed.
//
// It parses the variable-length header into s.norm, sets s.actualTableLog
// and s.symbolLen, and validates that the distribution sums to exactly
// 1<<actualTableLog. The bit-level format interleaves repeat-zero runs
// (previous0) with variable-precision counts.
func (s *Scratch) readNCount() error {
	var (
		charnum   uint16
		previous0 bool
		b         = &s.br
	)
	iend := b.remain()
	if iend < 4 {
		return errors.New("input too small")
	}
	bitStream := b.Uint32()
	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
	if nbBits > tablelogAbsoluteMax {
		return errors.New("tableLog too large")
	}
	bitStream >>= 4
	bitCount := uint(4)
	s.actualTableLog = uint8(nbBits)
	remaining := int32((1 << nbBits) + 1)
	threshold := int32(1 << nbBits)
	gotTotal := int32(0)
	nbBits++
	for remaining > 1 {
		if previous0 {
			// A zero count was just read: decode the run-length of
			// consecutive zero-count symbols.
			n0 := charnum
			for (bitStream & 0xFFFF) == 0xFFFF {
				n0 += 24
				if b.off < iend-5 {
					b.advance(2)
					bitStream = b.Uint32() >> bitCount
				} else {
					bitStream >>= 16
					bitCount += 16
				}
			}
			for (bitStream & 3) == 3 {
				n0 += 3
				bitStream >>= 2
				bitCount += 2
			}
			n0 += uint16(bitStream & 3)
			bitCount += 2
			if n0 > maxSymbolValue {
				return errors.New("maxSymbolValue too small")
			}
			// Mark the skipped symbols as zero-count.
			// charnum&0xff keeps the index inside the 256-entry norm table.
			for charnum < n0 {
				s.norm[charnum&0xff] = 0
				charnum++
			}
			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
				b.advance(bitCount >> 3)
				bitCount &= 7
				bitStream = b.Uint32() >> bitCount
			} else {
				bitStream >>= 2
			}
		}
		// Read one count with variable precision: small values use one
		// fewer bit than large ones (threshold-based encoding).
		max := (2*(threshold) - 1) - (remaining)
		var count int32
		if (int32(bitStream) & (threshold - 1)) < max {
			count = int32(bitStream) & (threshold - 1)
			bitCount += nbBits - 1
		} else {
			count = int32(bitStream) & (2*threshold - 1)
			if count >= threshold {
				count -= max
			}
			bitCount += nbBits
		}
		count-- // extra accuracy
		if count < 0 {
			// -1 means +1
			remaining += count
			gotTotal -= count
		} else {
			remaining -= count
			gotTotal += count
		}
		s.norm[charnum&0xff] = int16(count)
		charnum++
		previous0 = count == 0
		// As the remaining total shrinks, fewer bits are needed per count.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}
		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
			b.advance(bitCount >> 3)
			bitCount &= 7
		} else {
			bitCount -= (uint)(8 * (iend - 4 - b.off))
			b.off = iend - 4
		}
		bitStream = b.Uint32() >> (bitCount & 31)
	}
	s.symbolLen = charnum
	if s.symbolLen <= 1 {
		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
	}
	if s.symbolLen > maxSymbolValue+1 {
		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
	}
	if remaining != 1 {
		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
	}
	if bitCount > 32 {
		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
	}
	if gotTotal != 1<<s.actualTableLog {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
	return nil
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
type decSymbol struct {
	newState uint16 // base of next state; low bits read from the stream are added
	symbol   uint8  // symbol emitted when the decoder is in this state
	nbBits   uint8  // number of low bits to read for the next state
}
// allocDtable will allocate decoding tables if they are not big enough,
// reusing existing backing arrays whenever their capacity suffices.
func (s *Scratch) allocDtable() {
	want := 1 << s.actualTableLog
	if cap(s.decTable) < want {
		s.decTable = make([]decSymbol, want)
	}
	s.decTable = s.decTable[:want]

	// The symbol and state scratch tables are fixed at 256 entries.
	if cap(s.ct.tableSymbol) < 256 {
		s.ct.tableSymbol = make([]byte, 256)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:256]

	if cap(s.ct.stateTable) < 256 {
		s.ct.stateTable = make([]uint16, 256)
	}
	s.ct.stateTable = s.ct.stateTable[:256]
}
// buildDtable will build the decoding table.
//
// It requires s.norm, s.symbolLen and s.actualTableLog to be set (by
// readNCount) and fills s.decTable. It mirrors buildCTable's symbol
// spreading so encoder and decoder visit states in the same order.
func (s *Scratch) buildDtable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	s.allocDtable()
	symbolNext := s.ct.stateTable[:256]
	// Init, lay down lowprob symbols
	s.zeroBits = false
	{
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for i, v := range s.norm[:s.symbolLen] {
			if v == -1 {
				// Low-probability symbols occupy the top of the table.
				s.decTable[highThreshold].symbol = uint8(i)
				highThreshold--
				symbolNext[i] = 1
			} else {
				if v >= largeLimit {
					s.zeroBits = true
				}
				symbolNext[i] = uint16(v)
			}
		}
	}
	// Spread symbols
	{
		tableMask := tableSize - 1
		step := tableStep(tableSize)
		position := uint32(0)
		for ss, v := range s.norm[:s.symbolLen] {
			for i := 0; i < int(v); i++ {
				s.decTable[position].symbol = uint8(ss)
				position = (position + step) & tableMask
				for position > highThreshold {
					// lowprob area
					position = (position + step) & tableMask
				}
			}
		}
		if position != 0 {
			// position must reach all cells once, otherwise normalizedCounter is incorrect
			return errors.New("corrupted input (position != 0)")
		}
	}
	// Build Decoding table
	{
		tableSize := uint16(1 << s.actualTableLog)
		for u, v := range s.decTable {
			symbol := v.symbol
			nextState := symbolNext[symbol]
			symbolNext[symbol] = nextState + 1
			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
			s.decTable[u].nbBits = nBits
			newState := (nextState << nBits) - tableSize
			if newState > tableSize {
				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
			}
			if newState == uint16(u) && nBits == 0 {
				// Seems weird that this is possible with nbits > 0.
				// A zero-bit self-loop would make decoding hang forever.
				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
			}
			s.decTable[u].newState = newState
		}
	}
	return nil
}
// decompress will decompress the bitstream.
// If the buffer is over-read an error is returned.
//
// Output is appended to s.Out; decoding stops with an error once
// len(s.Out) reaches s.DecompressLimit.
func (s *Scratch) decompress() error {
	br := &s.bits
	br.init(s.br.unread())

	var s1, s2 decoder
	// Initialize and decode first state and symbol.
	s1.init(br, s.decTable, s.actualTableLog)
	s2.init(br, s.decTable, s.actualTableLog)

	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.ct.tableSymbol[:256]
	var off uint8

	// Main part: two interleaved states emit 4 symbols per iteration.
	// tmp is flushed to s.Out each time off wraps to 0 (every 256 symbols).
	if !s.zeroBits {
		// No symbol has a zero-bit transition, so the cheaper nextFast
		// path can be used.
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.nextFast()
			tmp[off+1] = s2.nextFast()
			br.fillFast()
			tmp[off+2] = s1.nextFast()
			tmp[off+3] = s2.nextFast()
			off += 4
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				// Bug fix: enforce DecompressLimit on this path too.
				// Previously only the zero-bits branch checked it, so a
				// crafted input could grow s.Out without bound.
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	} else {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.next()
			tmp[off+1] = s2.next()
			br.fillFast()
			tmp[off+2] = s1.next()
			tmp[off+3] = s2.next()
			off += 4
			if off == 0 {
				// off (uint8) has wrapped to 0 by itself; no reset needed.
				s.Out = append(s.Out, tmp...)
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	}
	// Flush the partial buffer.
	s.Out = append(s.Out, tmp[:off]...)

	// Final bits, a bit more expensive check
	for {
		if s1.finished() {
			s.Out = append(s.Out, s1.final(), s2.final())
			break
		}
		br.fill()
		s.Out = append(s.Out, s1.next())
		if s2.finished() {
			s.Out = append(s.Out, s2.final(), s1.final())
			break
		}
		s.Out = append(s.Out, s2.next())
		if len(s.Out) >= s.DecompressLimit {
			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
		}
	}
	return br.close()
}
// decoder keeps track of the current state and updates it from the bitstream.
type decoder struct {
	state uint16      // current index into dt
	br    *bitReader  // source bitstream
	dt    []decSymbol // decoding table, built by buildDtable
}
// init will initialize the decoder and read the first state from the stream.
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
	d.br = in
	d.dt = dt
	// The initial state is the first tableLog bits of the stream.
	d.state = uint16(in.getBits(tableLog))
}
// next returns the next symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) next() uint8 {
	entry := &d.dt[d.state]
	low := d.br.getBits(entry.nbBits)
	d.state = entry.newState + low
	return entry.symbol
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (d *decoder) finished() bool {
	if !d.br.finished() {
		return false
	}
	return d.dt[d.state].nbBits > 0
}
// final returns the current state symbol without decoding the next.
func (d *decoder) final() uint8 {
	entry := d.dt[d.state]
	return entry.symbol
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) nextFast() uint8 {
	entry := d.dt[d.state]
	d.state = entry.newState + d.br.getBitsFast(entry.nbBits)
	return entry.symbol
}

138
vendor/github.com/klauspost/compress/fse/fse.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
// Package fse provides Finite State Entropy encoding and decoding.
//
// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
// for byte blocks as implemented in zstd.
//
// See https://github.com/klauspost/compress/tree/master/fse for more information.
package fse
import (
"errors"
"fmt"
)
const (
	/*!MEMORY_USAGE :
	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
	 * Increasing memory usage improves compression ratio
	 * Reduced memory usage can improve speed, due to cache effect
	 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
	maxMemoryUsage     = 14
	defaultMemoryUsage = 13

	// maxTableLog is the largest tablelog accepted for compression.
	maxTableLog  = maxMemoryUsage - 2
	maxTablesize = 1 << maxTableLog
	// defaultTablelog is used when Scratch.TableLog is unset.
	defaultTablelog = defaultMemoryUsage - 2
	// minTablelog is the smallest tablelog; it is the base value added to
	// the 4-bit tablelog field read from a stream header.
	minTablelog    = 5
	maxSymbolValue = 255
)
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	// Callers compare against it directly with ==, so it must remain a sentinel.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")
)
// Scratch provides temporary storage for compression and decompression.
type Scratch struct {
	// Private
	count          [maxSymbolValue + 1]uint32 // histogram of the current input
	norm           [maxSymbolValue + 1]int16  // normalized counts; -1 marks a low-probability symbol
	symbolLen      uint16                     // Length of active part of the symbol table.
	actualTableLog uint8                      // Selected tablelog.
	br             byteReader                 // input reader
	bits           bitReader
	bw             bitWriter
	ct             cTable      // Compression tables.
	decTable       []decSymbol // Decompression table.
	zeroBits       bool        // no bits has prob > 50%.
	clearCount     bool        // clear count
	maxCount       int         // count of the most probable symbol

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte

	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8

	// TableLog will attempt to override the tablelog for the next block.
	TableLog uint8

	// DecompressLimit limits the maximum decoded size acceptable.
	// If > 0 decompression will stop when approximately this many bytes
	// has been decoded.
	// If 0, maximum size will be 2GB.
	DecompressLimit int
}
// Histogram returns the underlying histogram so the caller can populate it
// (and skip that step in the compression), or inspect it after compression
// is done.
// To indicate that you have populated the histogram call HistogramFinished
// with the value of the highest populated symbol, as well as the number of entries
// in the most populated entry. These are accepted at face value.
// The returned slice will always be length 256.
func (s *Scratch) Histogram() []uint32 {
	return s.count[:]
}
// HistogramFinished can be called to indicate that the histogram has been populated.
// maxSymbol is the index of the highest set symbol of the next data segment.
// maxCount is the number of entries in the most populated entry.
// These are accepted at face value.
func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
	s.maxCount = maxCount
	s.symbolLen = uint16(maxSymbol) + 1
	// clearCount is re-armed when the caller actually supplied counts;
	// prepare consults it together with maxCount before clearing.
	s.clearCount = maxCount != 0
}
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if s == nil {
		s = &Scratch{}
	}
	// Apply defaults for unset block parameters.
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = 255
	}
	if s.TableLog == 0 {
		s.TableLog = defaultTablelog
	}
	if s.TableLog > maxTableLog {
		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	// Reset the histogram when flagged and no count was supplied by the caller.
	if s.clearCount && s.maxCount == 0 {
		s.count = [maxSymbolValue + 1]uint32{}
		s.clearCount = false
	}
	s.br.init(in)
	if s.DecompressLimit == 0 {
		// Max size 2GB.
		s.DecompressLimit = 2 << 30
	}
	return s, nil
}
// tableStep returns the next table index.
// The stride equals tableSize/2 + tableSize/8 + 3, which is coprime with
// the power-of-two table size and so visits every slot exactly once.
func tableStep(tableSize uint32) uint32 {
	half := tableSize >> 1
	eighth := tableSize >> 3
	return half + eighth + 3
}

310
vendor/github.com/klauspost/compress/fse/fse_test.go generated vendored Normal file
View File

@ -0,0 +1,310 @@
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
// inputFn produces a test input, typically by reading a file from ../testdata.
type inputFn func() ([]byte, error)

// testfiles lists compression test inputs together with the error Compress
// is expected to return (nil means the input should be compressible).
var testfiles = []struct {
	name string
	fn   inputFn
	err  error
}{
	// gettysburg.txt is a small plain text.
	{name: "gettysburg", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/gettysburg.txt") }},
	// Digits is the digits of the irrational number e. Its decimal representation
	// does not repeat, but there are only 10 possible digits, so it should be
	// reasonably compressible.
	{name: "digits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/e.txt") }},
	// Twain is Project Gutenberg's edition of Mark Twain's classic English novel.
	{name: "twain", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/Mark.Twain-Tom.Sawyer.txt") }},
	// Random bytes
	{name: "random", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/sharnd.out") }, err: ErrIncompressible},
	// Low entropy
	{name: "low-ent", fn: func() ([]byte, error) { return []byte(strings.Repeat("1221", 10000)), nil }},
	// Super Low entropy
	{name: "superlow-ent", fn: func() ([]byte, error) { return []byte(strings.Repeat("1", 10000) + strings.Repeat("2", 500)), nil }},
	// Zero bytes
	{name: "zeroes", fn: func() ([]byte, error) { return make([]byte, 10000), nil }, err: ErrUseRLE},
	// Regression inputs from fuzzing and bug reports.
	{name: "crash1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash1.bin") }, err: ErrIncompressible},
	{name: "crash2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash2.bin") }, err: ErrIncompressible},
	{name: "crash3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash3.bin") }, err: ErrIncompressible},
	{name: "endzerobits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endzerobits.bin") }, err: nil},
	{name: "endnonzero", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endnonzero.bin") }, err: ErrIncompressible},
	{name: "case1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case1.bin") }, err: ErrIncompressible},
	{name: "case2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case2.bin") }, err: ErrIncompressible},
	{name: "case3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case3.bin") }, err: ErrIncompressible},
	{name: "pngdata.001", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/pngdata.bin") }, err: nil},
	{name: "normcount2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/normcount2.bin") }, err: nil},
}
// decTestfiles lists malformed inputs that previously caused hangs or
// crashes in the decompressor; err is the exact expected error text.
var decTestfiles = []struct {
	name string
	fn   inputFn
	err  string
}{
	{name: "hang1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang1.bin") }, err: "corruption detected (bitCount 252 > 32)"},
	{name: "hang2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang2.bin") }, err: "newState (0) == oldState (0) and no bits"},
	{name: "hang3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang3.bin") }, err: "maxSymbolValue too small"},
	{name: "symlen1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-symlen1.bin") }, err: "symbolLen (257) too big"},
	{name: "crash4", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash4.bin") }, err: "symbolLen (1) too small"},
	{name: "crash5", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash5.bin") }, err: "symbolLen (1) too small"},
}
// TestCompress runs Compress over each test input and checks the returned
// error against the expected one. A nil output means the input was judged
// not compressible.
func TestCompress(t *testing.T) {
	for _, test := range testfiles {
		t.Run(test.name, func(t *testing.T) {
			var s Scratch
			buf0, err := test.fn()
			if err != nil {
				t.Fatal(err)
			}
			b, err := Compress(buf0, &s)
			if err != test.err {
				t.Errorf("want error %v (%T), got %v (%T)", test.err, test.err, err, err)
			}
			if b == nil {
				t.Log(test.name + ": not compressible")
				return
			}
			t.Logf("%s: %d -> %d bytes (%.2f:1)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)))
		})
	}
}
// ExampleCompress demonstrates basic compression with a reusable Scratch.
// The OUTPUT comment below is verified by `go test`; it must match the
// compressed size of ../testdata/e.txt exactly.
func ExampleCompress() {
	// Read data
	data, err := ioutil.ReadFile("../testdata/e.txt")
	if err != nil {
		panic(err)
	}
	// Create re-usable scratch buffer.
	var s Scratch
	b, err := Compress(data, &s)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Compress: %d -> %d bytes (%.2f:1)\n", len(data), len(b), float64(len(data))/float64(len(b)))
	// OUTPUT: Compress: 100003 -> 41564 bytes (2.41:1)
}
// TestDecompress feeds known-bad inputs to Decompress and verifies the
// exact error text, guarding against regressions to hangs or crashes.
func TestDecompress(t *testing.T) {
	for _, test := range decTestfiles {
		t.Run(test.name, func(t *testing.T) {
			var s Scratch
			buf0, err := test.fn()
			if err != nil {
				t.Fatal(err)
			}
			b, err := Decompress(buf0, &s)
			// Compare by formatted text since expected errors are strings.
			if fmt.Sprint(err) != test.err {
				t.Errorf("want error %q, got %q (%T)", test.err, err, err)
				return
			}
			if err != nil {
				return
			}
			if len(b) == 0 {
				t.Error(test.name + ": no output")
				return
			}
			t.Logf("%s: %d -> %d bytes (1:%.2f)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)))
		})
	}
}
// ExampleDecompress round-trips a file through Compress and Decompress
// using one Scratch and verifies the data survives unchanged.
func ExampleDecompress() {
	// Read data
	data, err := ioutil.ReadFile("../testdata/e.txt")
	if err != nil {
		panic(err)
	}
	// Create re-usable scratch buffer.
	var s Scratch
	b, err := Compress(data, &s)
	if err != nil {
		panic(err)
	}
	// Since we use the output of compression, it cannot be used as output for decompression.
	s.Out = make([]byte, 0, len(data))
	d, err := Decompress(b, &s)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Input matches: %t\n", bytes.Equal(d, data))
	// OUTPUT: Input matches: true
}
// TestGenCorpus is a manual helper (normally skipped) that compresses the
// fuzz compression corpus and writes the results into the decompression
// corpus directory.
// NOTE(review): errors from filepath.Walk and ioutil.WriteFile are ignored;
// acceptable for a skipped generator, but worth checking if re-enabled.
func TestGenCorpus(t *testing.T) {
	t.Skip("only for generating decompress corpus")
	filepath.Walk("fuzz/compress/corpus", func(path string, info os.FileInfo, err error) error {
		t.Run(path, func(t *testing.T) {
			var s Scratch
			buf0, err := ioutil.ReadFile(path)
			if err != nil {
				t.Fatal(err)
			}
			b, err := Compress(buf0, &s)
			if err != nil {
				t.Skip("skipping")
				return
			}
			t.Logf("%s: %d -> %d bytes (%.2f:1)", path, len(buf0), len(b), float64(len(buf0))/float64(len(b)))
			dstP := strings.Replace(path, "compress", "decompress", 1)
			ioutil.WriteFile(dstP, b, os.ModePerm)
		})
		return nil
	})
}
// BenchmarkCompress measures compression throughput for each test input,
// skipping inputs whose expected result is an error (e.g. incompressible).
func BenchmarkCompress(b *testing.B) {
	for _, tt := range testfiles {
		test := tt
		b.Run(test.name, func(b *testing.B) {
			var s Scratch
			buf0, err := test.fn()
			if err != nil {
				b.Fatal(err)
			}
			_, err = Compress(buf0, &s)
			if err != test.err {
				b.Fatal("unexpected error:", err)
			}
			// Reached when the expected error is non-nil (e.g. ErrIncompressible):
			// nothing meaningful to benchmark.
			if err != nil {
				b.Skip("skipping benchmark: ", err)
				return
			}
			b.ResetTimer()
			b.ReportAllocs()
			b.SetBytes(int64(len(buf0)))
			for i := 0; i < b.N; i++ {
				_, _ = Compress(buf0, &s)
			}
		})
	}
}
// TestReadNCount round-trips each input and verifies that the decoder
// reconstructs the same normalized histogram, tablelog and spread symbol
// table that the encoder produced, then checks the decompressed bytes.
func TestReadNCount(t *testing.T) {
	for i := range testfiles {
		var s Scratch
		test := testfiles[i]
		t.Run(test.name, func(t *testing.T) {
			name := test.name + ": "
			buf0, err := testfiles[i].fn()
			if err != nil {
				t.Fatal(err)
			}
			b, err := Compress(buf0, &s)
			if err != test.err {
				t.Error(err)
				return
			}
			if err != nil {
				t.Skip(name + err.Error())
				return
			}
			t.Logf("%s: %d -> %d bytes (%.2f:1)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)))
			//t.Logf("%v", b)
			var s2 Scratch
			dc, err := Decompress(b, &s2)
			if err != nil {
				t.Fatal(err)
			}
			// The decoder must recover the encoder's normalized table exactly.
			want := s.norm[:s.symbolLen]
			got := s2.norm[:s2.symbolLen]
			if !cmp.Equal(want, got) {
				if s.actualTableLog != s2.actualTableLog {
					t.Errorf(name+"norm table, want tablelog: %d, got %d", s.actualTableLog, s2.actualTableLog)
				}
				if s.symbolLen != s2.symbolLen {
					t.Errorf(name+"norm table, want size: %d, got %d", s.symbolLen, s2.symbolLen)
				}
				t.Errorf(name+"norm table, got delta: \n%s", cmp.Diff(want, got))
				return
			}
			// The spread symbol tables must also match entry for entry.
			for i, dec := range s2.decTable {
				dd := dec.symbol
				ee := s.ct.tableSymbol[i]
				if dd != ee {
					t.Errorf("table symbol mismatch. idx %d, enc: %v, dec:%v", i, ee, dd)
					break
				}
			}
			if dc != nil {
				if len(buf0) != len(dc) {
					t.Errorf(name+"decompressed, want size: %d, got %d", len(buf0), len(dc))
					// Trim to common length so the diff below stays readable.
					if len(buf0) > len(dc) {
						buf0 = buf0[:len(dc)]
					} else {
						dc = dc[:len(buf0)]
					}
					if !cmp.Equal(buf0, dc) {
						t.Errorf(name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc)
					}
					return
				}
				if !cmp.Equal(buf0, dc) {
					t.Errorf(name+"decompressed, got delta: \n%s", cmp.Diff(buf0, dc))
				}
				if !t.Failed() {
					t.Log("... roundtrip ok!")
				}
			}
		})
	}
}
// BenchmarkDecompress measures decompression throughput for each test
// input, first verifying a correct roundtrip outside the timed loop.
func BenchmarkDecompress(b *testing.B) {
	for _, tt := range testfiles {
		test := tt
		b.Run(test.name, func(b *testing.B) {
			var s, s2 Scratch
			buf0, err := test.fn()
			if err != nil {
				b.Fatal(err)
			}
			out, err := Compress(buf0, &s)
			if err != test.err {
				b.Fatal(err)
			}
			if err != nil {
				b.Skip(test.name + ": " + err.Error())
				return
			}
			// Sanity-check the roundtrip before timing.
			got, err := Decompress(out, &s2)
			if err != nil {
				b.Fatal(err)
			}
			if !bytes.Equal(buf0, got) {
				b.Fatal("output mismatch")
			}
			b.ResetTimer()
			b.ReportAllocs()
			b.SetBytes(int64(len(buf0)))
			for i := 0; i < b.N; i++ {
				_, err = Decompress(out, &s2)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸0¿Ì^&øB55548270565ââ|ËnöÄâƒ|Ëne

View File

@ -0,0 +1 @@
<EFBFBD>|Ыu|l888178419700125232338905334425НПяpcит<D0B8><D182>В?nџџџџ

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸70¿Ì^&øB5555555482705664©ö0123456789abcdefghijklmnpq828VË12-0rstuvwxyz¿ï¿]=!(BADPREC)68@š'E>103ĉs){µÃ1Kboo�l

View File

@ -0,0 +1 @@
--)!)')44441402363-40e1a8Aea03B00161__1A-!_1________8)-0414''

View File

@ -0,0 +1 @@
24055111512312578270211815834045410156250650722545140161410432351365152036224040CE3BdA64ABbeDddDfd9eF6C55511151231257827021181583404� ï54101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDcE92fbE6

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸7%¿B5S©0123456789 İ~¿å<abcdefghijklmnpqrstuvwxyz¿ï¿]=!(BADPREC)68@š'E>1

View File

@ -0,0 +1 @@
<55555555559

View File

@ -0,0 +1 @@
窿|ア廃W2zヒu|

View File

@ -0,0 +1 @@
2555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABbeDddDfd9eF6C55511151231257827021181583404� ï54101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDc

View File

@ -0,0 +1 @@
<EFBFBD>4651953614188823848962783813

View File

@ -0,0 +1 @@
5555555555555555555555555555555555555555555555555555555555555555554

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸7%¿B5S©0123456789abcdefghijklmnpqrstuvwxyz¿ï¿]=!(BADPREC)68@š'E>1031Kboo�l

View File

@ -0,0 +1 @@
-86840441164045277856725051e0xFA0FAF9b9dd1FFF87fb9BACa3aF62FDac8

View File

@ -0,0 +1 @@
cØâƒ|±”pW £¸s%!(BADPREC)5?hç<68>™ÖÏĞÕû63837019buo�lo

View File

@ -0,0 +1 @@
cリ窿|ア廃W2zヒu|

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸7%¿B482705664©0123456789abcdefghijklmnpqrstuvwxyz¿ï¿]=!(BADPREC)68@š'E>103ĉs){µÃ1Kboo�l

View File

@ -0,0 +1 @@
5555555555555555554

View File

@ -0,0 +1 @@
cÄâƒ|ËöÄâƒ|Ënm flag fieldu

View File

@ -0,0 +1 @@
Øâƒ|±”W £¸%S©0123479abcdefghijklmnopqrstuvwxyzソï]=!(BADPREC)568@š'

View File

@ -0,0 +1 @@
リ窿|ヒu|

View File

@ -0,0 +1 @@
c<EFBFBD><EFBFBD><EFBFBD>|<7C><>pW <0C><>s'<27><><EFBFBD><EFBFBD><EFBFBD>)0567819432<33>u|

View File

@ -0,0 +1 @@
Øâƒ\±”W £¸7%¿B8673617379884035472059622406959533691406255S©0123456789abcdefghijklmnpqrstuvwxyz¿ï¿]=!(BADPREC)68@š'E>1031Kboo�

View File

@ -0,0 +1 @@
55555555559

View File

@ -0,0 +1 @@
fØâƒ|±”½W £¸s'˜Žp¿ï)05678too few operands for format '%19432Ëu2

View File

@ -0,0 +1 @@
ƒ<EFBFBD><EFBFBD>ο€θ

View File

@ -0,0 +1 @@
5555555555555etProcessMemoryInfo5217012461354e46015555555555555555555555555555554

View File

@ -0,0 +1 @@
55511151225

View File

@ -0,0 +1 @@
555111512312578270211815834045410156250xD9beDddDfd9eF6C5551115123125782702118158340454101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDcE92fbE6F

View File

@ -0,0 +1 @@
Øâƒ|±”W £¸7%¿B5S©0123456789acdefghijklmnpqrstuvwxyz¿ï¿]=!(BADPREC)8@š'E>1031Kboo�l

View File

@ -0,0 +1 @@
ポ|346519536141888238489627838134765(25l

View File

@ -0,0 +1 @@
55555555555555555545555

View File

@ -0,0 +1 @@
cリ粐<EFBFBD>窿|ヒn<EFBE8B>窿|ヒname flag fieldu

View File

@ -0,0 +1 @@
c<EFBFBD><EFBFBD><EFBFBD>|<7C><>pW <0C><><EFBFBD>2z<32>u|

View File

@ -0,0 +1 @@
cØâƒ|±”W £¸s%¿ïB�5S½¿ï©¿ï½¿ï]=!(BADPREC)568@š'E>17031Kboo�l

View File

@ -0,0 +1 @@
5555555555555555555555555555554045217012461354e46015555555555555555555555555555554

View File

@ -0,0 +1 @@
55511110156

View File

@ -0,0 +1 @@
55555555ο55555555555555555

Some files were not shown because too many files have changed in this diff Show More