Mirror of https://github.com/k8snetworkplumbingwg/whereabouts.git (synced 2025-06-03 06:42:26 +00:00)
Parent: 10707e4243
Commit: 47fadb0720

12 vendor/github.com/containernetworking/cni/pkg/types/types.go (generated, vendored)
@@ -56,8 +56,8 @@ func (n *IPNet) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// NetConfType describes a network.
-type NetConfType struct {
+// NetConf describes a network.
+type NetConf struct {
 	CNIVersion string `json:"cniVersion,omitempty"`
 
 	Name string `json:"name,omitempty"`
@@ -73,9 +73,6 @@ type NetConfType struct {
 	ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"`
 }
 
-// NetConf is defined as different type as custom MarshalJSON() and issue #1096
-type NetConf NetConfType
-
 // GCAttachment is the parameters to a GC call -- namely,
 // the container ID and ifname pair that represents a
 // still-valid attachment.
@@ -86,11 +83,11 @@ type GCAttachment struct {
 
 // Note: DNS should be omit if DNS is empty but default Marshal function
 // will output empty structure hence need to write a Marshal function
-func (n *NetConfType) MarshalJSON() ([]byte, error) {
+func (n *NetConf) MarshalJSON() ([]byte, error) {
 	// use type alias to escape recursion for json.Marshal() to MarshalJSON()
 	type fixObjType = NetConf
 
-	bytes, err := json.Marshal(fixObjType(*n))
+	bytes, err := json.Marshal(fixObjType(*n)) //nolint:all
 	if err != nil {
 		return nil, err
 	}
@@ -122,7 +119,6 @@ type NetConfList struct {
 
 	Name string `json:"name,omitempty"`
 	DisableCheck bool `json:"disableCheck,omitempty"`
-	DisableGC bool `json:"disableGC,omitempty"`
 	Plugins []*NetConf `json:"plugins,omitempty"`
 }
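The hunk above keeps the "marshal through a method-less type" trick that lets a type with a custom `MarshalJSON` serialize itself without recursing. The vendored code does this with a type alias plus a pointer receiver; the sketch below is the closely related defined-type variant of the same idea, with illustrative names rather than the vendored `NetConf` definition.

```go
// Sketch only: avoid MarshalJSON calling itself via json.Marshal.
package main

import (
	"encoding/json"
	"fmt"
)

type example struct {
	Name string `json:"name,omitempty"` // illustrative field, not NetConf
}

func (e example) MarshalJSON() ([]byte, error) {
	// A locally defined type has no methods, so json.Marshal will not
	// call MarshalJSON again on it.
	type plain example
	return json.Marshal(plain(e))
}

func main() {
	out, err := json.Marshal(example{Name: "net0"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"net0"}
}
```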
24 vendor/github.com/containernetworking/cni/pkg/version/plugin.go (generated, vendored)

@@ -142,27 +142,3 @@ func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
 	}
 	return false, nil
 }
-
-// GreaterThan returns true if the first version is greater than the second
-func GreaterThan(version, otherVersion string) (bool, error) {
-	firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
-	if err != nil {
-		return false, err
-	}
-
-	secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
-	if err != nil {
-		return false, err
-	}
-
-	if firstMajor > secondMajor {
-		return true, nil
-	} else if firstMajor == secondMajor {
-		if firstMinor > secondMinor {
-			return true, nil
-		} else if firstMinor == secondMinor && firstMicro > secondMicro {
-			return true, nil
-		}
-	}
-	return false, nil
-}
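The removed `GreaterThan` compared spec versions component-wise; the exported `GreaterThanOrEqualTo` named in the hunk header stays. A hedged usage sketch follows; the `supportsStatus` helper is an assumption modeled on the `SpecVersionHasSTATUS` function removed later in this change, not vendored code.

```go
// Sketch only: gate behavior on the CNI spec version with the
// GreaterThanOrEqualTo helper that remains in this package.
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

// supportsStatus reports whether a config's cniVersion is new enough for the
// STATUS verb (introduced with CNI spec 1.1.0).
func supportsStatus(cniVersion string) (bool, error) {
	return version.GreaterThanOrEqualTo(cniVersion, "1.1.0")
}

func main() {
	ok, err := supportsStatus("1.0.0")
	fmt.Println(ok, err) // false <nil>
}
```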
6 vendor/github.com/containernetworking/plugins/pkg/ns/README.md (generated, vendored)

@@ -13,10 +13,10 @@ The `ns.Do()` method provides **partial** control over network namespaces for you
 
 ```go
 err = targetNs.Do(func(hostNs ns.NetNS) error {
-	linkAttrs := netlink.NewLinkAttrs()
-	linkAttrs.Name = "dummy0"
 	dummy := &netlink.Dummy{
-		LinkAttrs: linkAttrs,
+		LinkAttrs: netlink.LinkAttrs{
+			Name: "dummy0",
+		},
 	}
 	return netlink.LinkAdd(dummy)
 })
```
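For context, a self-contained sketch of how this README snippet is typically combined with `GetNS` from the same package (shown in the next file). The namespace path and the netlink import path are assumptions for illustration, not part of the vendored change.

```go
// Sketch only: open a network namespace by path and add a dummy link inside
// it, as the README example above does inside targetNs.Do.
package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ns"
	"github.com/vishvananda/netlink"
)

func addDummy(nspath string) error {
	targetNs, err := ns.GetNS(nspath)
	if err != nil {
		return fmt.Errorf("failed to open netns %q: %v", nspath, err)
	}
	defer targetNs.Close()

	return targetNs.Do(func(hostNs ns.NetNS) error {
		dummy := &netlink.Dummy{
			LinkAttrs: netlink.LinkAttrs{Name: "dummy0"},
		}
		return netlink.LinkAdd(dummy)
	})
}

func main() {
	if err := addDummy("/var/run/netns/example"); err != nil {
		fmt.Println(err)
	}
}
```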
54 vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go (generated, vendored)

@@ -31,10 +31,6 @@ func GetCurrentNS() (NetNS, error) {
 	// return an unexpected network namespace.
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
-	return getCurrentNSNoLock()
-}
-
-func getCurrentNSNoLock() (NetNS, error) {
 	return GetNS(getCurrentThreadNetNSPath())
 }
 
@@ -156,54 +152,6 @@ func GetNS(nspath string) (NetNS, error) {
 	return &netNS{file: fd}, nil
 }
 
-// Returns a new empty NetNS.
-// Calling Close() let the kernel garbage collect the network namespace.
-func TempNetNS() (NetNS, error) {
-	var tempNS NetNS
-	var err error
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	// Create the new namespace in a new goroutine so that if we later fail
-	// to switch the namespace back to the original one, we can safely
-	// leave the thread locked to die without a risk of the current thread
-	// left lingering with incorrect namespace.
-	go func() {
-		defer wg.Done()
-		runtime.LockOSThread()
-
-		var threadNS NetNS
-		// save a handle to current network namespace
-		threadNS, err = getCurrentNSNoLock()
-		if err != nil {
-			err = fmt.Errorf("failed to open current namespace: %v", err)
-			return
-		}
-		defer threadNS.Close()
-
-		// create the temporary network namespace
-		err = unix.Unshare(unix.CLONE_NEWNET)
-		if err != nil {
-			return
-		}
-
-		// get a handle to the temporary network namespace
-		tempNS, err = getCurrentNSNoLock()
-
-		err2 := threadNS.Set()
-		if err2 == nil {
-			// Unlock the current thread only when we successfully switched back
-			// to the original namespace; otherwise leave the thread locked which
-			// will force the runtime to scrap the current thread, that is maybe
-			// not as optimal but at least always safe to do.
-			runtime.UnlockOSThread()
-		}
-	}()
-
-	wg.Wait()
-	return tempNS, err
-}
-
 func (ns *netNS) Path() string {
 	return ns.file.Name()
 }
@@ -225,7 +173,7 @@ func (ns *netNS) Do(toRun func(NetNS) error) error {
 	}
 
 	containedCall := func(hostNS NetNS) error {
-		threadNS, err := getCurrentNSNoLock()
+		threadNS, err := GetCurrentNS()
 		if err != nil {
 			return fmt.Errorf("failed to open current netns: %v", err)
 		}
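The removed `TempNetNS` illustrates why namespace switches are done on a goroutine with a locked OS thread: if the thread cannot be proven to be back in its original namespace, it is left locked so the runtime discards it. Below is a simplified, hedged sketch of that pattern, not the vendored implementation; it needs CAP_NET_ADMIN to actually succeed.

```go
// Sketch only: run work after unsharing a scratch network namespace on a
// dedicated, locked OS thread.
package main

import (
	"fmt"
	"runtime"
	"sync"

	"golang.org/x/sys/unix"
)

func inScratchNetns(work func() error) error {
	var err error
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runtime.LockOSThread() // the netns switch only affects this thread

		if err = unix.Unshare(unix.CLONE_NEWNET); err != nil {
			// No switch happened, so the thread is safe to reuse.
			runtime.UnlockOSThread()
			return
		}
		err = work()
		// Deliberately never unlock: the goroutine exits with the thread
		// still locked, so the runtime scraps the thread instead of
		// reusing it in the wrong namespace.
	}()
	wg.Wait()
	return err
}

func main() {
	// Requires CAP_NET_ADMIN; prints an error otherwise.
	fmt.Println(inScratchNetns(func() error { return nil }))
}
```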
9 vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go (generated, vendored)

@@ -114,12 +114,3 @@ func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error {
 func CmdDelWithArgs(args *skel.CmdArgs, f func() error) error {
 	return CmdDel(args.Netns, args.ContainerID, args.IfName, f)
 }
-
-func CmdStatus(f func() error) error {
-	os.Setenv("CNI_COMMAND", "STATUS")
-	os.Setenv("CNI_PATH", os.Getenv("PATH"))
-	os.Setenv("CNI_NETNS_OVERRIDE", "1")
-	defer envCleanup()
-
-	return f()
-}
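These helpers wrap a callback with the CNI environment variables a runtime would set, then clean them up, as the removed `CmdStatus` body shows for STATUS. A hedged usage sketch with the `CmdDelWithArgs` helper kept by this change; the `skel.CmdArgs` values are illustrative.

```go
// Sketch only: invoke a plugin's DEL path in a test through the testutils
// wrapper, which manages the CNI_* environment around the callback.
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/plugins/pkg/testutils"
)

func main() {
	args := &skel.CmdArgs{
		ContainerID: "dummy-container",
		Netns:       "/var/run/netns/example",
		IfName:      "eth0",
	}

	err := testutils.CmdDelWithArgs(args, func() error {
		// In a real test this would call the plugin's cmdDel(args).
		return nil
	})
	fmt.Println(err)
}
```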
9 vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go (generated, vendored)

@@ -19,7 +19,7 @@ import (
 )
 
 // AllSpecVersions contains all CNI spec version numbers
-var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0"}
+var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0"}
 
 // SpecVersionHasIPVersion returns true if the given CNI specification version
 // includes the "version" field in the IP address elements
@@ -39,13 +39,6 @@ func SpecVersionHasCHECK(ver string) bool {
 	return ok
 }
 
-// SpecVersionHasSTATUS returns true if the given CNI specification version
-// supports the STATUS command
-func SpecVersionHasSTATUS(ver string) bool {
-	ok, _ := version.GreaterThanOrEqualTo(ver, "1.1.0")
-	return ok
-}
-
 // SpecVersionHasChaining returns true if the given CNI specification version
 // supports plugin chaining
 func SpecVersionHasChaining(ver string) bool {
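A hedged sketch of how test code typically consumes these helpers, using `AllSpecVersions` and `SpecVersionHasCHECK`, both of which remain after this change; the loop body is illustrative.

```go
// Sketch only: iterate the supported spec versions and skip cases that a
// given version cannot support.
package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/testutils"
)

func main() {
	for _, ver := range testutils.AllSpecVersions {
		if !testutils.SpecVersionHasCHECK(ver) {
			fmt.Printf("spec %s: skipping CHECK-based cases\n", ver)
			continue
		}
		fmt.Printf("spec %s: would run CHECK-based cases\n", ver)
	}
}
```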
@@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error {
 		return errors.Wrapf(err, "replace operation failed to decode path")
 	}
 
-	if path == "" {
-		val := op.value()
-
-		if val.which == eRaw {
-			if !val.tryDoc() {
-				if !val.tryAry() {
-					return errors.Wrapf(err, "replace operation value must be object or array")
-				}
-			}
-		}
-
-		switch val.which {
-		case eAry:
-			*doc = &val.ary
-		case eDoc:
-			*doc = &val.doc
-		case eRaw:
-			return errors.Wrapf(err, "replace operation hit impossible case")
-		}
-
-		return nil
-	}
-
 	con, key := findObject(doc, path)
 
 	if con == nil {
@@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error {
 		return errors.Wrapf(err, "test operation failed to decode path")
 	}
 
-	if path == "" {
-		var self lazyNode
-
-		switch sv := (*doc).(type) {
-		case *partialDoc:
-			self.doc = *sv
-			self.which = eDoc
-		case *partialArray:
-			self.ary = *sv
-			self.which = eAry
-		}
-
-		if self.equal(op.value()) {
-			return nil
-		}
-
-		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
-	}
-
 	con, key := findObject(doc, path)
 
 	if con == nil {
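The removed branches handled "replace" and "test" operations whose path is the empty string, i.e. operations targeting the whole document. The file header for this section was not captured; the identifiers match the evanphx/json-patch package, so the sketch below assumes that import path, and the document and patch contents are illustrative.

```go
// Sketch only: a whole-document ("" path) test + replace patch. Whether the
// empty path is accepted depends on the json-patch version in use; the
// branches removed above are what implemented it.
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name":"alpha"}`)

	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "test", "path": "", "value": {"name": "alpha"}},
		{"op": "replace", "path": "", "value": {"name": "beta"}}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	fmt.Println(string(out), err) // {"name":"beta"} <nil> when "" paths are supported
}
```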
12 vendor/github.com/fxamacker/cbor/v2/.gitignore (generated, vendored)

@@ -1,12 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
104 vendor/github.com/fxamacker/cbor/v2/.golangci.yml (generated, vendored)
@ -1,104 +0,0 @@
|
||||
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
|
||||
|
||||
linters-settings:
|
||||
depguard:
|
||||
rules:
|
||||
prevent_unmaintained_packages:
|
||||
list-mode: strict
|
||||
files:
|
||||
- $all
|
||||
- "!$test"
|
||||
allow:
|
||||
- $gostd
|
||||
- github.com/x448/float16
|
||||
deny:
|
||||
- pkg: io/ioutil
|
||||
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
|
||||
dupl:
|
||||
threshold: 100
|
||||
funlen:
|
||||
lines: 100
|
||||
statements: 50
|
||||
goconst:
|
||||
ignore-tests: true
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- diagnostic
|
||||
- experimental
|
||||
- opinionated
|
||||
- performance
|
||||
- style
|
||||
disabled-checks:
|
||||
- commentedOutCode
|
||||
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||
- ifElseChain
|
||||
- octalLiteral
|
||||
- paramTypeCombine
|
||||
- whyNoLint
|
||||
gofmt:
|
||||
simplify: false
|
||||
goimports:
|
||||
local-prefixes: github.com/fxamacker/cbor
|
||||
golint:
|
||||
min-confidence: 0
|
||||
govet:
|
||||
check-shadowing: true
|
||||
lll:
|
||||
line-length: 140
|
||||
maligned:
|
||||
suggest-new: true
|
||||
misspell:
|
||||
locale: US
|
||||
staticcheck:
|
||||
checks: ["all"]
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- depguard
|
||||
- errcheck
|
||||
- exportloopref
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- gofmt
|
||||
- goimports
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nilerr
|
||||
- revive
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
|
||||
issues:
|
||||
# max-issues-per-linter default is 50. Set to 0 to disable limit.
|
||||
max-issues-per-linter: 0
|
||||
# max-same-issues default is 3. Set to 0 to disable limit.
|
||||
max-same-issues: 0
|
||||
|
||||
exclude-rules:
|
||||
- path: decode.go
|
||||
text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string `, ` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
|
||||
- path: valid.go
|
||||
text: "string ` for type ` has (\\d+) occurrences, make it a constant"
|
||||
- path: valid.go
|
||||
text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
|
133 vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md (generated, vendored)
@ -1,133 +0,0 @@
|
||||
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||
identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall
|
||||
community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||
any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email address,
|
||||
without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
faye.github@gmail.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of
|
||||
actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||
community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||
[https://www.contributor-covenant.org/translations][translations].
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||
[FAQ]: https://www.contributor-covenant.org/faq
|
||||
[translations]: https://www.contributor-covenant.org/translations
|
41 vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md (generated, vendored)
@ -1,41 +0,0 @@
|
||||
# How to contribute
|
||||
|
||||
You can contribute by using the library, opening issues, or opening pull requests.
|
||||
|
||||
## Bug reports and security vulnerabilities
|
||||
|
||||
Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
|
||||
|
||||
To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
|
||||
|
||||
Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
|
||||
|
||||
## Pull requests
|
||||
|
||||
Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
|
||||
|
||||
Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
|
||||
|
||||
See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
|
||||
|
||||
Pull requests have a greater chance of being approved if:
|
||||
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
|
||||
- it has > 97% code coverage.
|
||||
|
||||
## Describe your issue
|
||||
|
||||
Clearly describe the issue:
|
||||
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
|
||||
* If you propose a change or addition, try to give an example how the improved code could look like or how to use it.
|
||||
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
|
||||
|
||||
## Please don't
|
||||
|
||||
Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
|
||||
|
||||
Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
|
||||
|
||||
## Credits
|
||||
|
||||
- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
|
||||
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
|
21 vendor/github.com/fxamacker/cbor/v2/LICENSE (generated, vendored)

@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019-present Faye Amacker
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
691 vendor/github.com/fxamacker/cbor/v2/README.md (generated, vendored)
@ -1,691 +0,0 @@
|
||||
# CBOR Codec in Go
|
||||
|
||||
<!-- [](#cbor-library-in-go) -->
|
||||
|
||||
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||
|
||||
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
|
||||
|
||||
## fxamacker/cbor
|
||||
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||
[](#fuzzing-and-code-coverage)
|
||||
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||
|
||||
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||
|
||||
<details><summary>Highlights</summary><p/>
|
||||
|
||||
__🚀 Speed__
|
||||
|
||||
Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
|
||||
|
||||
__🔒 Security__
|
||||
|
||||
Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
|
||||
Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
|
||||
|
||||
__🗜️ Data Size__
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
|
||||
__:jigsaw: Usability__
|
||||
|
||||
API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
|
||||
|
||||
Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
|
||||
|
||||
__📆 Extensibility__
|
||||
|
||||
Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Secure Decoding with Configurable Settings
|
||||
|
||||
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||
|
||||
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
|
||||
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||
|
||||
```Go
|
||||
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
// while decoding 181 bytes.
|
||||
package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Example data is from https://github.com/golang/go/issues/24446
|
||||
// (shortened to 181 bytes).
|
||||
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
"303030303030303030303030303030303030303030303030303030303030" +
|
||||
"30"
|
||||
|
||||
type X struct {
|
||||
J *X
|
||||
K map[string]int
|
||||
}
|
||||
|
||||
func main() {
|
||||
raw, _ := hex.DecodeString(data)
|
||||
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
|
||||
var x X
|
||||
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
fmt.Println("Decoding finished.")
|
||||
}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||
|
||||
<details><summary>Benchmark details</summary><p/>
|
||||
|
||||
Latest comparison used:
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
#### Prior comparisons
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Smaller Encodings with Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
Example using different struct tags together:
|
||||
|
||||

|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||
|
||||
## Quick Start
|
||||
|
||||
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||
|
||||
### Key Points
|
||||
|
||||
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||
|
||||
- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
|
||||
- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
|
||||
|
||||
Configurable limits and options can be used to balance trade-offs.
|
||||
|
||||
- Encoding and decoding modes are created from options (settings).
|
||||
- Modes can be created at startup and reused.
|
||||
- Modes are safe for concurrent use.
|
||||
|
||||
### Default Mode
|
||||
|
||||
Package level functions only use this library's default settings.
|
||||
They provide the "default mode" of encoding and decoding.
|
||||
|
||||
```go
|
||||
// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
|
||||
b, err = cbor.Marshal(v) // encode v to []byte b
|
||||
err = cbor.Unmarshal(b, &v) // decode []byte b to v
|
||||
decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
|
||||
err = decoder.Decode(&v) // decode a CBOR data item to v
|
||||
|
||||
// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
|
||||
err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
|
||||
|
||||
// v2.5.0 added new functions that return remaining bytes.
|
||||
|
||||
// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
|
||||
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||
|
||||
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||
|
||||
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||
```
|
||||
|
||||
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
|
||||
- Different CBOR libraries may use different default settings.
|
||||
- CBOR-based formats or protocols usually require specific settings.
|
||||
|
||||
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
|
||||
### Presets
|
||||
|
||||
Presets can be used as-is or as a starting point for custom settings.
|
||||
|
||||
```go
|
||||
// EncOptions is a struct of encoder settings.
|
||||
func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
|
||||
func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
|
||||
func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
|
||||
func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
|
||||
```
|
||||
|
||||
Presets are used to create custom modes.
|
||||
|
||||
### Custom Modes
|
||||
|
||||
Modes are created from settings. Once created, modes have immutable settings.
|
||||
|
||||
💡 Create the mode at startup and reuse it. It is safe for concurrent use.
|
||||
|
||||
```Go
|
||||
// Create encoding mode.
|
||||
opts := cbor.CoreDetEncOptions() // use preset options as a starting point
|
||||
opts.Time = cbor.TimeUnix // change any settings if needed
|
||||
em, err := opts.EncMode() // create an immutable encoding mode
|
||||
|
||||
// Reuse the encoding mode. It is safe for concurrent use.
|
||||
|
||||
// API matches encoding/json.
|
||||
b, err := em.Marshal(v) // encode v to []byte b
|
||||
encoder := em.NewEncoder(w) // create encoder with io.Writer w
|
||||
err := encoder.Encode(v) // encode v to io.Writer w
|
||||
```
|
||||
|
||||
Default mode and custom modes automatically apply struct tags.
|
||||
|
||||
### User Specified Buffer for Encoding (v2.7.0)
|
||||
|
||||
`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.
|
||||
|
||||
```Go
|
||||
em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
||||
```
|
||||
|
||||
### Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>Example using several struct tags</summary><p/>
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
|
||||
### CBOR Tags
|
||||
|
||||
CBOR tags are specified in a `TagSet`.
|
||||
|
||||
Custom modes can be created with a `TagSet` to handle CBOR tags.
|
||||
|
||||
```go
|
||||
em, err := opts.EncMode() // no CBOR tags
|
||||
em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
|
||||
em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
||||
```
|
||||
|
||||
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||
|
||||
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||
|
||||
```go
|
||||
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||
|
||||
// Create TagSet (safe for concurrency).
|
||||
tags := cbor.NewTagSet()
|
||||
// Register tag COSE_Sign1 18 with signedCWT type.
|
||||
tags.Add(
|
||||
cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
|
||||
reflect.TypeOf(signedCWT{}),
|
||||
18)
|
||||
|
||||
// Create DecMode with immutable tags.
|
||||
dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
|
||||
|
||||
// Unmarshal to signedCWT with tag support.
|
||||
var v signedCWT
|
||||
if err := dm.Unmarshal(data, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create EncMode with immutable tags.
|
||||
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||
|
||||
// Marshal signedCWT with tag number.
|
||||
if data, err := cbor.Marshal(v); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Functions and Interfaces
|
||||
|
||||
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||
|
||||
Common functions with same API as `encoding/json`:
|
||||
- `Marshal`, `Unmarshal`
|
||||
- `NewEncoder`, `(*Encoder).Encode`
|
||||
- `NewDecoder`, `(*Decoder).Decode`
|
||||
|
||||
NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
|
||||
because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
||||
- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.
|
||||
|
||||
Other useful functions:
|
||||
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
|
||||
- `Wellformed` returns true if the the CBOR data item is well-formed.
|
||||
|
||||
Interfaces identical or comparable to Go `encoding` packages include:
|
||||
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||
|
||||
The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
|
||||
|
||||
</details>
|
||||
|
||||
### Security Tips
|
||||
|
||||
🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
|
||||
|
||||
Default limits may need to be increased for systems handling very large data (e.g. blockchains).
|
||||
|
||||
`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
|
||||
|
||||
## Status
|
||||
|
||||
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
|
||||
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||
|
||||
### Prior Release
|
||||
|
||||
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||
|
||||
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||
|
||||
See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
|
||||
|
||||
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||
|
||||
<!--
|
||||
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
|
||||
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||
|
||||
Comparison of v2.4.0 vs v2.5.0-beta2 provided by @448 (edited to fit width).
|
||||
|
||||
PR [#382](https://github.com/fxamacker/cbor/pull/382) returns buffer to pool in `Encode()`. It adds a bit of overhead to `Encode()` but `NewEncoder().Encode()` is a lot faster and uses less memory as shown here:
|
||||
|
||||
```
|
||||
$ benchstat bench-v2.4.0.log bench-f9e6291.log
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/fxamacker/cbor/v2
|
||||
cpu: 12th Gen Intel(R) Core(TM) i7-12700H
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ sec/op │ sec/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 236.70n ± 2% 58.04n ± 1% -75.48% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 238.00n ± 2% 63.93n ± 1% -73.14% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 238.65n ± 2% 64.88n ± 1% -72.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 242.00n ± 2% 63.00n ± 1% -73.97% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 245.60n ± 1% 68.55n ± 1% -72.09% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 243.20n ± 3% 68.39n ± 1% -71.88% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 563.0n ± 2% 378.3n ± 0% -32.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 2.043µ ± 2% 1.906µ ± 2% -6.75% (p=0.000 n=10)
|
||||
geomean 349.7n 122.7n -64.92%
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ B/op │ B/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 544.0 ± 0% 416.0 ± 0% -23.53% (p=0.000 n=10)
|
||||
geomean 153.4 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ allocs/op │ allocs/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 28.00 ± 0% 26.00 ± 0% -7.14% (p=0.000 n=10)
|
||||
geomean 2.782 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
```
|
||||
|
||||
</details>
|
||||
-->
|
||||
|
||||
## Who uses fxamacker/cbor
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||
|
||||
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||
|
||||
## Standards
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Notable CBOR features include:
|
||||
|
||||
| CBOR Feature | Description |
|
||||
| :--- | :--- |
|
||||
| CBOR tags | API supports built-in and user-defined tags. |
|
||||
| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
|
||||
| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
|
||||
| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
|
||||
| Indefinite length data | Option to allow/forbid for encoding and decoding. |
|
||||
| Well-formedness | Always checked and enforced. |
|
||||
| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
|
||||
| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
|
||||
|
||||
Known limitations are noted in the [Limitations section](#limitations).
|
||||
|
||||
Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
|
||||
|
||||
Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
|
||||
|
||||
After well-formedness is verified, basic validity errors are handled as follows:
|
||||
|
||||
* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
|
||||
* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
|
||||
|
||||
When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
|
||||
|
||||
By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
|
||||
|
||||
__Click to expand topic:__
|
||||
|
||||
<details>
|
||||
<summary>Duplicate Map Keys</summary><p>
|
||||
|
||||
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||
|
||||
`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
|
||||
|
||||
`DupMapKeyEnforcedAPF` enforces detection and rejection of duplidate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
|
||||
|
||||
APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Tag Validity</summary><p>
|
||||
|
||||
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||
|
||||
* Inadmissible type for tag content
|
||||
* Inadmissible value for tag content
|
||||
|
||||
Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
|
||||
|
||||
* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
|
||||
* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
|
||||
|
||||
Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
|
||||
|
||||
For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
|
||||
|
||||
</details>
|
||||
|
||||
## Limitations
|
||||
|
||||
If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
|
||||
|
||||
* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
|
||||
* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
|
||||
* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
|
||||
|
||||
## Fuzzing and Code Coverage
|
||||
|
||||
__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
|
||||
|
||||
__Coverage-guided fuzzing__ must pass billions of execs using before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
|
||||
|
||||
<hr>
|
||||
|
||||
## Versions and API Changes
|
||||
This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
|
||||
|
||||
These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
|
||||
`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
|
||||
|
||||
Exclusions from SemVer:
|
||||
- Newly added API documented as "subject to change".
|
||||
- Newly added API in the master branch that has never been tagged in non-beta release.
|
||||
- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
|
||||
|
||||
For more info, see [How to Contribute](CONTRIBUTING.md).
|
||||
|
||||
## Security Policy
|
||||
|
||||
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||
|
||||
For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
|
||||
|
||||
## Acknowledgements

Many thanks to all the contributors on this project!

I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.

I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.

Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.

This library clearly wouldn't be possible without Carsten Bormann authoring the CBOR RFCs.

Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on the IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).

Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!

This library uses `x448/float16`, which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.

## License

Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).

fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.

<hr>
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
@@ -1,7 +0,0 @@
# Security Policy

Security fixes are provided for the latest released version of fxamacker/cbor.

If the security vulnerability is already known to the public, then you can open an issue as a bug report.

To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
@@ -1,63 +0,0 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
    "errors"
)

// ByteString represents CBOR byte string (major type 2). ByteString can be used
// when using a Go []byte is not possible or convenient. For example, Go doesn't
// allow []byte as map key, so ByteString can be used to support data formats
// having CBOR map with byte string keys. ByteString can also be used to
// encode invalid UTF-8 string as CBOR byte string.
// See DecOption.MapKeyByteStringMode for more details.
type ByteString string

// Bytes returns bytes representing ByteString.
func (bs ByteString) Bytes() []byte {
    return []byte(bs)
}

// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
func (bs ByteString) MarshalCBOR() ([]byte, error) {
    e := getEncodeBuffer()
    defer putEncodeBuffer(e)

    // Encode length
    encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))

    // Encode data
    buf := make([]byte, e.Len()+len(bs))
    n := copy(buf, e.Bytes())
    copy(buf[n:], bs)

    return buf, nil
}

// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
    if bs == nil {
        return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
    }

    // Decoding CBOR null and CBOR undefined to ByteString resets data.
    // This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
    if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
        *bs = ""
        return nil
    }

    d := decoder{data: data, dm: defaultDecMode}

    // Check if CBOR data type is byte string
    if typ := d.nextCBORType(); typ != cborTypeByteString {
        return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
    }

    b, _ := d.parseByteString()
    *bs = ByteString(b)
    return nil
}
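A brief usage note for the `ByteString` type above: the following minimal sketch (not part of the original file) round-trips a `ByteString` holding invalid UTF-8 through `Marshal`/`Unmarshal`, relying only on the exported methods shown in this file.

```go
package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    // ByteString can hold bytes that aren't valid UTF-8 and still encode
    // as a CBOR byte string (major type 2) rather than a text string.
    orig := cbor.ByteString([]byte{0xff, 0xfe, 0x01})

    b, err := cbor.Marshal(orig)
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", b) // 43 ff fe 01 (0x43 = byte string of length 3)

    var decoded cbor.ByteString
    if err := cbor.Unmarshal(b, &decoded); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Bytes()) // [255 254 1]
}
```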
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
@@ -1,363 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type encodeFuncs struct {
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
}
|
||||
|
||||
var (
|
||||
decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
|
||||
encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
|
||||
encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs
|
||||
typeInfoCache sync.Map // map[reflect.Type]*typeInfo
|
||||
)
|
||||
|
||||
type specialType int
|
||||
|
||||
const (
|
||||
specialTypeNone specialType = iota
|
||||
specialTypeUnmarshalerIface
|
||||
specialTypeEmptyIface
|
||||
specialTypeIface
|
||||
specialTypeTag
|
||||
specialTypeTime
|
||||
)
|
||||
|
||||
type typeInfo struct {
|
||||
elemTypeInfo *typeInfo
|
||||
keyTypeInfo *typeInfo
|
||||
typ reflect.Type
|
||||
kind reflect.Kind
|
||||
nonPtrType reflect.Type
|
||||
nonPtrKind reflect.Kind
|
||||
spclType specialType
|
||||
}
|
||||
|
||||
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
k := t.Kind()
|
||||
|
||||
tInfo.nonPtrType = t
|
||||
tInfo.nonPtrKind = k
|
||||
|
||||
if k == reflect.Interface {
|
||||
if t.NumMethod() == 0 {
|
||||
tInfo.spclType = specialTypeEmptyIface
|
||||
} else {
|
||||
tInfo.spclType = specialTypeIface
|
||||
}
|
||||
} else if t == typeTag {
|
||||
tInfo.spclType = specialTypeTag
|
||||
} else if t == typeTime {
|
||||
tInfo.spclType = specialTypeTime
|
||||
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnmarshalerIface
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Array, reflect.Slice:
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
case reflect.Map:
|
||||
tInfo.keyTypeInfo = getTypeInfo(t.Key())
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
}
|
||||
|
||||
return &tInfo
|
||||
}
|
||||
|
||||
type decodingStructType struct {
|
||||
fields fields
|
||||
fieldIndicesByName map[string]int
|
||||
err error
|
||||
toArray bool
|
||||
}
|
||||
|
||||
// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
|
||||
// here's a very basic implementation of an aggregated error.
|
||||
type multierror []error
|
||||
|
||||
func (m multierror) Error() string {
|
||||
var sb strings.Builder
|
||||
for i, err := range m {
|
||||
sb.WriteString(err.Error())
|
||||
if i < len(m)-1 {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func getDecodingStructType(t reflect.Type) *decodingStructType {
|
||||
if v, _ := decodingStructTypeCache.Load(t); v != nil {
|
||||
return v.(*decodingStructType)
|
||||
}
|
||||
|
||||
flds, structOptions := getFields(t)
|
||||
|
||||
toArray := hasToArrayOption(structOptions)
|
||||
|
||||
var errs []error
|
||||
for i := 0; i < len(flds); i++ {
|
||||
if flds[i].keyAsInt {
|
||||
nameAsInt, numErr := strconv.Atoi(flds[i].name)
|
||||
if numErr != nil {
|
||||
errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
|
||||
break
|
||||
}
|
||||
flds[i].nameAsInt = int64(nameAsInt)
|
||||
}
|
||||
|
||||
flds[i].typInfo = getTypeInfo(flds[i].typ)
|
||||
}
|
||||
|
||||
fieldIndicesByName := make(map[string]int, len(flds))
|
||||
for i, fld := range flds {
|
||||
if _, ok := fieldIndicesByName[fld.name]; ok {
|
||||
errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
|
||||
continue
|
||||
}
|
||||
fieldIndicesByName[fld.name] = i
|
||||
}
|
||||
|
||||
var err error
|
||||
{
|
||||
var multi multierror
|
||||
for _, each := range errs {
|
||||
if each != nil {
|
||||
multi = append(multi, each)
|
||||
}
|
||||
}
|
||||
if len(multi) == 1 {
|
||||
err = multi[0]
|
||||
} else if len(multi) > 1 {
|
||||
err = multi
|
||||
}
|
||||
}
|
||||
|
||||
structType := &decodingStructType{
|
||||
fields: flds,
|
||||
fieldIndicesByName: fieldIndicesByName,
|
||||
err: err,
|
||||
toArray: toArray,
|
||||
}
|
||||
decodingStructTypeCache.Store(t, structType)
|
||||
return structType
|
||||
}
|
||||
|
||||
type encodingStructType struct {
|
||||
fields fields
|
||||
bytewiseFields fields
|
||||
lengthFirstFields fields
|
||||
omitEmptyFieldsIdx []int
|
||||
err error
|
||||
toArray bool
|
||||
}
|
||||
|
||||
func (st *encodingStructType) getFields(em *encMode) fields {
|
||||
switch em.sort {
|
||||
case SortNone, SortFastShuffle:
|
||||
return st.fields
|
||||
case SortLengthFirst:
|
||||
return st.lengthFirstFields
|
||||
default:
|
||||
return st.bytewiseFields
|
||||
}
|
||||
}
|
||||
|
||||
type bytewiseFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Less(i, j int) bool {
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
type lengthFirstFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Less(i, j int) bool {
|
||||
if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
|
||||
return len(x.fields[i].cborName) < len(x.fields[j].cborName)
|
||||
}
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
||||
if v, _ := encodingStructTypeCache.Load(t); v != nil {
|
||||
structType := v.(*encodingStructType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
flds, structOptions := getFields(t)
|
||||
|
||||
if hasToArrayOption(structOptions) {
|
||||
return getEncodingStructToArrayType(t, flds)
|
||||
}
|
||||
|
||||
var err error
|
||||
var hasKeyAsInt bool
|
||||
var hasKeyAsStr bool
|
||||
var omitEmptyIdx []int
|
||||
e := getEncodeBuffer()
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
err = &UnsupportedTypeError{t}
|
||||
break
|
||||
}
|
||||
|
||||
// Encode field name
|
||||
if flds[i].keyAsInt {
|
||||
nameAsInt, numErr := strconv.Atoi(flds[i].name)
|
||||
if numErr != nil {
|
||||
err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
|
||||
break
|
||||
}
|
||||
flds[i].nameAsInt = int64(nameAsInt)
|
||||
if nameAsInt >= 0 {
|
||||
encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
|
||||
} else {
|
||||
n := nameAsInt*(-1) - 1
|
||||
encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
|
||||
}
|
||||
flds[i].cborName = make([]byte, e.Len())
|
||||
copy(flds[i].cborName, e.Bytes())
|
||||
e.Reset()
|
||||
|
||||
hasKeyAsInt = true
|
||||
} else {
|
||||
encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
|
||||
flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
|
||||
n := copy(flds[i].cborName, e.Bytes())
|
||||
copy(flds[i].cborName[n:], flds[i].name)
|
||||
e.Reset()
|
||||
|
||||
// If cborName contains a text string, then cborNameByteString contains a
|
||||
// string that has the byte string major type but is otherwise identical to
|
||||
// cborName.
|
||||
flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
|
||||
copy(flds[i].cborNameByteString, flds[i].cborName)
|
||||
// Reset encoded CBOR type to byte string, preserving the "additional
|
||||
// information" bits:
|
||||
flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
|
||||
getAdditionalInformation(flds[i].cborNameByteString[0])
|
||||
|
||||
hasKeyAsStr = true
|
||||
}
|
||||
|
||||
// Check if field can be omitted when empty
|
||||
if flds[i].omitEmpty {
|
||||
omitEmptyIdx = append(omitEmptyIdx, i)
|
||||
}
|
||||
}
|
||||
putEncodeBuffer(e)
|
||||
|
||||
if err != nil {
|
||||
structType := &encodingStructType{err: err}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
// Sort fields by canonical order
|
||||
bytewiseFields := make(fields, len(flds))
|
||||
copy(bytewiseFields, flds)
|
||||
sort.Sort(&bytewiseFieldSorter{bytewiseFields})
|
||||
|
||||
lengthFirstFields := bytewiseFields
|
||||
if hasKeyAsInt && hasKeyAsStr {
|
||||
lengthFirstFields = make(fields, len(flds))
|
||||
copy(lengthFirstFields, flds)
|
||||
sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
|
||||
}
|
||||
|
||||
structType := &encodingStructType{
|
||||
fields: flds,
|
||||
bytewiseFields: bytewiseFields,
|
||||
lengthFirstFields: lengthFirstFields,
|
||||
omitEmptyFieldsIdx: omitEmptyIdx,
|
||||
}
|
||||
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
}
|
||||
|
||||
structType := &encodingStructType{
|
||||
fields: flds,
|
||||
toArray: true,
|
||||
}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||
fs := v.(encodeFuncs)
|
||||
return fs.ef, fs.ief
|
||||
}
|
||||
ef, ief := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||
return ef, ief
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
if v, _ := typeInfoCache.Load(t); v != nil {
|
||||
return v.(*typeInfo)
|
||||
}
|
||||
tInfo := newTypeInfo(t)
|
||||
typeInfoCache.Store(t, tInfo)
|
||||
return tInfo
|
||||
}
|
||||
|
||||
func hasToArrayOption(tag string) bool {
|
||||
s := ",toarray"
|
||||
idx := strings.Index(tag, s)
|
||||
return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
|
||||
}
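The `,toarray` struct tag option parsed by `hasToArrayOption` above corresponds to the `toarray` struct tag documented for this package. A minimal sketch of its effect, with a `coordinate` type invented for the example (field names are dropped and values encode as CBOR array elements):

```go
package main

import (
    "encoding/hex"
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

// coordinate uses the special "toarray" field to request array encoding.
type coordinate struct {
    _ struct{} `cbor:",toarray"`
    X int
    Y int
}

func main() {
    b, err := cbor.Marshal(coordinate{X: 1, Y: 2})
    if err != nil {
        panic(err)
    }
    // 0x82 (array of 2), 0x01, 0x02 -- no field names are encoded.
    fmt.Println(hex.EncodeToString(b)) // 820102

    var c coordinate
    if err := cbor.Unmarshal(b, &c); err != nil {
        panic(err)
    }
    fmt.Println(c.X, c.Y) // 1 2
}
```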
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
@@ -1,182 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type cborType uint8
|
||||
|
||||
const (
|
||||
cborTypePositiveInt cborType = 0x00
|
||||
cborTypeNegativeInt cborType = 0x20
|
||||
cborTypeByteString cborType = 0x40
|
||||
cborTypeTextString cborType = 0x60
|
||||
cborTypeArray cborType = 0x80
|
||||
cborTypeMap cborType = 0xa0
|
||||
cborTypeTag cborType = 0xc0
|
||||
cborTypePrimitives cborType = 0xe0
|
||||
)
|
||||
|
||||
func (t cborType) String() string {
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
return "positive integer"
|
||||
case cborTypeNegativeInt:
|
||||
return "negative integer"
|
||||
case cborTypeByteString:
|
||||
return "byte string"
|
||||
case cborTypeTextString:
|
||||
return "UTF-8 text string"
|
||||
case cborTypeArray:
|
||||
return "array"
|
||||
case cborTypeMap:
|
||||
return "map"
|
||||
case cborTypeTag:
|
||||
return "tag"
|
||||
case cborTypePrimitives:
|
||||
return "primitives"
|
||||
default:
|
||||
return "Invalid type " + strconv.Itoa(int(t))
|
||||
}
|
||||
}
|
||||
|
||||
type additionalInformation uint8
|
||||
|
||||
const (
|
||||
maxAdditionalInformationWithoutArgument = 23
|
||||
additionalInformationWith1ByteArgument = 24
|
||||
additionalInformationWith2ByteArgument = 25
|
||||
additionalInformationWith4ByteArgument = 26
|
||||
additionalInformationWith8ByteArgument = 27
|
||||
|
||||
// For major type 7.
|
||||
additionalInformationAsFalse = 20
|
||||
additionalInformationAsTrue = 21
|
||||
additionalInformationAsNull = 22
|
||||
additionalInformationAsUndefined = 23
|
||||
additionalInformationAsFloat16 = 25
|
||||
additionalInformationAsFloat32 = 26
|
||||
additionalInformationAsFloat64 = 27
|
||||
|
||||
// For major type 2, 3, 4, 5.
|
||||
additionalInformationAsIndefiniteLengthFlag = 31
|
||||
)
|
||||
|
||||
const (
|
||||
maxSimpleValueInAdditionalInformation = 23
|
||||
minSimpleValueIn1ByteArgument = 32
|
||||
)
|
||||
|
||||
func (ai additionalInformation) isIndefiniteLength() bool {
|
||||
return ai == additionalInformationAsIndefiniteLengthFlag
|
||||
}
|
||||
|
||||
const (
|
||||
// From RFC 8949 Section 3:
|
||||
// "The initial byte of each encoded data item contains both information about the major type
|
||||
// (the high-order 3 bits, described in Section 3.1) and additional information
|
||||
// (the low-order 5 bits)."
|
||||
|
||||
// typeMask is used to extract major type in initial byte of encoded data item.
|
||||
typeMask = 0xe0
|
||||
|
||||
// additionalInformationMask is used to extract additional information in initial byte of encoded data item.
|
||||
additionalInformationMask = 0x1f
|
||||
)
|
||||
|
||||
func getType(raw byte) cborType {
|
||||
return cborType(raw & typeMask)
|
||||
}
|
||||
|
||||
func getAdditionalInformation(raw byte) byte {
|
||||
return raw & additionalInformationMask
|
||||
}
|
||||
|
||||
func isBreakFlag(raw byte) bool {
|
||||
return raw == cborBreakFlag
|
||||
}
|
||||
|
||||
func parseInitialByte(b byte) (t cborType, ai byte) {
|
||||
return getType(b), getAdditionalInformation(b)
|
||||
}
|
||||
|
||||
const (
|
||||
tagNumRFC3339Time = 0
|
||||
tagNumEpochTime = 1
|
||||
tagNumUnsignedBignum = 2
|
||||
tagNumNegativeBignum = 3
|
||||
tagNumExpectedLaterEncodingBase64URL = 21
|
||||
tagNumExpectedLaterEncodingBase64 = 22
|
||||
tagNumExpectedLaterEncodingBase16 = 23
|
||||
tagNumSelfDescribedCBOR = 55799
|
||||
)
|
||||
|
||||
const (
|
||||
cborBreakFlag = byte(0xff)
|
||||
cborByteStringWithIndefiniteLengthHead = byte(0x5f)
|
||||
cborTextStringWithIndefiniteLengthHead = byte(0x7f)
|
||||
cborArrayWithIndefiniteLengthHead = byte(0x9f)
|
||||
cborMapWithIndefiniteLengthHead = byte(0xbf)
|
||||
)
|
||||
|
||||
var (
|
||||
cborFalse = []byte{0xf4}
|
||||
cborTrue = []byte{0xf5}
|
||||
cborNil = []byte{0xf6}
|
||||
cborNaN = []byte{0xf9, 0x7e, 0x00}
|
||||
cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
|
||||
cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
|
||||
)
|
||||
|
||||
// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
|
||||
func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
||||
t := getType(contentHead)
|
||||
switch tagNum {
|
||||
case tagNumRFC3339Time:
|
||||
// Tag content (date/time text string in RFC 3339 format) must be string type.
|
||||
if t != cborTypeTextString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumRFC3339Time,
|
||||
"text string",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumEpochTime:
|
||||
// Tag content (epoch date/time) must be uint, int, or float type.
|
||||
if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumEpochTime,
|
||||
"integer or floating-point number",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumUnsignedBignum, tagNumNegativeBignum:
|
||||
// Tag content (bignum) must be byte type.
|
||||
if t != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeErrorf(
|
||||
fmt.Sprintf(
|
||||
"tag number %d or %d must be followed by byte string, got %s",
|
||||
tagNumUnsignedBignum,
|
||||
tagNumNegativeBignum,
|
||||
t.String(),
|
||||
))
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
|
||||
// From RFC 8949 3.4.5.2:
|
||||
// The data item tagged can be a byte string or any other data item. In the latter
|
||||
// case, the tag applies to all of the byte string data items contained in the data
|
||||
// item, except for those contained in a nested data item tagged with an expected
|
||||
// conversion.
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
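As the RFC 8949 comments above describe, the initial byte of every data item packs the major type into its high-order 3 bits and the additional information into its low-order 5 bits. A standalone sketch of that split, mirroring `getType` and `getAdditionalInformation`:

```go
package main

import "fmt"

func main() {
    // 0x82 is the initial byte of a CBOR array of length 2.
    initial := byte(0x82)

    majorType := initial & 0xe0 // high-order 3 bits (0x80 = array)
    addInfo := initial & 0x1f   // low-order 5 bits (here: the length, 2)

    fmt.Printf("major type bits: 0x%02x, additional information: %d\n", majorType, addInfo)
    // Output: major type bits: 0x80, additional information: 2
}
```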
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
File diff suppressed because it is too large
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
@@ -1,724 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// DiagMode is the main interface for CBOR diagnostic notation.
|
||||
type DiagMode interface {
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
|
||||
Diagnose([]byte) (string, error)
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
DiagnoseFirst([]byte) (string, []byte, error)
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
DiagOptions() DiagOptions
|
||||
}
|
||||
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
type ByteStringEncoding uint8
|
||||
|
||||
const (
|
||||
// ByteStringBase16Encoding encodes byte strings in base16, without padding.
|
||||
ByteStringBase16Encoding ByteStringEncoding = iota
|
||||
|
||||
// ByteStringBase32Encoding encodes byte strings in base32, without padding.
|
||||
ByteStringBase32Encoding
|
||||
|
||||
// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
|
||||
ByteStringBase32HexEncoding
|
||||
|
||||
// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
|
||||
ByteStringBase64Encoding
|
||||
|
||||
maxByteStringEncoding
|
||||
)
|
||||
|
||||
func (bse ByteStringEncoding) valid() error {
|
||||
if bse >= maxByteStringEncoding {
|
||||
return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiagOptions specifies Diag options.
|
||||
type DiagOptions struct {
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
// Default is ByteStringBase16Encoding.
|
||||
ByteStringEncoding ByteStringEncoding
|
||||
|
||||
// ByteStringHexWhitespace specifies notating with whitespace in byte string
|
||||
// when ByteStringEncoding is ByteStringBase16Encoding.
|
||||
ByteStringHexWhitespace bool
|
||||
|
||||
// ByteStringText specifies notating with text in byte string
|
||||
// if it is a valid UTF-8 text.
|
||||
ByteStringText bool
|
||||
|
||||
// ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
|
||||
// if it is a valid CBOR bytes.
|
||||
ByteStringEmbeddedCBOR bool
|
||||
|
||||
// CBORSequence specifies notating CBOR sequences.
|
||||
// Otherwise, it returns an error if there are more bytes after the first CBOR data item.
|
||||
CBORSequence bool
|
||||
|
||||
// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
|
||||
FloatPrecisionIndicator bool
|
||||
|
||||
// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
|
||||
// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
|
||||
// require larger amounts of stack to deserialize. Don't increase this higher than you require.
|
||||
MaxNestedLevels int
|
||||
|
||||
// MaxArrayElements specifies the max number of elements for CBOR arrays.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxArrayElements int
|
||||
|
||||
// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxMapPairs int
|
||||
}
|
||||
|
||||
// DiagMode returns a DiagMode with immutable options.
|
||||
func (opts DiagOptions) DiagMode() (DiagMode, error) {
|
||||
return opts.diagMode()
|
||||
}
|
||||
|
||||
func (opts DiagOptions) diagMode() (*diagMode, error) {
|
||||
if err := opts.ByteStringEncoding.valid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decMode, err := DecOptions{
|
||||
MaxNestedLevels: opts.MaxNestedLevels,
|
||||
MaxArrayElements: opts.MaxArrayElements,
|
||||
MaxMapPairs: opts.MaxMapPairs,
|
||||
}.decMode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &diagMode{
|
||||
byteStringEncoding: opts.ByteStringEncoding,
|
||||
byteStringHexWhitespace: opts.ByteStringHexWhitespace,
|
||||
byteStringText: opts.ByteStringText,
|
||||
byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
|
||||
cborSequence: opts.CBORSequence,
|
||||
floatPrecisionIndicator: opts.FloatPrecisionIndicator,
|
||||
decMode: decMode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type diagMode struct {
|
||||
byteStringEncoding ByteStringEncoding
|
||||
byteStringHexWhitespace bool
|
||||
byteStringText bool
|
||||
byteStringEmbeddedCBOR bool
|
||||
cborSequence bool
|
||||
floatPrecisionIndicator bool
|
||||
decMode *decMode
|
||||
}
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
func (dm *diagMode) DiagOptions() DiagOptions {
|
||||
return DiagOptions{
|
||||
ByteStringEncoding: dm.byteStringEncoding,
|
||||
ByteStringHexWhitespace: dm.byteStringHexWhitespace,
|
||||
ByteStringText: dm.byteStringText,
|
||||
ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
|
||||
CBORSequence: dm.cborSequence,
|
||||
FloatPrecisionIndicator: dm.floatPrecisionIndicator,
|
||||
MaxNestedLevels: dm.decMode.maxNestedLevels,
|
||||
MaxArrayElements: dm.decMode.maxArrayElements,
|
||||
MaxMapPairs: dm.decMode.maxMapPairs,
|
||||
}
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
|
||||
func (dm *diagMode) Diagnose(data []byte) (string, error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
|
||||
}
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diagFirst()
|
||||
}
|
||||
|
||||
var defaultDiagMode, _ = DiagOptions{}.diagMode()
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
|
||||
// using the default diagnostic mode.
|
||||
//
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
|
||||
func Diagnose(data []byte) (string, error) {
|
||||
return defaultDiagMode.Diagnose(data)
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return defaultDiagMode.DiagnoseFirst(data)
|
||||
}
|
||||
|
||||
type diagnose struct {
|
||||
dm *diagMode
|
||||
d *decoder
|
||||
w *bytes.Buffer
|
||||
}
|
||||
|
||||
func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
|
||||
return &diagnose{
|
||||
dm: diagm,
|
||||
d: &decoder{data: data, dm: decm},
|
||||
w: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diag(cborSequence bool) (string, error) {
|
||||
// CBOR Sequence
|
||||
firstItem := true
|
||||
for {
|
||||
switch err := di.wellformed(cborSequence); err {
|
||||
case nil:
|
||||
if !firstItem {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
firstItem = false
|
||||
if itemErr := di.item(); itemErr != nil {
|
||||
return di.w.String(), itemErr
|
||||
}
|
||||
|
||||
case io.EOF:
|
||||
if firstItem {
|
||||
return di.w.String(), err
|
||||
}
|
||||
return di.w.String(), nil
|
||||
|
||||
default:
|
||||
return di.w.String(), err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
|
||||
err = di.wellformed(true)
|
||||
if err == nil {
|
||||
err = di.item()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Return EDN and the rest of the data slice (which might be len 0)
|
||||
return di.w.String(), di.d.data[di.d.off:], nil
|
||||
}
|
||||
|
||||
return di.w.String(), nil, err
|
||||
}
|
||||
|
||||
func (di *diagnose) wellformed(allowExtraData bool) error {
|
||||
off := di.d.off
|
||||
err := di.d.wellformed(allowExtraData, false)
|
||||
di.d.off = off
|
||||
return err
|
||||
}
|
||||
|
||||
func (di *diagnose) item() error { //nolint:gocyclo
|
||||
initialByte := di.d.data[di.d.off]
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead,
|
||||
cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
|
||||
di.d.off++
|
||||
if isBreakFlag(di.d.data[di.d.off]) {
|
||||
di.d.off++
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead:
|
||||
// indefinite-length bytes with no chunks.
|
||||
di.w.WriteString(`''_`)
|
||||
return nil
|
||||
case cborTextStringWithIndefiniteLengthHead:
|
||||
// indefinite-length text with no chunks.
|
||||
di.w.WriteString(`""_`)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString("(_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// wellformedIndefiniteString() already checked that the next item is a byte/text string.
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
|
||||
case cborArrayWithIndefiniteLengthHead: // indefinite-length array
|
||||
di.d.off++
|
||||
di.w.WriteString("[_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborMapWithIndefiniteLengthHead: // indefinite-length map
|
||||
di.d.off++
|
||||
di.w.WriteString("{_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
di.w.WriteString(": ")
|
||||
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
}
|
||||
|
||||
t := di.d.nextCBORType()
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
_, _, val := di.d.getHead()
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeNegativeInt:
|
||||
_, _, val := di.d.getHead()
|
||||
if val > math.MaxInt64 {
|
||||
// CBOR negative integer overflows int64, use big.Int to store value.
|
||||
bi := new(big.Int)
|
||||
bi.SetUint64(val)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
nValue := int64(-1) ^ int64(val)
|
||||
di.w.WriteString(strconv.FormatInt(nValue, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeByteString:
|
||||
b, _ := di.d.parseByteString()
|
||||
return di.encodeByteString(b)
|
||||
|
||||
case cborTypeTextString:
|
||||
b, err := di.d.parseTextString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return di.encodeTextString(string(b), '"')
|
||||
|
||||
case cborTypeArray:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('[')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborTypeMap:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('{')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteString(": ")
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
|
||||
case cborTypeTag:
|
||||
_, _, tagNum := di.d.getHead()
|
||||
switch tagNum {
|
||||
case tagNumUnsignedBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumUnsignedBignum,
|
||||
"byte string",
|
||||
nt.String())
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
case tagNumNegativeBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumNegativeBignum,
|
||||
"byte string",
|
||||
nt.String(),
|
||||
)
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
default:
|
||||
di.w.WriteString(strconv.FormatUint(tagNum, 10))
|
||||
di.w.WriteByte('(')
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
|
||||
case cborTypePrimitives:
|
||||
_, ai, val := di.d.getHead()
|
||||
switch ai {
|
||||
case additionalInformationAsFalse:
|
||||
di.w.WriteString("false")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsTrue:
|
||||
di.w.WriteString("true")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsNull:
|
||||
di.w.WriteString("null")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsUndefined:
|
||||
di.w.WriteString("undefined")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat16,
|
||||
additionalInformationAsFloat32,
|
||||
additionalInformationAsFloat64:
|
||||
return di.encodeFloat(ai, val)
|
||||
|
||||
default:
|
||||
di.w.WriteString("simple(")
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeU16 formats a rune as "\uxxxx"
|
||||
func (di *diagnose) writeU16(val rune) {
|
||||
di.w.WriteString("\\u")
|
||||
var in [2]byte
|
||||
in[0] = byte(val >> 8)
|
||||
in[1] = byte(val)
|
||||
sz := hex.EncodedLen(len(in))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, in[:])
|
||||
di.w.Write(dst)
|
||||
}
|
||||
|
||||
var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
|
||||
var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
func (di *diagnose) encodeByteString(val []byte) error {
|
||||
if len(val) > 0 {
|
||||
if di.dm.byteStringText && utf8.Valid(val) {
|
||||
return di.encodeTextString(string(val), '\'')
|
||||
}
|
||||
|
||||
if di.dm.byteStringEmbeddedCBOR {
|
||||
di2 := newDiagnose(val, di.dm.decMode, di.dm)
|
||||
// Always notate embedded CBOR as a CBOR sequence.
|
||||
if str, err := di2.diag(true); err == nil {
|
||||
di.w.WriteString("<<")
|
||||
di.w.WriteString(str)
|
||||
di.w.WriteString(">>")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch di.dm.byteStringEncoding {
|
||||
case ByteStringBase16Encoding:
|
||||
di.w.WriteString("h'")
|
||||
if di.dm.byteStringHexWhitespace {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
if len(val) > 0 {
|
||||
sz += len(val) - 1
|
||||
}
|
||||
di.w.Grow(sz)
|
||||
|
||||
dst := di.w.Bytes()[di.w.Len():]
|
||||
for i := range val {
|
||||
if i > 0 {
|
||||
dst = append(dst, ' ')
|
||||
}
|
||||
hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
|
||||
dst = dst[:len(dst)+2]
|
||||
}
|
||||
di.w.Write(dst)
|
||||
} else {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
}
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32Encoding:
|
||||
di.w.WriteString("b32'")
|
||||
sz := rawBase32Encoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32Encoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32HexEncoding:
|
||||
di.w.WriteString("h32'")
|
||||
sz := rawBase32HexEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32HexEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase64Encoding:
|
||||
di.w.WriteString("b64'")
|
||||
sz := base64.RawURLEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
base64.RawURLEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
default:
|
||||
// It should not be possible for users to construct a *diagMode with an invalid byte
|
||||
// string encoding.
|
||||
panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
|
||||
}
|
||||
}
|
||||
|
||||
const utf16SurrSelf = rune(0x10000)
|
||||
|
||||
// quote should be either `'` or `"`
|
||||
func (di *diagnose) encodeTextString(val string, quote byte) error {
|
||||
di.w.WriteByte(quote)
|
||||
|
||||
for i := 0; i < len(val); {
|
||||
if b := val[i]; b < utf8.RuneSelf {
|
||||
switch {
|
||||
case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
|
||||
di.w.WriteByte('\\')
|
||||
|
||||
switch b {
|
||||
case '\t':
|
||||
b = 't'
|
||||
case '\n':
|
||||
b = 'n'
|
||||
case '\r':
|
||||
b = 'r'
|
||||
}
|
||||
di.w.WriteByte(b)
|
||||
|
||||
case b >= ' ' && b <= '~':
|
||||
di.w.WriteByte(b)
|
||||
|
||||
default:
|
||||
di.writeU16(rune(b))
|
||||
}
|
||||
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
c, size := utf8.DecodeRuneInString(val[i:])
|
||||
switch {
|
||||
case c == utf8.RuneError:
|
||||
return &SemanticError{"cbor: invalid UTF-8 string"}
|
||||
|
||||
case c < utf16SurrSelf:
|
||||
di.writeU16(c)
|
||||
|
||||
default:
|
||||
c1, c2 := utf16.EncodeRune(c)
|
||||
di.writeU16(c1)
|
||||
di.writeU16(c2)
|
||||
}
|
||||
|
||||
i += size
|
||||
}
|
||||
|
||||
di.w.WriteByte(quote)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (di *diagnose) encodeFloat(ai byte, val uint64) error {
|
||||
f64 := float64(0)
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
f16 := float16.Frombits(uint16(val))
|
||||
switch {
|
||||
case f16.IsNaN():
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f16.IsInf(1):
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f16.IsInf(-1):
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f16.Float32())
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
f32 := math.Float32frombits(uint32(val))
|
||||
switch {
|
||||
case f32 != f32:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f32 > math.MaxFloat32:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f32 < -math.MaxFloat32:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f32)
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
f64 = math.Float64frombits(val)
|
||||
switch {
|
||||
case f64 != f64:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f64 > math.MaxFloat64:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f64 < -math.MaxFloat64:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Use ES6 number to string conversion which should match most JSON generators.
|
||||
// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
|
||||
const bitSize = 64
|
||||
b := make([]byte, 0, 32)
|
||||
if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
|
||||
b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
|
||||
// clean up e-09 to e-9
|
||||
n := len(b)
|
||||
if n >= 4 && string(b[n-4:n-1]) == "e-0" {
|
||||
b = append(b[:n-2], b[n-1])
|
||||
}
|
||||
} else {
|
||||
b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
|
||||
}
|
||||
|
||||
// add decimal point and trailing zero if needed
|
||||
if bytes.IndexByte(b, '.') < 0 {
|
||||
if i := bytes.IndexByte(b, 'e'); i < 0 {
|
||||
b = append(b, '.', '0')
|
||||
} else {
|
||||
b = append(b[:i+2], b[i:]...)
|
||||
b[i] = '.'
|
||||
b[i+1] = '0'
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString(string(b))
|
||||
|
||||
if di.dm.floatPrecisionIndicator {
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
di.w.WriteString("_1")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
di.w.WriteString("_2")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
di.w.WriteString("_3")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
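For reference, a minimal sketch of calling the package-level `Diagnose` function defined above on a small well-formed item; the expected output follows the EDN rules implemented in `item()`:

```go
package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    // 0x83 0x01 0x02 0x03 is the CBOR array [1, 2, 3].
    edn, err := cbor.Diagnose([]byte{0x83, 0x01, 0x02, 0x03})
    if err != nil {
        panic(err)
    }
    fmt.Println(edn) // [1, 2, 3]
}
```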
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
@@ -1,129 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
/*
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
|
||||
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
|
||||
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
|
||||
|
||||
Encoding options allow "preferred serialization" by encoding integers and floats
|
||||
to their smallest forms (e.g. float16) when values fit.
|
||||
|
||||
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
|
||||
and easier to use with structs.
|
||||
|
||||
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
|
||||
"keyasint" makes a field encode to an element of CBOR map with specified int key.
|
||||
|
||||
Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
|
||||
|
||||
# Basics
|
||||
|
||||
The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
|
||||
|
||||
Function signatures identical to encoding/json include:
|
||||
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
|
||||
|
||||
Standard interfaces include:
|
||||
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
|
||||
|
||||
Custom encoding and decoding is possible by implementing standard interfaces for
|
||||
user-defined Go types.
|
||||
|
||||
Codec functions are available at package-level (using defaults options) or by
|
||||
creating modes from options at runtime.
|
||||
|
||||
"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).
|
||||
|
||||
EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
|
||||
|
||||
em, err := cbor.EncOptions{...}.EncMode()
|
||||
em, err := cbor.CanonicalEncOptions().EncMode()
|
||||
em, err := cbor.CTAP2EncOptions().EncMode()
|
||||
|
||||
Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
|
||||
modes won't accidentally change at runtime after they're created.
|
||||
|
||||
Modes are intended to be reused and are safe for concurrent use.
|
||||
|
||||
EncMode and DecMode Interfaces
|
||||
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions // returns copy of options
|
||||
}
|
||||
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
NewDecoder(r io.Reader) *Decoder
|
||||
DecOptions() DecOptions // returns copy of options
|
||||
}
|
||||
|
||||
Using Default Encoding Mode
|
||||
|
||||
b, err := cbor.Marshal(v)
|
||||
|
||||
encoder := cbor.NewEncoder(w)
|
||||
err = encoder.Encode(v)
|
||||
|
||||
Using Default Decoding Mode
|
||||
|
||||
err := cbor.Unmarshal(b, &v)
|
||||
|
||||
decoder := cbor.NewDecoder(r)
|
||||
err = decoder.Decode(&v)
|
||||
|
||||
Creating and Using Encoding Modes
|
||||
|
||||
// Create EncOptions using either struct literal or a function.
|
||||
opts := cbor.CanonicalEncOptions()
|
||||
|
||||
// If needed, modify encoding options
|
||||
opts.Time = cbor.TimeUnix
|
||||
|
||||
// Create reusable EncMode interface with immutable options, safe for concurrent use.
|
||||
em, err := opts.EncMode()
|
||||
|
||||
// Use EncMode like encoding/json, with same function signatures.
|
||||
b, err := em.Marshal(v)
|
||||
// or
|
||||
encoder := em.NewEncoder(w)
|
||||
err := encoder.Encode(v)
|
||||
|
||||
// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
|
||||
// specified during creation of em (encoding mode).
|
||||
|
||||
# CBOR Options
|
||||
|
||||
Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
|
||||
|
||||
Encoding Options: https://github.com/fxamacker/cbor#encoding-options
|
||||
|
||||
Decoding Options: https://github.com/fxamacker/cbor#decoding-options
|
||||
|
||||
# Struct Tags
|
||||
|
||||
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
|
||||
If both struct tags are specified then `cbor` is used.
|
||||
|
||||
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
|
||||
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
|
||||
|
||||
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
|
||||
makes struct fields encode to elements of CBOR map with int keys.
|
||||
|
||||
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
|
||||
|
||||
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
|
||||
# Tests and Fuzzing
|
||||
|
||||
Over 375 tests are included in this package. Cover-guided fuzzing is handled by
|
||||
a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
|
||||
*/
|
||||
package cbor
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
File diff suppressed because it is too large
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
@@ -1,94 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type mapKeyValueEncodeFunc struct {
|
||||
kf, ef encodeFunc
|
||||
kpool, vpool sync.Pool
|
||||
}
|
||||
|
||||
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
|
||||
iterk := me.kpool.Get().(*reflect.Value)
|
||||
defer func() {
|
||||
iterk.SetZero()
|
||||
me.kpool.Put(iterk)
|
||||
}()
|
||||
iterv := me.vpool.Get().(*reflect.Value)
|
||||
defer func() {
|
||||
iterv.SetZero()
|
||||
me.vpool.Put(iterv)
|
||||
}()
|
||||
|
||||
if kvs == nil {
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
iterk.SetIterKey(iter)
|
||||
iterv.SetIterValue(iter)
|
||||
|
||||
if err := me.kf(e, em, *iterk); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := me.ef(e, em, *iterv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
initial := e.Len()
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
iterk.SetIterKey(iter)
|
||||
iterv.SetIterValue(iter)
|
||||
|
||||
offset := e.Len()
|
||||
if err := me.kf(e, em, *iterk); err != nil {
|
||||
return err
|
||||
}
|
||||
valueOffset := e.Len()
|
||||
if err := me.ef(e, em, *iterv); err != nil {
|
||||
return err
|
||||
}
|
||||
kvs[i] = keyValue{
|
||||
offset: offset - initial,
|
||||
valueOffset: valueOffset - initial,
|
||||
nextOffset: e.Len() - initial,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
mkv := &mapKeyValueEncodeFunc{
|
||||
kf: kf,
|
||||
ef: ef,
|
||||
kpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
rk := reflect.New(t.Key()).Elem()
|
||||
return &rk
|
||||
},
|
||||
},
|
||||
vpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
rv := reflect.New(t.Elem()).Elem()
|
||||
return &rv
|
||||
},
|
||||
},
|
||||
}
|
||||
return mapEncodeFunc{
|
||||
e: mkv.encodeKeyValues,
|
||||
}.encode
|
||||
}
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
@@ -1,60 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build !go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type mapKeyValueEncodeFunc struct {
|
||||
kf, ef encodeFunc
|
||||
}
|
||||
|
||||
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
|
||||
if kvs == nil {
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
initial := e.Len()
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
offset := e.Len()
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
valueOffset := e.Len()
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
kvs[i] = keyValue{
|
||||
offset: offset - initial,
|
||||
valueOffset: valueOffset - initial,
|
||||
nextOffset: e.Len() - initial,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
|
||||
return mapEncodeFunc{
|
||||
e: mkv.encodeKeyValues,
|
||||
}.encode
|
||||
}
69
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
@@ -1,69 +0,0 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// SimpleValue represents CBOR simple value.
|
||||
// CBOR simple value is:
|
||||
// - an extension point like CBOR tag.
|
||||
// - a subset of CBOR major type 7 that isn't floating-point.
|
||||
// - "identified by a number between 0 and 255, but distinct from that number itself".
|
||||
// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
|
||||
//
|
||||
// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined".
|
||||
// Other CBOR simple values are currently unassigned/reserved by IANA.
|
||||
type SimpleValue uint8
|
||||
|
||||
var (
|
||||
typeSimpleValue = reflect.TypeOf(SimpleValue(0))
|
||||
)
|
||||
|
||||
// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
|
||||
func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
|
||||
// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
|
||||
// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
|
||||
// (major type 7, additional information 24) and continue with a byte
|
||||
// less than 0x20 (32 decimal). Such sequences are not well-formed.
|
||||
// (This implies that an encoder cannot encode false, true, null, or
|
||||
// undefined in two-byte sequences and that only the one-byte variants
|
||||
// of these are well-formed; more generally speaking, each simple value
|
||||
// only has a single representation variant)."
|
||||
|
||||
switch {
|
||||
case sv <= maxSimpleValueInAdditionalInformation:
|
||||
return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
|
||||
|
||||
case sv >= minSimpleValueIn1ByteArgument:
|
||||
return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
|
||||
|
||||
default:
|
||||
return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
|
||||
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
|
||||
if sv == nil {
|
||||
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
typ, ai, val := d.getHead()
|
||||
|
||||
if typ != cborTypePrimitives {
|
||||
return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
|
||||
}
|
||||
if ai > additionalInformationWith1ByteArgument {
|
||||
return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
|
||||
}
|
||||
|
||||
// It is safe to cast val to uint8 here because
|
||||
// - data is already verified to be well-formed CBOR simple value and
|
||||
// - val is <= math.MaxUint8.
|
||||
*sv = SimpleValue(val)
|
||||
return nil
|
||||
}
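A short usage sketch for `SimpleValue`: per `MarshalCBOR` above, values in 32..255 encode as the two-byte form (0xf8 followed by the value), while assigned values up to 23 use the one-byte form.

```go
package main

import (
    "encoding/hex"
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    // SimpleValue(99) is in the 32..255 range, so it encodes as 0xf8 0x63.
    b, err := cbor.Marshal(cbor.SimpleValue(99))
    if err != nil {
        panic(err)
    }
    fmt.Println(hex.EncodeToString(b)) // f863

    var sv cbor.SimpleValue
    if err := cbor.Unmarshal(b, &sv); err != nil {
        panic(err)
    }
    fmt.Println(uint8(sv)) // 99
}
```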
277
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
@@ -1,277 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Decoder reads and decodes CBOR values from io.Reader.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
d decoder
|
||||
buf []byte
|
||||
off int // next read offset in buf
|
||||
bytesRead int
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads and decodes from r using
|
||||
// the default decoding options.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return defaultDecMode.NewDecoder(r)
|
||||
}
|
||||
|
||||
// Decode reads CBOR value and decodes it into the value pointed to by v.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
_, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
return err
|
||||
}
|
||||
|
||||
dec.d.reset(dec.buf[dec.off:])
|
||||
err = dec.d.value(v)
|
||||
|
||||
// Increment dec.off even if decoding err is not nil because
|
||||
// dec.d.off points to the next CBOR data item if current
|
||||
// CBOR data item is valid but failed to be decoded into v.
|
||||
// This allows next CBOR data item to be decoded in next
|
||||
// call to this function.
|
||||
dec.off += dec.d.off
|
||||
dec.bytesRead += dec.d.off
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip skips to the next CBOR data item (if there is any),
|
||||
// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc.
|
||||
func (dec *Decoder) Skip() error {
|
||||
n, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
return err
|
||||
}
|
||||
|
||||
dec.off += n
|
||||
dec.bytesRead += n
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumBytesRead returns the number of bytes read.
|
||||
func (dec *Decoder) NumBytesRead() int {
|
||||
return dec.bytesRead
|
||||
}
|
||||
|
||||
// Buffered returns a reader for data remaining in Decoder's buffer.
|
||||
// Returned reader is valid until the next call to Decode or Skip.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.off:])
|
||||
}
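The Decoder above is normally driven in a loop until io.EOF. A minimal sketch with a hard-coded two-item stream (the hex literals are illustrative input, not taken from this repository):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	cbor "github.com/fxamacker/cbor/v2"
)

func main() {
	// Two back-to-back CBOR data items: unsigned integer 1, then text string "two".
	data := []byte{0x01, 0x63, 't', 'w', 'o'}

	dec := cbor.NewDecoder(bytes.NewReader(data))
	for {
		var v interface{}
		if err := dec.Decode(&v); err != nil {
			if err == io.EOF {
				break // stream fully consumed
			}
			panic(err)
		}
		fmt.Printf("item=%v bytesRead=%d\n", v, dec.NumBytesRead())
	}
}
```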
|
||||
|
||||
// readNext() reads next CBOR data item from Reader to buffer.
|
||||
// It returns the size of next CBOR data item.
|
||||
// It also returns validation error or read error if any.
|
||||
func (dec *Decoder) readNext() (int, error) {
|
||||
var readErr error
|
||||
var validErr error
|
||||
|
||||
for {
|
||||
// Process any unread data in dec.buf.
|
||||
if dec.off < len(dec.buf) {
|
||||
dec.d.reset(dec.buf[dec.off:])
|
||||
off := dec.off // Save offset before data validation
|
||||
validErr = dec.d.wellformed(true, false)
|
||||
dec.off = off // Restore offset
|
||||
|
||||
if validErr == nil {
|
||||
return dec.d.off, nil
|
||||
}
|
||||
|
||||
if validErr != io.ErrUnexpectedEOF {
|
||||
return 0, validErr
|
||||
}
|
||||
|
||||
// Process last read error on io.ErrUnexpectedEOF.
|
||||
if readErr != nil {
|
||||
if readErr == io.EOF {
|
||||
// current CBOR data item is incomplete.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, readErr
|
||||
}
|
||||
}
|
||||
|
||||
// More data is needed and there was no read error.
|
||||
var n int
|
||||
for n == 0 {
|
||||
n, readErr = dec.read()
|
||||
if n == 0 && readErr != nil {
|
||||
// No more data can be read and read error is encountered.
|
||||
// At this point, validErr is either nil or io.ErrUnexpectedEOF.
|
||||
if readErr == io.EOF {
|
||||
if validErr == io.ErrUnexpectedEOF {
|
||||
// current CBOR data item is incomplete.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
return 0, readErr
|
||||
}
|
||||
}
|
||||
|
||||
// At this point, dec.buf contains new data from last read (n > 0).
|
||||
}
|
||||
}
|
||||
|
||||
// read() reads data from Reader to buffer.
|
||||
// It returns number of bytes read and any read error encountered.
|
||||
// Postconditions:
|
||||
// - dec.buf contains previously unread data and new data.
|
||||
// - dec.off is 0.
|
||||
func (dec *Decoder) read() (int, error) {
|
||||
// Grow buf if needed.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
|
||||
oldUnreadBuf := dec.buf[dec.off:]
|
||||
dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
|
||||
dec.overwriteBuf(oldUnreadBuf)
|
||||
}
|
||||
|
||||
// Copy unread data over read data and reset off to 0.
|
||||
if dec.off > 0 {
|
||||
dec.overwriteBuf(dec.buf[dec.off:])
|
||||
}
|
||||
|
||||
// Read from reader and reslice buf.
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (dec *Decoder) overwriteBuf(newBuf []byte) {
|
||||
n := copy(dec.buf, newBuf)
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.off = 0
|
||||
}
|
||||
|
||||
// Encoder writes CBOR values to io.Writer.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
em *encMode
|
||||
indefTypes []cborType
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w using the default encoding options.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return defaultEncMode.NewEncoder(w)
|
||||
}
|
||||
|
||||
// Encode writes the CBOR encoding of v.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
if len(enc.indefTypes) > 0 && v != nil {
|
||||
indefType := enc.indefTypes[len(enc.indefTypes)-1]
|
||||
if indefType == cborTypeTextString {
|
||||
k := reflect.TypeOf(v).Kind()
|
||||
if k != reflect.String {
|
||||
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
|
||||
}
|
||||
} else if indefType == cborTypeByteString {
|
||||
t := reflect.TypeOf(v)
|
||||
k := t.Kind()
|
||||
if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
|
||||
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buf := getEncodeBuffer()
|
||||
|
||||
err := encode(buf, enc.em, reflect.ValueOf(v))
|
||||
if err == nil {
|
||||
_, err = enc.w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
putEncodeBuffer(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// StartIndefiniteByteString starts byte string encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
|
||||
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteByteString() error {
|
||||
return enc.startIndefinite(cborTypeByteString)
|
||||
}
|
||||
|
||||
// StartIndefiniteTextString starts text string encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
|
||||
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteTextString() error {
|
||||
return enc.startIndefinite(cborTypeTextString)
|
||||
}
|
||||
|
||||
// StartIndefiniteArray starts array encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes elements of the array
|
||||
// until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteArray() error {
|
||||
return enc.startIndefinite(cborTypeArray)
|
||||
}
|
||||
|
||||
// StartIndefiniteMap starts array encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes elements of the map
|
||||
// until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteMap() error {
|
||||
return enc.startIndefinite(cborTypeMap)
|
||||
}
|
||||
|
||||
// EndIndefinite closes last opened indefinite length value.
|
||||
func (enc *Encoder) EndIndefinite() error {
|
||||
if len(enc.indefTypes) == 0 {
|
||||
return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
|
||||
}
|
||||
_, err := enc.w.Write([]byte{cborBreakFlag})
|
||||
if err == nil {
|
||||
enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
|
||||
}
|
||||
return err
|
||||
}
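A short sketch of the indefinite-length API documented above: every Encode call between a Start… call and EndIndefinite contributes one element or chunk.

```go
package main

import (
	"bytes"
	"fmt"

	cbor "github.com/fxamacker/cbor/v2"
)

func main() {
	var buf bytes.Buffer
	enc := cbor.NewEncoder(&buf)

	_ = enc.StartIndefiniteArray() // writes 0x9f
	_ = enc.Encode(1)              // first element
	_ = enc.Encode(2)              // second element
	_ = enc.EndIndefinite()        // writes the 0xff "break" code

	fmt.Printf("% x\n", buf.Bytes()) // 9f 01 02 ff
}
```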
|
||||
|
||||
var cborIndefHeader = map[cborType][]byte{
|
||||
cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
|
||||
cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
|
||||
cborTypeArray: {cborArrayWithIndefiniteLengthHead},
|
||||
cborTypeMap: {cborMapWithIndefiniteLengthHead},
|
||||
}
|
||||
|
||||
func (enc *Encoder) startIndefinite(typ cborType) error {
|
||||
if enc.em.indefLength == IndefLengthForbidden {
|
||||
return &IndefiniteLengthError{typ}
|
||||
}
|
||||
_, err := enc.w.Write(cborIndefHeader[typ])
|
||||
if err == nil {
|
||||
enc.indefTypes = append(enc.indefTypes, typ)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded CBOR value.
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalCBOR returns m or CBOR nil if m is nil.
|
||||
func (m RawMessage) MarshalCBOR() ([]byte, error) {
|
||||
if len(m) == 0 {
|
||||
return cborNil, nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UnmarshalCBOR creates a copy of data and saves to *m.
|
||||
func (m *RawMessage) UnmarshalCBOR(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
*m = append((*m)[0:0], data...)
|
||||
return nil
|
||||
}
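RawMessage is typically used to defer part of a decode until the surrounding data has been inspected; a small sketch (the envelope type and field names are hypothetical):

```go
package main

import (
	"fmt"

	cbor "github.com/fxamacker/cbor/v2"
)

// envelope decodes its header eagerly but keeps the payload as raw CBOR.
type envelope struct {
	Kind    string          `cbor:"kind"`
	Payload cbor.RawMessage `cbor:"payload"`
}

func main() {
	raw, _ := cbor.Marshal(map[string]interface{}{
		"kind":    "point",
		"payload": map[string]int{"x": 1, "y": 2},
	})

	var env envelope
	if err := cbor.Unmarshal(raw, &env); err != nil {
		panic(err)
	}

	// Decode the deferred part only once the kind is known.
	var p map[string]int
	if err := cbor.Unmarshal(env.Payload, &p); err != nil {
		panic(err)
	}
	fmt.Println(env.Kind, p["x"], p["y"]) // point 1 2
}
```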
|
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
@ -1,260 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type field struct {
|
||||
name string
|
||||
nameAsInt int64 // used to decoder to match field name with CBOR int
|
||||
cborName []byte
|
||||
cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
|
||||
idx []int
|
||||
typ reflect.Type
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
typInfo *typeInfo // used to decoder to reuse type info
|
||||
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
|
||||
omitEmpty bool // used to skip empty field
|
||||
keyAsInt bool // used to encode/decode field name as int
|
||||
}
|
||||
|
||||
type fields []*field
|
||||
|
||||
// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
|
||||
type indexFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Less(i, j int) bool {
|
||||
iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
|
||||
for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
|
||||
if iIdx[k] != jIdx[k] {
|
||||
return iIdx[k] < jIdx[k]
|
||||
}
|
||||
}
|
||||
return len(iIdx) <= len(jIdx)
|
||||
}
|
||||
|
||||
// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
|
||||
type nameLevelAndTagFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
|
||||
fi, fj := x.fields[i], x.fields[j]
|
||||
if fi.name != fj.name {
|
||||
return fi.name < fj.name
|
||||
}
|
||||
if len(fi.idx) != len(fj.idx) {
|
||||
return len(fi.idx) < len(fj.idx)
|
||||
}
|
||||
if fi.tagged != fj.tagged {
|
||||
return fi.tagged
|
||||
}
|
||||
return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
|
||||
}
|
||||
|
||||
// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
|
||||
func getFields(t reflect.Type) (flds fields, structOptions string) {
|
||||
// Get special field "_" tag options
|
||||
if f, ok := t.FieldByName("_"); ok {
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag != "-" {
|
||||
structOptions = tag
|
||||
}
|
||||
}
|
||||
|
||||
// nTypes contains next level anonymous fields' types and indexes
|
||||
// (there can be multiple fields of the same type at the same level)
|
||||
flds, nTypes := appendFields(t, nil, nil, nil)
|
||||
|
||||
if len(nTypes) > 0 {
|
||||
|
||||
var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
|
||||
vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
|
||||
|
||||
for len(nTypes) > 0 {
|
||||
cTypes, nTypes = nTypes, nil
|
||||
|
||||
for t, idx := range cTypes {
|
||||
// If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
|
||||
if len(idx) > 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Anonymous field of the same type at deeper nested level is ignored.
|
||||
if vTypes[t] {
|
||||
continue
|
||||
}
|
||||
vTypes[t] = true
|
||||
|
||||
flds, nTypes = appendFields(t, idx[0], flds, nTypes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(&nameLevelAndTagFieldSorter{flds})
|
||||
|
||||
// Keep visible fields.
|
||||
j := 0 // index of next unique field
|
||||
for i := 0; i < len(flds); {
|
||||
name := flds[i].name
|
||||
if i == len(flds)-1 || // last field
|
||||
name != flds[i+1].name || // field i has unique field name
|
||||
len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
|
||||
(flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
|
||||
flds[j] = flds[i]
|
||||
j++
|
||||
}
|
||||
|
||||
// Skip fields with the same field name.
|
||||
for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
|
||||
}
|
||||
}
|
||||
if j != len(flds) {
|
||||
flds = flds[:j]
|
||||
}
|
||||
|
||||
// Sort fields by field index
|
||||
sort.Sort(&indexFieldSorter{flds})
|
||||
|
||||
return flds, structOptions
|
||||
}
|
||||
|
||||
// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes .
|
||||
func appendFields(
|
||||
t reflect.Type,
|
||||
idx []int,
|
||||
flds fields,
|
||||
nTypes map[reflect.Type][][]int,
|
||||
) (
|
||||
_flds fields,
|
||||
_nTypes map[reflect.Type][][]int,
|
||||
) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
if !isFieldExportable(f, ft.Kind()) {
|
||||
continue
|
||||
}
|
||||
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag == "" {
|
||||
tag = f.Tag.Get("json")
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
tagged := tag != ""
|
||||
|
||||
// Parse field tag options
|
||||
var tagFieldName string
|
||||
var omitempty, keyasint bool
|
||||
for j := 0; tag != ""; j++ {
|
||||
var token string
|
||||
idx := strings.IndexByte(tag, ',')
|
||||
if idx == -1 {
|
||||
token, tag = tag, ""
|
||||
} else {
|
||||
token, tag = tag[:idx], tag[idx+1:]
|
||||
}
|
||||
if j == 0 {
|
||||
tagFieldName = token
|
||||
} else {
|
||||
switch token {
|
||||
case "omitempty":
|
||||
omitempty = true
|
||||
case "keyasint":
|
||||
keyasint = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fieldName := tagFieldName
|
||||
if tagFieldName == "" {
|
||||
fieldName = f.Name
|
||||
}
|
||||
|
||||
fIdx := make([]int, len(idx)+1)
|
||||
copy(fIdx, idx)
|
||||
fIdx[len(fIdx)-1] = i
|
||||
|
||||
if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
|
||||
flds = append(flds, &field{
|
||||
name: fieldName,
|
||||
idx: fIdx,
|
||||
typ: f.Type,
|
||||
omitEmpty: omitempty,
|
||||
keyAsInt: keyasint,
|
||||
tagged: tagged})
|
||||
} else {
|
||||
if nTypes == nil {
|
||||
nTypes = make(map[reflect.Type][][]int)
|
||||
}
|
||||
nTypes[ft] = append(nTypes[ft], fIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return flds, nTypes
|
||||
}
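The option parsing above follows the encoding/json conventions: the first tag element is the field name and later elements are flags. A small illustrative struct (hypothetical field names) exercising the supported options, shown here only as a sketch:

```go
// "cbor" tags take precedence over "json" tags for the same field.
type Device struct {
	ID     uint64 `cbor:"1,keyasint"`       // encoded/decoded with integer map key 1
	Vendor string `cbor:"vendor,omitempty"` // dropped from the map when empty
	Notes  string `json:"notes"`            // json tag is honored when no cbor tag exists
	hidden string // unexported and not an anonymous struct: skipped by isFieldExportable
}
```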
|
||||
|
||||
// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
|
||||
// a nonexportable anonymous field of struct type.
|
||||
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||
exportable := f.PkgPath == ""
|
||||
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||
}
|
||||
|
||||
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||
|
||||
// getFieldValue returns field value of struct v by index. When encountering null pointer
|
||||
// to anonymous (embedded) struct field, f is called with the last traversed field value.
|
||||
func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
|
||||
fv = v
|
||||
for i, n := range idx {
|
||||
fv = fv.Field(n)
|
||||
|
||||
if i < len(idx)-1 {
|
||||
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.IsNil() {
|
||||
// Null pointer to embedded struct field
|
||||
fv, err = f(fv)
|
||||
if err != nil || !fv.IsValid() {
|
||||
return fv, err
|
||||
}
|
||||
}
|
||||
fv = fv.Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
return fv, nil
|
||||
}
|
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
@ -1,299 +0,0 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||
// enclosed data item if it were to appear outside of a tag.
|
||||
type Tag struct {
|
||||
Number uint64
|
||||
Content interface{}
|
||||
}
|
||||
|
||||
// RawTag represents CBOR tag data, including tag number and raw tag content.
|
||||
// RawTag implements Unmarshaler and Marshaler interfaces.
|
||||
type RawTag struct {
|
||||
Number uint64
|
||||
Content RawMessage
|
||||
}
|
||||
|
||||
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
|
||||
func (t *RawTag) UnmarshalCBOR(data []byte) error {
|
||||
if t == nil {
|
||||
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
return nil
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Unmarshal tag number.
|
||||
typ, _, num := d.getHead()
|
||||
if typ != cborTypeTag {
|
||||
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
|
||||
}
|
||||
t.Number = num
|
||||
|
||||
// Unmarshal tag content.
|
||||
c := d.data[d.off:]
|
||||
t.Content = make([]byte, len(c))
|
||||
copy(t.Content, c)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalCBOR returns CBOR encoding of t.
|
||||
func (t RawTag) MarshalCBOR() ([]byte, error) {
|
||||
if t.Number == 0 && len(t.Content) == 0 {
|
||||
// Marshal uninitialized cbor.RawTag
|
||||
b := make([]byte, len(cborNil))
|
||||
copy(b, cborNil)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
e := getEncodeBuffer()
|
||||
|
||||
encodeHead(e, byte(cborTypeTag), t.Number)
|
||||
|
||||
content := t.Content
|
||||
if len(content) == 0 {
|
||||
content = cborNil
|
||||
}
|
||||
|
||||
buf := make([]byte, len(e.Bytes())+len(content))
|
||||
n := copy(buf, e.Bytes())
|
||||
copy(buf[n:], content)
|
||||
|
||||
putEncodeBuffer(e)
|
||||
return buf, nil
|
||||
}
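For example, RawTag lets a caller inspect a tag number before deciding how to decode the content; a sketch using the RFC 8949 epoch-time example bytes and the package-level Unmarshal helper (not part of this diff):

```go
package main

import (
	"fmt"

	cbor "github.com/fxamacker/cbor/v2"
)

func main() {
	// Tag 1 (epoch time) wrapping the unsigned integer 1363896240 (RFC 8949 example).
	data := []byte{0xc1, 0x1a, 0x51, 0x4b, 0x67, 0xb0}

	var rt cbor.RawTag
	if err := cbor.Unmarshal(data, &rt); err != nil {
		panic(err)
	}
	fmt.Println(rt.Number)          // 1
	fmt.Printf("% x\n", rt.Content) // 1a 51 4b 67 b0

	// Re-encoding reproduces the original bytes.
	out, _ := rt.MarshalCBOR()
	fmt.Printf("% x\n", out) // c1 1a 51 4b 67 b0
}
```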
|
||||
|
||||
// DecTagMode specifies how decoder handles tag number.
|
||||
type DecTagMode int
|
||||
|
||||
const (
|
||||
// DecTagIgnored makes decoder ignore tag number (skips if present).
|
||||
DecTagIgnored DecTagMode = iota
|
||||
|
||||
// DecTagOptional makes decoder verify tag number if it's present.
|
||||
DecTagOptional
|
||||
|
||||
// DecTagRequired makes decoder verify tag number and tag number must be present.
|
||||
DecTagRequired
|
||||
|
||||
maxDecTagMode
|
||||
)
|
||||
|
||||
func (dtm DecTagMode) valid() bool {
|
||||
return dtm >= 0 && dtm < maxDecTagMode
|
||||
}
|
||||
|
||||
// EncTagMode specifies how encoder handles tag number.
|
||||
type EncTagMode int
|
||||
|
||||
const (
|
||||
// EncTagNone makes encoder not encode tag number.
|
||||
EncTagNone EncTagMode = iota
|
||||
|
||||
// EncTagRequired makes encoder encode tag number.
|
||||
EncTagRequired
|
||||
|
||||
maxEncTagMode
|
||||
)
|
||||
|
||||
func (etm EncTagMode) valid() bool {
|
||||
return etm >= 0 && etm < maxEncTagMode
|
||||
}
|
||||
|
||||
// TagOptions specifies how encoder and decoder handle tag number.
|
||||
type TagOptions struct {
|
||||
DecTag DecTagMode
|
||||
EncTag EncTagMode
|
||||
}
|
||||
|
||||
// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
|
||||
// to provide CBOR tag support.
|
||||
type TagSet interface {
|
||||
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||
Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
|
||||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
Remove(contentType reflect.Type)
|
||||
|
||||
tagProvider
|
||||
}
|
||||
|
||||
type tagProvider interface {
|
||||
getTagItemFromType(t reflect.Type) *tagItem
|
||||
getTypeFromTagNum(num []uint64) reflect.Type
|
||||
}
|
||||
|
||||
type tagItem struct {
|
||||
num []uint64
|
||||
cborTagNum []byte
|
||||
contentType reflect.Type
|
||||
opts TagOptions
|
||||
}
|
||||
|
||||
func (t *tagItem) equalTagNum(num []uint64) bool {
|
||||
// Fast path to compare 1 tag number
|
||||
if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(t.num) != len(num) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < len(t.num); i++ {
|
||||
if t.num[i] != num[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
type (
|
||||
tagSet map[reflect.Type]*tagItem
|
||||
|
||||
syncTagSet struct {
|
||||
sync.RWMutex
|
||||
t tagSet
|
||||
}
|
||||
)
|
||||
|
||||
func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||
return t[typ]
|
||||
}
|
||||
|
||||
func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||
for typ, tag := range t {
|
||||
if tag.equalTagNum(num) {
|
||||
return typ
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewTagSet returns TagSet (safe for concurrency).
|
||||
func NewTagSet() TagSet {
|
||||
return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
|
||||
}
|
||||
|
||||
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||
func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
|
||||
if contentType == nil {
|
||||
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||
}
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
for typ, ti := range t.t {
|
||||
if typ == contentType {
|
||||
return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
|
||||
}
|
||||
if ti.equalTagNum(tag.num) {
|
||||
return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
|
||||
}
|
||||
}
|
||||
t.t[contentType] = tag
|
||||
return nil
|
||||
}
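Registering a user-defined tag ties a tag number to a named Go type. A sketch using the TagSet API above together with EncOptions.EncModeWithTags and DecOptions.DecModeWithTags; those constructors and the tag number 280 used here are assumptions about the wider library API, not shown in this diff:

```go
package main

import (
	"fmt"
	"reflect"

	cbor "github.com/fxamacker/cbor/v2"
)

// RGB is a named type we want tagged on the wire.
type RGB struct{ R, G, B uint8 }

func main() {
	tags := cbor.NewTagSet()
	// Tag number 280 is only an example; any number not reserved above works.
	if err := tags.Add(
		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
		reflect.TypeOf(RGB{}), 280,
	); err != nil {
		panic(err)
	}

	em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
	dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)

	b, _ := em.Marshal(RGB{R: 255})
	var c RGB
	_ = dm.Unmarshal(b, &c)
	fmt.Println(c.R) // 255
}
```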
|
||||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
t.Lock()
|
||||
delete(t.t, contentType)
|
||||
t.Unlock()
|
||||
}
|
||||
|
||||
func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||
t.RLock()
|
||||
ti := t.t[typ]
|
||||
t.RUnlock()
|
||||
return ti
|
||||
}
|
||||
|
||||
func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||
t.RLock()
|
||||
rt := t.t.getTypeFromTagNum(num)
|
||||
t.RUnlock()
|
||||
return rt
|
||||
}
|
||||
|
||||
func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
|
||||
if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
|
||||
return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
|
||||
}
|
||||
if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
|
||||
return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
|
||||
}
|
||||
if contentType == typeTime {
|
||||
return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if contentType == typeBigInt {
|
||||
return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if contentType == typeTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
|
||||
}
|
||||
if contentType == typeRawTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
|
||||
}
|
||||
if num == 0 || num == 1 {
|
||||
return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if num == 2 || num == 3 {
|
||||
return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if num == tagNumSelfDescribedCBOR {
|
||||
return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
|
||||
}
|
||||
|
||||
te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
|
||||
te.num = append(te.num, nestedNum...)
|
||||
|
||||
// Cache encoded tag numbers
|
||||
e := getEncodeBuffer()
|
||||
for _, n := range te.num {
|
||||
encodeHead(e, byte(cborTypeTag), n)
|
||||
}
|
||||
te.cborTagNum = make([]byte, e.Len())
|
||||
copy(te.cborTagNum, e.Bytes())
|
||||
putEncodeBuffer(e)
|
||||
|
||||
return &te, nil
|
||||
}
|
||||
|
||||
var (
|
||||
typeTag = reflect.TypeOf(Tag{})
|
||||
typeRawTag = reflect.TypeOf(RawTag{})
|
||||
)
|
||||
|
||||
// WrongTagError describes mismatch between CBOR tag and registered tag.
|
||||
type WrongTagError struct {
|
||||
RegisteredType reflect.Type
|
||||
RegisteredTagNum []uint64
|
||||
TagNum []uint64
|
||||
}
|
||||
|
||||
func (e *WrongTagError) Error() string {
|
||||
return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
|
||||
}
|
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
@ -1,394 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// SyntaxError is a description of a CBOR syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// SemanticError is a description of a CBOR semantic error.
|
||||
type SemanticError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SemanticError) Error() string { return e.msg }
|
||||
|
||||
// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
|
||||
type MaxNestedLevelError struct {
|
||||
maxNestedLevels int
|
||||
}
|
||||
|
||||
func (e *MaxNestedLevelError) Error() string {
|
||||
return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
|
||||
}
|
||||
|
||||
// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
|
||||
type MaxArrayElementsError struct {
|
||||
maxArrayElements int
|
||||
}
|
||||
|
||||
func (e *MaxArrayElementsError) Error() string {
|
||||
return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
|
||||
}
|
||||
|
||||
// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
|
||||
type MaxMapPairsError struct {
|
||||
maxMapPairs int
|
||||
}
|
||||
|
||||
func (e *MaxMapPairsError) Error() string {
|
||||
return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
|
||||
}
|
||||
|
||||
// IndefiniteLengthError indicates found disallowed indefinite length items.
|
||||
type IndefiniteLengthError struct {
|
||||
t cborType
|
||||
}
|
||||
|
||||
func (e *IndefiniteLengthError) Error() string {
|
||||
return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
|
||||
}
|
||||
|
||||
// TagsMdError indicates found disallowed CBOR tags.
|
||||
type TagsMdError struct {
|
||||
}
|
||||
|
||||
func (e *TagsMdError) Error() string {
|
||||
return "cbor: CBOR tag isn't allowed"
|
||||
}
|
||||
|
||||
// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
|
||||
type ExtraneousDataError struct {
|
||||
numOfBytes int // number of bytes of extraneous data
|
||||
index int // location of extraneous data
|
||||
}
|
||||
|
||||
func (e *ExtraneousDataError) Error() string {
|
||||
return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
|
||||
}
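These limits come from the decode mode and are configurable through DecOptions (MaxNestedLevels, MaxArrayElements, MaxMapPairs, IndefLength); those option fields are part of the public API but not shown in this diff. A hedged sketch of how exceeding a limit surfaces as one of the error types above:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	cbor "github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{
		MaxNestedLevels:  16,   // deeper nesting yields *MaxNestedLevelError
		MaxArrayElements: 1024, // larger arrays yield *MaxArrayElementsError
		MaxMapPairs:      1024, // larger maps yield *MaxMapPairsError
		IndefLength:      cbor.IndefLengthForbidden,
	}.DecMode()
	if err != nil {
		panic(err)
	}

	// 17 single-element arrays nested inside each other, then the integer 0.
	deep := append(bytes.Repeat([]byte{0x81}, 17), 0x00)
	var v interface{}
	err = dm.Unmarshal(deep, &v)

	var nestErr *cbor.MaxNestedLevelError
	fmt.Println(errors.As(err, &nestErr)) // true
}
```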
|
||||
|
||||
// wellformed checks whether the CBOR data item is well-formed.
|
||||
// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
|
||||
// - use allowExtraData = true when using Decoder.Decode()
|
||||
// - use allowExtraData = false when using Unmarshal()
|
||||
func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
|
||||
if len(d.data) == d.off {
|
||||
return io.EOF
|
||||
}
|
||||
_, err := d.wellformedInternal(0, checkBuiltinTags)
|
||||
if err == nil {
|
||||
if !allowExtraData && d.off != len(d.data) {
|
||||
err = &ExtraneousDataError{len(d.data) - d.off, d.off}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// wellformedInternal checks data's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
|
||||
t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case cborTypeByteString, cborTypeTextString:
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
|
||||
}
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
|
||||
}
|
||||
if len(d.data)-d.off < valInt { // valInt+off may overflow integer
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
d.off += valInt
|
||||
|
||||
case cborTypeArray, cborTypeMap:
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
|
||||
}
|
||||
|
||||
if t == cborTypeArray {
|
||||
if valInt > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if valInt > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
|
||||
count := 1
|
||||
if t == cborTypeMap {
|
||||
count = 2
|
||||
}
|
||||
maxDepth := depth
|
||||
for j := 0; j < count; j++ {
|
||||
for i := 0; i < valInt; i++ {
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt // Save max depth
|
||||
}
|
||||
}
|
||||
}
|
||||
depth = maxDepth
|
||||
|
||||
case cborTypeTag:
|
||||
if d.dm.tagsMd == TagsForbidden {
|
||||
return 0, &TagsMdError{}
|
||||
}
|
||||
|
||||
tagNum := val
|
||||
|
||||
// Scan nested tag numbers to avoid recursion.
|
||||
for {
|
||||
if len(d.data) == d.off { // Tag number must be followed by tag content.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if checkBuiltinTags {
|
||||
err = validBuiltinTag(tagNum, d.data[d.off])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
|
||||
return 0, &UnacceptableDataItemError{
|
||||
CBORType: cborTypeTag.String(),
|
||||
Message: "bignum",
|
||||
}
|
||||
}
|
||||
if getType(d.data[d.off]) != cborTypeTag {
|
||||
break
|
||||
}
|
||||
if _, _, tagNum, err = d.wellformedHead(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
}
|
||||
// Check tag content.
|
||||
return d.wellformedInternal(depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
// Peek ahead to get next type and indefinite length status.
|
||||
nt, ai := parseInitialByte(d.data[d.off])
|
||||
if t != nt {
|
||||
return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
|
||||
}
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
|
||||
}
|
||||
if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
maxDepth := depth
|
||||
i := 0
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt
|
||||
}
|
||||
i++
|
||||
if t == cborTypeArray {
|
||||
if i > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if i%2 == 0 && i/2 > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
}
|
||||
if t == cborTypeMap && i%2 == 1 {
|
||||
return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return maxDepth, nil
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
|
||||
t cborType,
|
||||
ai byte,
|
||||
val uint64,
|
||||
indefiniteLength bool,
|
||||
err error,
|
||||
) {
|
||||
t, ai, val, err = d.wellformedHead()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
indefiniteLength = additionalInformation(ai).isIndefiniteLength()
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
|
||||
dataLen := len(d.data) - d.off
|
||||
if dataLen == 0 {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
t, ai = parseInitialByte(d.data[d.off])
|
||||
val = uint64(ai)
|
||||
d.off++
|
||||
dataLen--
|
||||
|
||||
if ai <= maxAdditionalInformationWithoutArgument {
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith1ByteArgument {
|
||||
const argumentSize = 1
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(d.data[d.off])
|
||||
d.off++
|
||||
if t == cborTypePrimitives && val < 32 {
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith2ByteArgument {
|
||||
const argumentSize = 2
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith4ByteArgument {
|
||||
const argumentSize = 4
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith8ByteArgument {
|
||||
const argumentSize = 8
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
switch t {
|
||||
case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
|
||||
return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
// ai == 28, 29, 30
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
}
|
||||
|
||||
func (d *decoder) acceptableFloat(f float64) error {
|
||||
switch {
|
||||
case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point NaN",
|
||||
}
|
||||
case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point infinity",
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
1
vendor/github.com/go-logr/logr/README.md
generated
vendored
1
vendor/github.com/go-logr/logr/README.md
generated
vendored
@ -1,7 +1,6 @@
# A minimal logging API for Go

[](https://pkg.go.dev/github.com/go-logr/logr)
[](https://goreportcard.com/report/github.com/go-logr/logr)
[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)

logr offers an(other) opinion on how Go programs and libraries can do logging

61
vendor/github.com/go-logr/logr/slogr/slogr.go
generated
vendored
Normal file
61
vendor/github.com/go-logr/logr/slogr/slogr.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
// APIs.
//
// See the README in the top-level [./logr] package for a discussion of
// interoperability.
//
// Deprecated: use the main logr package instead.
package slogr

import (
	"log/slog"

	"github.com/go-logr/logr"
)

// NewLogr returns a logr.Logger which writes to the slog.Handler.
//
// Deprecated: use [logr.FromSlogHandler] instead.
func NewLogr(handler slog.Handler) logr.Logger {
	return logr.FromSlogHandler(handler)
}

// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func NewSlogHandler(logger logr.Logger) slog.Handler {
	return logr.ToSlogHandler(logger)
}

// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func ToSlogHandler(logger logr.Logger) slog.Handler {
	return logr.ToSlogHandler(logger)
}

// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better.
//
// Deprecated: use [logr.SlogSink] instead.
type SlogSink = logr.SlogSink
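Even though these wrappers are deprecated, their intent is simply bridging between slog and logr in either direction. A minimal sketch (Go 1.21+, standard-library slog only):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr/slogr"
)

func main() {
	// slog backend, logr front-end (new code should call logr.FromSlogHandler directly).
	logger := slogr.NewLogr(slog.NewTextHandler(os.Stderr, nil))
	logger.Info("attached", "pod", "demo", "ip", "10.0.0.5")

	// And the reverse: drive a logr.Logger through the slog API.
	slogLogger := slog.New(slogr.NewSlogHandler(logger))
	slogLogger.Info("hello from slog")
}
```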
61
vendor/github.com/go-openapi/jsonpointer/.golangci.yml
generated
vendored
61
vendor/github.com/go-openapi/jsonpointer/.golangci.yml
generated
vendored
@ -1,61 +0,0 @@
|
||||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- maligned
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- funlen
|
||||
- godox
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wsl
|
||||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
- gofumpt
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
8
vendor/github.com/go-openapi/jsonpointer/README.md
generated
vendored
8
vendor/github.com/go-openapi/jsonpointer/README.md
generated
vendored
@ -1,10 +1,6 @@
# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)

[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language

## Status

191
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
191
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
@ -26,7 +26,6 @@
|
||||
package jsonpointer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@ -41,7 +40,6 @@ const (
|
||||
pointerSeparator = `/`
|
||||
|
||||
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
|
||||
notFound = `Can't find the pointer in the document`
|
||||
)
|
||||
|
||||
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
|
||||
@ -50,13 +48,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
|
||||
// JSONPointable is an interface for structs to implement when they need to customize the
|
||||
// json pointer process
|
||||
type JSONPointable interface {
|
||||
JSONLookup(string) (any, error)
|
||||
JSONLookup(string) (interface{}, error)
|
||||
}
|
||||
|
||||
// JSONSetable is an interface for structs to implement when they need to customize the
|
||||
// json pointer process
|
||||
type JSONSetable interface {
|
||||
JSONSet(string, any) error
|
||||
JSONSet(string, interface{}) error
|
||||
}
|
||||
|
||||
// New creates a new json pointer for the given string
|
||||
@ -83,7 +81,9 @@ func (p *Pointer) parse(jsonPointerString string) error {
|
||||
err = errors.New(invalidStart)
|
||||
} else {
|
||||
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
|
||||
p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
|
||||
for _, referenceToken := range referenceTokens[1:] {
|
||||
p.referenceTokens = append(p.referenceTokens, referenceToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -91,58 +91,38 @@ func (p *Pointer) parse(jsonPointerString string) error {
|
||||
}
|
||||
|
||||
// Get uses the pointer to retrieve a value from a JSON document
|
||||
func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
|
||||
func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
|
||||
return p.get(document, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
// Set uses the pointer to set a value from a JSON document
|
||||
func (p *Pointer) Set(document any, value any) (any, error) {
|
||||
func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
|
||||
return document, p.set(document, value, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
// GetForToken gets a value for a json pointer token 1 level deep
|
||||
func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
|
||||
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
|
||||
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
// SetForToken gets a value for a json pointer token 1 level deep
|
||||
func SetForToken(document any, decodedToken string, value any) (any, error) {
|
||||
func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
|
||||
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
|
||||
}
|
||||
|
||||
func isNil(input any) bool {
|
||||
if input == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
kind := reflect.TypeOf(input).Kind()
|
||||
switch kind { //nolint:exhaustive
|
||||
case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
|
||||
return reflect.ValueOf(input).IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
|
||||
func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
|
||||
rValue := reflect.Indirect(reflect.ValueOf(node))
|
||||
kind := rValue.Kind()
|
||||
if isNil(node) {
|
||||
return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
|
||||
}
|
||||
|
||||
switch typed := node.(type) {
|
||||
case JSONPointable:
|
||||
r, err := typed.JSONLookup(decodedToken)
|
||||
if rValue.Type().Implements(jsonPointableType) {
|
||||
r, err := node.(JSONPointable).JSONLookup(decodedToken)
|
||||
if err != nil {
|
||||
return nil, kind, err
|
||||
}
|
||||
return r, kind, nil
|
||||
case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
|
||||
return getSingleImpl(*typed, decodedToken, nameProvider)
|
||||
}
|
||||
|
||||
switch kind { //nolint:exhaustive
|
||||
switch kind {
|
||||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
@ -179,7 +159,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
|
||||
|
||||
}
|
||||
|
||||
func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
|
||||
func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
|
||||
rValue := reflect.Indirect(reflect.ValueOf(node))
|
||||
|
||||
if ns, ok := node.(JSONSetable); ok { // pointer impl
|
||||
@ -190,7 +170,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
|
||||
return node.(JSONSetable).JSONSet(decodedToken, data)
|
||||
}
|
||||
|
||||
switch rValue.Kind() { //nolint:exhaustive
|
||||
switch rValue.Kind() {
|
||||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
@ -230,7 +210,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
|
||||
|
||||
}
|
||||
|
||||
func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
|
||||
func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
|
||||
|
||||
if nameProvider == nil {
|
||||
nameProvider = swag.DefaultJSONNameProvider
|
||||
@ -251,7 +231,8 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
|
||||
if err != nil {
|
||||
return nil, knd, err
|
||||
}
|
||||
node = r
|
||||
node, kind = r, knd
|
||||
|
||||
}
|
||||
|
||||
rValue := reflect.ValueOf(node)
|
||||
@ -260,11 +241,11 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
|
||||
return node, kind, nil
|
||||
}
|
||||
|
||||
func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
||||
func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
|
||||
knd := reflect.ValueOf(node).Kind()
|
||||
|
||||
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
|
||||
return errors.New("only structs, pointers, maps and slices are supported for setting values")
|
||||
return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
|
||||
}
|
||||
|
||||
if nameProvider == nil {
|
||||
@ -303,7 +284,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
||||
continue
|
||||
}
|
||||
|
||||
switch kind { //nolint:exhaustive
|
||||
switch kind {
|
||||
case reflect.Struct:
|
||||
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
|
||||
if !ok {
|
||||
@ -382,128 +363,6 @@ func (p *Pointer) String() string {
|
||||
return pointerString
|
||||
}
|
||||
|
||||
func (p *Pointer) Offset(document string) (int64, error) {
|
||||
dec := json.NewDecoder(strings.NewReader(document))
|
||||
var offset int64
|
||||
for _, ttk := range p.DecodedTokens() {
|
||||
tk, err := dec.Token()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch tk := tk.(type) {
|
||||
case json.Delim:
|
||||
switch tk {
|
||||
case '{':
|
||||
offset, err = offsetSingleObject(dec, ttk)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case '[':
|
||||
offset, err = offsetSingleArray(dec, ttk)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
|
||||
for dec.More() {
|
||||
offset := dec.InputOffset()
|
||||
tk, err := dec.Token()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch tk := tk.(type) {
|
||||
case json.Delim:
|
||||
switch tk {
|
||||
case '{':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case '[':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
case string:
|
||||
if tk == decodedToken {
|
||||
return offset, nil
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid token %#v", tk)
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("token reference %q not found", decodedToken)
|
||||
}
|
||||
|
||||
func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
|
||||
idx, err := strconv.Atoi(decodedToken)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < idx && dec.More(); i++ {
|
||||
tk, err := dec.Token()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if delim, isDelim := tk.(json.Delim); isDelim {
|
||||
switch delim {
|
||||
case '{':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case '[':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !dec.More() {
|
||||
return 0, fmt.Errorf("token reference %q not found", decodedToken)
|
||||
}
|
||||
return dec.InputOffset(), nil
|
||||
}
|
||||
|
||||
// drainSingle drains a single level of object or array.
|
||||
// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
|
||||
func drainSingle(dec *json.Decoder) error {
|
||||
for dec.More() {
|
||||
tk, err := dec.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if delim, isDelim := tk.(json.Delim); isDelim {
|
||||
switch delim {
|
||||
case '{':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return err
|
||||
}
|
||||
case '[':
|
||||
if err = drainSingle(dec); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Consumes the ending delim
|
||||
if _, err := dec.Token(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Specific JSON pointer encoding here
|
||||
// ~0 => ~
|
||||
// ~1 => /
|
||||
@ -518,14 +377,14 @@ const (
|
||||
|
||||
// Unescape unescapes a json pointer reference token string to the original representation
|
||||
func Unescape(token string) string {
|
||||
step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
|
||||
step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
|
||||
step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
|
||||
step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
|
||||
return step2
|
||||
}
|
||||
|
||||
// Escape escapes a pointer reference token string
|
||||
func Escape(token string) string {
|
||||
step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
|
||||
step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
|
||||
step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
|
||||
step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
|
||||
return step2
|
||||
}
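A short sketch of the pointer API in this file: New builds a pointer, Get walks the document, and Escape handles keys that themselves contain "/" (the document literal below is illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]interface{}{"app/name": "whereabouts"},
		},
	}

	// "/" inside a key must be escaped as ~1 per RFC 6901, hence Escape.
	p, err := jsonpointer.New("/metadata/labels/" + jsonpointer.Escape("app/name"))
	if err != nil {
		panic(err)
	}

	v, _, err := p.Get(doc)
	fmt.Println(v, err) // whereabouts <nil>
}
```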
|
||||
|
1
vendor/github.com/go-openapi/swag/.gitignore
generated
vendored
1
vendor/github.com/go-openapi/swag/.gitignore
generated
vendored
@ -2,4 +2,3 @@ secrets.yml
vendor
Godeps
.idea
*.out

54
vendor/github.com/go-openapi/swag/.golangci.yml
generated
vendored
54
vendor/github.com/go-openapi/swag/.golangci.yml
generated
vendored
@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
min-complexity: 45
min-complexity: 25
maligned:
suggest-new: true
dupl:
threshold: 200
threshold: 100
goconst:
min-len: 3
min-occurrences: 3
min-occurrences: 2

linters:
enable-all: true
@ -20,41 +20,35 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- funlen
- godox
- gocognit
- whitespace
- wsl
- wrapcheck
- testpackage
- nlreturn
- testpackage
- wrapcheck
- gomnd
- exhaustive
- exhaustivestruct
- goerr113
- errorlint
- nestif
- godot
- wsl
- whitespace
- gofumpt
- godot
- nestif
- godox
- funlen
- gci
- gocognit
- paralleltest
- tparallel
- thelper
- ifshort
- exhaustruct
- varnamelen
- gci
- depguard
- errchkjson
- inamedparam
- nonamedreturns
- musttag
- ireturn
- forcetypeassert
- gomoddirectives
- cyclop
# deprecated linters
- deadcode
- interfacer
- scopelint
- varcheck
- structcheck
- forcetypeassert
- ireturn
- tagliatelle
- varnamelen
- goimports
- tenv
- golint
- exhaustruct
- nilnil
- nonamedreturns
- nosnakecase
52
vendor/github.com/go-openapi/swag/BENCHMARK.md
generated
vendored
52
vendor/github.com/go-openapi/swag/BENCHMARK.md
generated
vendored
@ -1,52 +0,0 @@
# Benchmarks

## Name mangling utilities

```bash
go test -bench XXX -run XXX -benchtime 30s
```

### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df

```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
```

### Benchmarks after PR #79

~ x10 performance improvement and ~ /100 memory allocations.

```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
```

```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: AMD Ryzen 7 5800X 8-Core Processor
BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
```
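The numbers above come from `go test -bench` runs against the swag name-mangling helpers. A hedged sketch of a benchmark in that spirit, assuming only the exported helpers that appear later in this diff (`ToGoName`, `ToVarName`, `ToFileName`, `ToCommandName`, `ToHumanNameLower`, `ToHumanNameTitle`); the sample inputs are made up and the upstream benchmark uses its own corpus:

```go
package swag_test

import (
	"testing"

	"github.com/go-openapi/swag"
)

// Made-up sample inputs; the upstream benchmark uses its own corpus.
var samples = []string{"sample text", "findThingById", "HTTP_server", "IPv6 address"}

// benchFor runs one name-mangling helper over the samples b.N times.
func benchFor(fn func(string) string) func(*testing.B) {
	return func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			for _, s := range samples {
				_ = fn(s)
			}
		}
	}
}

func BenchmarkToXXXName(b *testing.B) {
	b.Run("ToGoName", benchFor(swag.ToGoName))
	b.Run("ToVarName", benchFor(swag.ToVarName))
	b.Run("ToFileName", benchFor(swag.ToFileName))
	b.Run("ToCommandName", benchFor(swag.ToCommandName))
	b.Run("ToHumanNameLower", benchFor(swag.ToHumanNameLower))
	b.Run("ToHumanNameTitle", benchFor(swag.ToHumanNameTitle))
}
```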
8
vendor/github.com/go-openapi/swag/README.md
generated
vendored
8
vendor/github.com/go-openapi/swag/README.md
generated
vendored
@ -1,8 +1,7 @@
# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)

[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/swag)
[](http://godoc.org/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)

Contains a bunch of helper functions for go-openapi and go-swagger projects.
@ -19,5 +18,4 @@ You may also use it standalone for your projects.

This repo has only few dependencies outside of the standard library:

* YAML utilities depend on `gopkg.in/yaml.v3`
* `github.com/mailru/easyjson v0.7.7`
* YAML utilities depend on gopkg.in/yaml.v2
202
vendor/github.com/go-openapi/swag/initialism_index.go
generated
vendored
202
vendor/github.com/go-openapi/swag/initialism_index.go
generated
vendored
@ -1,202 +0,0 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
// commonInitialisms are common acronyms that are kept as whole uppercased words.
|
||||
commonInitialisms *indexOfInitialisms
|
||||
|
||||
// initialisms is a slice of sorted initialisms
|
||||
initialisms []string
|
||||
|
||||
// a copy of initialisms pre-baked as []rune
|
||||
initialismsRunes [][]rune
|
||||
initialismsUpperCased [][]rune
|
||||
|
||||
isInitialism func(string) bool
|
||||
|
||||
maxAllocMatches int
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
|
||||
configuredInitialisms := map[string]bool{
|
||||
"ACL": true,
|
||||
"API": true,
|
||||
"ASCII": true,
|
||||
"CPU": true,
|
||||
"CSS": true,
|
||||
"DNS": true,
|
||||
"EOF": true,
|
||||
"GUID": true,
|
||||
"HTML": true,
|
||||
"HTTPS": true,
|
||||
"HTTP": true,
|
||||
"ID": true,
|
||||
"IP": true,
|
||||
"IPv4": true,
|
||||
"IPv6": true,
|
||||
"JSON": true,
|
||||
"LHS": true,
|
||||
"OAI": true,
|
||||
"QPS": true,
|
||||
"RAM": true,
|
||||
"RHS": true,
|
||||
"RPC": true,
|
||||
"SLA": true,
|
||||
"SMTP": true,
|
||||
"SQL": true,
|
||||
"SSH": true,
|
||||
"TCP": true,
|
||||
"TLS": true,
|
||||
"TTL": true,
|
||||
"UDP": true,
|
||||
"UI": true,
|
||||
"UID": true,
|
||||
"UUID": true,
|
||||
"URI": true,
|
||||
"URL": true,
|
||||
"UTF8": true,
|
||||
"VM": true,
|
||||
"XML": true,
|
||||
"XMPP": true,
|
||||
"XSRF": true,
|
||||
"XSS": true,
|
||||
}
|
||||
|
||||
// a thread-safe index of initialisms
|
||||
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
|
||||
initialisms = commonInitialisms.sorted()
|
||||
initialismsRunes = asRunes(initialisms)
|
||||
initialismsUpperCased = asUpperCased(initialisms)
|
||||
maxAllocMatches = maxAllocHeuristic(initialismsRunes)
|
||||
|
||||
// a test function
|
||||
isInitialism = commonInitialisms.isInitialism
|
||||
}
|
||||
|
||||
func asRunes(in []string) [][]rune {
|
||||
out := make([][]rune, len(in))
|
||||
for i, initialism := range in {
|
||||
out[i] = []rune(initialism)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func asUpperCased(in []string) [][]rune {
|
||||
out := make([][]rune, len(in))
|
||||
|
||||
for i, initialism := range in {
|
||||
out[i] = []rune(upper(trim(initialism)))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func maxAllocHeuristic(in [][]rune) int {
|
||||
heuristic := make(map[rune]int)
|
||||
for _, initialism := range in {
|
||||
heuristic[initialism[0]]++
|
||||
}
|
||||
|
||||
var maxAlloc int
|
||||
for _, val := range heuristic {
|
||||
if val > maxAlloc {
|
||||
maxAlloc = val
|
||||
}
|
||||
}
|
||||
|
||||
return maxAlloc
|
||||
}
|
||||
|
||||
// AddInitialisms add additional initialisms
|
||||
func AddInitialisms(words ...string) {
|
||||
for _, word := range words {
|
||||
// commonInitialisms[upper(word)] = true
|
||||
commonInitialisms.add(upper(word))
|
||||
}
|
||||
// sort again
|
||||
initialisms = commonInitialisms.sorted()
|
||||
initialismsRunes = asRunes(initialisms)
|
||||
initialismsUpperCased = asUpperCased(initialisms)
|
||||
}
|
||||
|
||||
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
|
||||
// Since go1.9, this may be implemented with sync.Map.
|
||||
type indexOfInitialisms struct {
|
||||
sortMutex *sync.Mutex
|
||||
index *sync.Map
|
||||
}
|
||||
|
||||
func newIndexOfInitialisms() *indexOfInitialisms {
|
||||
return &indexOfInitialisms{
|
||||
sortMutex: new(sync.Mutex),
|
||||
index: new(sync.Map),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
for k, v := range initial {
|
||||
m.index.Store(k, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) isInitialism(key string) bool {
|
||||
_, ok := m.index.Load(key)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
||||
m.index.Store(key, true)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
m.index.Range(func(key, _ interface{}) bool {
|
||||
k := key.(string)
|
||||
result = append(result, k)
|
||||
return true
|
||||
})
|
||||
sort.Sort(sort.Reverse(byInitialism(result)))
|
||||
return
|
||||
}
|
||||
|
||||
type byInitialism []string
|
||||
|
||||
func (s byInitialism) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s byInitialism) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
func (s byInitialism) Less(i, j int) bool {
|
||||
if len(s[i]) != len(s[j]) {
|
||||
return len(s[i]) < len(s[j])
|
||||
}
|
||||
|
||||
return strings.Compare(s[i], s[j]) > 0
|
||||
}
|
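The deleted `initialism_index.go` above keeps the package-level initialism index behind the exported `AddInitialisms` hook. A hedged usage sketch; the exact casing a custom initialism receives in generated names depends on the vendored swag revision:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Register a project-specific acronym so the name-mangling helpers
	// treat it as an initialism rather than camelizing it.
	swag.AddInitialisms("GRPC")

	fmt.Println(swag.ToGoName("grpc gateway handler"))
	fmt.Println(swag.ToHumanNameTitle("grpc gateway handler"))
}
```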
105
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
105
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
@ -21,7 +21,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@ -41,97 +40,43 @@ var LoadHTTPBasicAuthPassword = ""
|
||||
var LoadHTTPCustomHeaders = map[string]string{}
|
||||
|
||||
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
|
||||
func LoadFromFileOrHTTP(pth string) ([]byte, error) {
|
||||
return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
|
||||
func LoadFromFileOrHTTP(path string) ([]byte, error) {
|
||||
return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
|
||||
}
|
||||
|
||||
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
|
||||
// timeout arg allows for per request overriding of the request timeout
|
||||
func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
|
||||
return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
|
||||
func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
|
||||
return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
|
||||
}
|
||||
|
||||
// LoadStrategy returns a loader function for a given path or URI.
|
||||
//
|
||||
// The load strategy returns the remote load for any path starting with `http`.
|
||||
// So this works for any URI with a scheme `http` or `https`.
|
||||
//
|
||||
// The fallback strategy is to call the local loader.
|
||||
//
|
||||
// The local loader takes a local file system path (absolute or relative) as argument,
|
||||
// or alternatively a `file://...` URI, **without host** (see also below for windows).
|
||||
//
|
||||
// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
|
||||
// especially on windows.
|
||||
//
|
||||
// Before the local loader is called, the given path is transformed:
|
||||
// - percent-encoded characters are unescaped
|
||||
// - simple paths (e.g. `./folder/file`) are passed as-is
|
||||
// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too.
|
||||
//
|
||||
// For paths provided as URIs with the "file" scheme, please note that:
|
||||
// - `file://` is simply stripped.
|
||||
// This means that the host part of the URI is not parsed at all.
|
||||
// For example, `file:///folder/file" becomes "/folder/file`,
|
||||
// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
|
||||
// Similarly, `file://./folder/file` yields `./folder/file`.
|
||||
// - on windows, `file://...` can take a host so as to specify an UNC share location.
|
||||
//
|
||||
// Reminder about windows-specifics:
|
||||
// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
|
||||
// - `file:///c:/folder/file` becomes `C:\folder\file`
|
||||
// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
|
||||
func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
|
||||
if strings.HasPrefix(pth, "http") {
|
||||
// LoadStrategy returns a loader function for a given path or uri
|
||||
func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
|
||||
if strings.HasPrefix(path, "http") {
|
||||
return remote
|
||||
}
|
||||
|
||||
return func(p string) ([]byte, error) {
|
||||
upth, err := url.PathUnescape(p)
|
||||
return func(pth string) ([]byte, error) {
|
||||
upth, err := pathUnescape(pth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(p, `file://`) {
|
||||
// regular file path provided: just normalize slashes
|
||||
return local(filepath.FromSlash(upth))
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
// crude processing: this leaves full URIs with a host with a (mostly) unexpected result
|
||||
upth = strings.TrimPrefix(upth, `file://`)
|
||||
|
||||
return local(filepath.FromSlash(upth))
|
||||
}
|
||||
|
||||
// windows-only pre-processing of file://... URIs
|
||||
|
||||
// support for canonical file URIs on windows.
|
||||
u, err := url.Parse(filepath.ToSlash(upth))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if u.Host != "" {
|
||||
// assume UNC name (volume share)
|
||||
// NOTE: UNC port not yet supported
|
||||
|
||||
// when the "host" segment is a drive letter:
|
||||
// file://C:/folder/... => C:\folder
|
||||
upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
|
||||
if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
|
||||
// tolerance: if we have a leading dot, this can't be a host
|
||||
// file://host/share/folder\... ==> \\host\share\path\folder
|
||||
upth = "//" + upth
|
||||
}
|
||||
} else {
|
||||
// no host, let's figure out if this is a drive letter
|
||||
upth = strings.TrimPrefix(upth, `file://`)
|
||||
first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
|
||||
if strings.HasSuffix(first, ":") {
|
||||
// drive letter in the first segment:
|
||||
// file:///c:/folder/... ==> strip the leading slash
|
||||
upth = strings.TrimPrefix(upth, `/`)
|
||||
if strings.HasPrefix(pth, `file://`) {
|
||||
if runtime.GOOS == "windows" {
|
||||
// support for canonical file URIs on windows.
|
||||
// Zero tolerance here for dodgy URIs.
|
||||
u, _ := url.Parse(upth)
|
||||
if u.Host != "" {
|
||||
// assume UNC name (volume share)
|
||||
// file://host/share/folder\... ==> \\host\share\path\folder
|
||||
// NOTE: UNC port not yet supported
|
||||
upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
|
||||
} else {
|
||||
// file:///c:/folder/... ==> just remove the leading slash
|
||||
upth = strings.TrimPrefix(upth, `file:///`)
|
||||
}
|
||||
} else {
|
||||
upth = strings.TrimPrefix(upth, `file://`)
|
||||
}
|
||||
}
|
||||
|
||||
|
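The `LoadStrategy` documented above returns the remote loader for `http(s)` paths and otherwise unescapes and normalises the path (including `file://` URIs) before handing it to the local loader. A hedged wiring sketch against the signature shown in this hunk; `./spec.yaml` and `naiveHTTPGet` are placeholders, not part of the package:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/go-openapi/swag"
)

// naiveHTTPGet is a placeholder remote loader; swag ships its own HTTP
// loader with timeout, basic-auth and custom-header support.
func naiveHTTPGet(uri string) ([]byte, error) {
	resp, err := http.Get(uri)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	// LoadStrategy(path, local, remote) picks the loader to use for path:
	// the remote loader for http(s) URIs, otherwise a wrapper that
	// normalises the path before calling the local loader.
	load := swag.LoadStrategy("./spec.yaml", os.ReadFile, naiveHTTPGet)

	data, err := load("./spec.yaml")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("loaded %d bytes\n", len(data))
}
```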
72
vendor/github.com/go-openapi/swag/name_lexem.go
generated
vendored
72
vendor/github.com/go-openapi/swag/name_lexem.go
generated
vendored
@ -14,80 +14,74 @@
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
import "unicode"
|
||||
|
||||
type (
|
||||
lexemKind uint8
|
||||
nameLexem interface {
|
||||
GetUnsafeGoName() string
|
||||
GetOriginal() string
|
||||
IsInitialism() bool
|
||||
}
|
||||
|
||||
nameLexem struct {
|
||||
initialismNameLexem struct {
|
||||
original string
|
||||
matchedInitialism string
|
||||
kind lexemKind
|
||||
}
|
||||
|
||||
casualNameLexem struct {
|
||||
original string
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
lexemKindCasualName lexemKind = iota
|
||||
lexemKindInitialismName
|
||||
)
|
||||
|
||||
func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
|
||||
return nameLexem{
|
||||
kind: lexemKindInitialismName,
|
||||
func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
|
||||
return &initialismNameLexem{
|
||||
original: original,
|
||||
matchedInitialism: matchedInitialism,
|
||||
}
|
||||
}
|
||||
|
||||
func newCasualNameLexem(original string) nameLexem {
|
||||
return nameLexem{
|
||||
kind: lexemKindCasualName,
|
||||
func newCasualNameLexem(original string) *casualNameLexem {
|
||||
return &casualNameLexem{
|
||||
original: original,
|
||||
}
|
||||
}
|
||||
|
||||
func (l nameLexem) GetUnsafeGoName() string {
|
||||
if l.kind == lexemKindInitialismName {
|
||||
return l.matchedInitialism
|
||||
}
|
||||
|
||||
var (
|
||||
first rune
|
||||
rest string
|
||||
)
|
||||
func (l *initialismNameLexem) GetUnsafeGoName() string {
|
||||
return l.matchedInitialism
|
||||
}
|
||||
|
||||
func (l *casualNameLexem) GetUnsafeGoName() string {
|
||||
var first rune
|
||||
var rest string
|
||||
for i, orig := range l.original {
|
||||
if i == 0 {
|
||||
first = orig
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 {
|
||||
rest = l.original[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(l.original) > 1 {
|
||||
b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
|
||||
defer func() {
|
||||
poolOfBuffers.RedeemBuffer(b)
|
||||
}()
|
||||
b.WriteRune(unicode.ToUpper(first))
|
||||
b.WriteString(lower(rest))
|
||||
return b.String()
|
||||
return string(unicode.ToUpper(first)) + lower(rest)
|
||||
}
|
||||
|
||||
return l.original
|
||||
}
|
||||
|
||||
func (l nameLexem) GetOriginal() string {
|
||||
func (l *initialismNameLexem) GetOriginal() string {
|
||||
return l.original
|
||||
}
|
||||
|
||||
func (l nameLexem) IsInitialism() bool {
|
||||
return l.kind == lexemKindInitialismName
|
||||
func (l *casualNameLexem) GetOriginal() string {
|
||||
return l.original
|
||||
}
|
||||
|
||||
func (l *initialismNameLexem) IsInitialism() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (l *casualNameLexem) IsInitialism() bool {
|
||||
return false
|
||||
}
|
||||
|
@ -1,10 +1,10 @@
|
||||
// Copyright 2022 CNI authors
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@ -12,10 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ns
|
||||
//go:build go1.8
|
||||
// +build go1.8
|
||||
|
||||
import "github.com/containernetworking/cni/pkg/types"
|
||||
package swag
|
||||
|
||||
func CheckNetNS(nsPath string) (bool, *types.Error) {
|
||||
return false, nil
|
||||
import "net/url"
|
||||
|
||||
func pathUnescape(path string) (string, error) {
|
||||
return url.PathUnescape(path)
|
||||
}
|
68
vendor/github.com/go-openapi/swag/post_go19.go
generated
vendored
Normal file
68
vendor/github.com/go-openapi/swag/post_go19.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
|
||||
// Since go1.9, this may be implemented with sync.Map.
|
||||
type indexOfInitialisms struct {
|
||||
sortMutex *sync.Mutex
|
||||
index *sync.Map
|
||||
}
|
||||
|
||||
func newIndexOfInitialisms() *indexOfInitialisms {
|
||||
return &indexOfInitialisms{
|
||||
sortMutex: new(sync.Mutex),
|
||||
index: new(sync.Map),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
for k, v := range initial {
|
||||
m.index.Store(k, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) isInitialism(key string) bool {
|
||||
_, ok := m.index.Load(key)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
||||
m.index.Store(key, true)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
m.index.Range(func(key, value interface{}) bool {
|
||||
k := key.(string)
|
||||
result = append(result, k)
|
||||
return true
|
||||
})
|
||||
sort.Sort(sort.Reverse(byInitialism(result)))
|
||||
return
|
||||
}
|
24
vendor/github.com/go-openapi/swag/pre_go18.go
generated
vendored
Normal file
24
vendor/github.com/go-openapi/swag/pre_go18.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.8
// +build !go1.8

package swag

import "net/url"

func pathUnescape(path string) (string, error) {
return url.QueryUnescape(path)
}
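The two build-tagged files above pick `url.PathUnescape` on Go 1.8 and newer and fall back to `url.QueryUnescape` on older toolchains. The observable difference is the handling of `+`; a stdlib-only illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	const raw = "a%2Fb+c%20d"

	p, _ := url.PathUnescape(raw)  // "a/b+c d": '+' is left alone
	q, _ := url.QueryUnescape(raw) // "a/b c d": '+' becomes a space

	fmt.Println(p)
	fmt.Println(q)
}
```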
70
vendor/github.com/go-openapi/swag/pre_go19.go
generated
vendored
Normal file
70
vendor/github.com/go-openapi/swag/pre_go19.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
|
||||
// Before go1.9, this may be implemented with a mutex on the map.
|
||||
type indexOfInitialisms struct {
|
||||
getMutex *sync.Mutex
|
||||
index map[string]bool
|
||||
}
|
||||
|
||||
func newIndexOfInitialisms() *indexOfInitialisms {
|
||||
return &indexOfInitialisms{
|
||||
getMutex: new(sync.Mutex),
|
||||
index: make(map[string]bool, 50),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
for k, v := range initial {
|
||||
m.index[k] = v
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) isInitialism(key string) bool {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
_, ok := m.index[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
m.index[key] = true
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
for k := range m.index {
|
||||
result = append(result, k)
|
||||
}
|
||||
sort.Sort(sort.Reverse(byInitialism(result)))
|
||||
return
|
||||
}
|
482
vendor/github.com/go-openapi/swag/split.go
generated
vendored
482
vendor/github.com/go-openapi/swag/split.go
generated
vendored
@ -15,269 +15,124 @@
|
||||
package swag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var nameReplaceTable = map[rune]string{
|
||||
'@': "At ",
|
||||
'&': "And ",
|
||||
'|': "Pipe ",
|
||||
'$': "Dollar ",
|
||||
'!': "Bang ",
|
||||
'-': "",
|
||||
'_': "",
|
||||
}
|
||||
|
||||
type (
|
||||
splitter struct {
|
||||
initialisms []string
|
||||
initialismsRunes [][]rune
|
||||
initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
|
||||
postSplitInitialismCheck bool
|
||||
initialisms []string
|
||||
}
|
||||
|
||||
splitterOption func(*splitter)
|
||||
|
||||
initialismMatch struct {
|
||||
body []rune
|
||||
start, end int
|
||||
complete bool
|
||||
}
|
||||
initialismMatches []initialismMatch
|
||||
splitterOption func(*splitter) *splitter
|
||||
)
|
||||
|
||||
type (
|
||||
// memory pools of temporary objects.
|
||||
//
|
||||
// These are used to recycle temporarily allocated objects
|
||||
// and relieve the GC from undue pressure.
|
||||
|
||||
matchesPool struct {
|
||||
*sync.Pool
|
||||
}
|
||||
|
||||
buffersPool struct {
|
||||
*sync.Pool
|
||||
}
|
||||
|
||||
lexemsPool struct {
|
||||
*sync.Pool
|
||||
}
|
||||
|
||||
splittersPool struct {
|
||||
*sync.Pool
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
// poolOfMatches holds temporary slices for recycling during the initialism match process
|
||||
poolOfMatches = matchesPool{
|
||||
Pool: &sync.Pool{
|
||||
New: func() any {
|
||||
s := make(initialismMatches, 0, maxAllocMatches)
|
||||
|
||||
return &s
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
poolOfBuffers = buffersPool{
|
||||
Pool: &sync.Pool{
|
||||
New: func() any {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
poolOfLexems = lexemsPool{
|
||||
Pool: &sync.Pool{
|
||||
New: func() any {
|
||||
s := make([]nameLexem, 0, maxAllocMatches)
|
||||
|
||||
return &s
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
poolOfSplitters = splittersPool{
|
||||
Pool: &sync.Pool{
|
||||
New: func() any {
|
||||
s := newSplitter()
|
||||
|
||||
return &s
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// nameReplaceTable finds a word representation for special characters.
|
||||
func nameReplaceTable(r rune) (string, bool) {
|
||||
switch r {
|
||||
case '@':
|
||||
return "At ", true
|
||||
case '&':
|
||||
return "And ", true
|
||||
case '|':
|
||||
return "Pipe ", true
|
||||
case '$':
|
||||
return "Dollar ", true
|
||||
case '!':
|
||||
return "Bang ", true
|
||||
case '-':
|
||||
return "", true
|
||||
case '_':
|
||||
return "", true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// split calls the splitter.
|
||||
//
|
||||
// Use newSplitter for more control and options
|
||||
// split calls the splitter; splitter provides more control and post options
|
||||
func split(str string) []string {
|
||||
s := poolOfSplitters.BorrowSplitter()
|
||||
lexems := s.split(str)
|
||||
result := make([]string, 0, len(*lexems))
|
||||
lexems := newSplitter().split(str)
|
||||
result := make([]string, 0, len(lexems))
|
||||
|
||||
for _, lexem := range *lexems {
|
||||
for _, lexem := range lexems {
|
||||
result = append(result, lexem.GetOriginal())
|
||||
}
|
||||
poolOfLexems.RedeemLexems(lexems)
|
||||
poolOfSplitters.RedeemSplitter(s)
|
||||
|
||||
return result
|
||||
|
||||
}
|
||||
|
||||
func newSplitter(options ...splitterOption) splitter {
|
||||
s := splitter{
|
||||
func (s *splitter) split(str string) []nameLexem {
|
||||
return s.toNameLexems(str)
|
||||
}
|
||||
|
||||
func newSplitter(options ...splitterOption) *splitter {
|
||||
splitter := &splitter{
|
||||
postSplitInitialismCheck: false,
|
||||
initialisms: initialisms,
|
||||
initialismsRunes: initialismsRunes,
|
||||
initialismsUpperCased: initialismsUpperCased,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(&s)
|
||||
splitter = option(splitter)
|
||||
}
|
||||
|
||||
return s
|
||||
return splitter
|
||||
}
|
||||
|
||||
// withPostSplitInitialismCheck allows to catch initialisms after main split process
|
||||
func withPostSplitInitialismCheck(s *splitter) {
|
||||
func withPostSplitInitialismCheck(s *splitter) *splitter {
|
||||
s.postSplitInitialismCheck = true
|
||||
}
|
||||
|
||||
func (p matchesPool) BorrowMatches() *initialismMatches {
|
||||
s := p.Get().(*initialismMatches)
|
||||
*s = (*s)[:0] // reset slice, keep allocated capacity
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
|
||||
s := p.Get().(*bytes.Buffer)
|
||||
s.Reset()
|
||||
|
||||
if s.Cap() < size {
|
||||
s.Grow(size)
|
||||
type (
|
||||
initialismMatch struct {
|
||||
start, end int
|
||||
body []rune
|
||||
complete bool
|
||||
}
|
||||
initialismMatches []*initialismMatch
|
||||
)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (p lexemsPool) BorrowLexems() *[]nameLexem {
|
||||
s := p.Get().(*[]nameLexem)
|
||||
*s = (*s)[:0] // reset slice, keep allocated capacity
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
|
||||
s := p.Get().(*splitter)
|
||||
s.postSplitInitialismCheck = false // reset options
|
||||
for _, apply := range options {
|
||||
apply(s)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (p matchesPool) RedeemMatches(s *initialismMatches) {
|
||||
p.Put(s)
|
||||
}
|
||||
|
||||
func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
|
||||
p.Put(s)
|
||||
}
|
||||
|
||||
func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
|
||||
p.Put(s)
|
||||
}
|
||||
|
||||
func (p splittersPool) RedeemSplitter(s *splitter) {
|
||||
p.Put(s)
|
||||
}
|
||||
|
||||
func (m initialismMatch) isZero() bool {
|
||||
return m.start == 0 && m.end == 0
|
||||
}
|
||||
|
||||
func (s splitter) split(name string) *[]nameLexem {
|
||||
func (s *splitter) toNameLexems(name string) []nameLexem {
|
||||
nameRunes := []rune(name)
|
||||
matches := s.gatherInitialismMatches(nameRunes)
|
||||
if matches == nil {
|
||||
return poolOfLexems.BorrowLexems()
|
||||
}
|
||||
|
||||
return s.mapMatchesToNameLexems(nameRunes, matches)
|
||||
}
|
||||
|
||||
func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
|
||||
var matches *initialismMatches
|
||||
func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
|
||||
matches := make(initialismMatches, 0)
|
||||
|
||||
for currentRunePosition, currentRune := range nameRunes {
|
||||
// recycle these allocations as we loop over runes
|
||||
// with such recycling, only 2 slices should be allocated per call
|
||||
// instead of o(n).
|
||||
newMatches := poolOfMatches.BorrowMatches()
|
||||
newMatches := make(initialismMatches, 0, len(matches))
|
||||
|
||||
// check current initialism matches
|
||||
if matches != nil { // skip first iteration
|
||||
for _, match := range *matches {
|
||||
if keepCompleteMatch := match.complete; keepCompleteMatch {
|
||||
*newMatches = append(*newMatches, match)
|
||||
continue
|
||||
}
|
||||
|
||||
// drop failed match
|
||||
currentMatchRune := match.body[currentRunePosition-match.start]
|
||||
if currentMatchRune != currentRune {
|
||||
continue
|
||||
}
|
||||
|
||||
// try to complete ongoing match
|
||||
if currentRunePosition-match.start == len(match.body)-1 {
|
||||
// we are close; the next step is to check the symbol ahead
|
||||
// if it is a small letter, then it is not the end of match
|
||||
// but beginning of the next word
|
||||
|
||||
if currentRunePosition < len(nameRunes)-1 {
|
||||
nextRune := nameRunes[currentRunePosition+1]
|
||||
if newWord := unicode.IsLower(nextRune); newWord {
|
||||
// oh ok, it was the start of a new word
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
match.complete = true
|
||||
match.end = currentRunePosition
|
||||
}
|
||||
|
||||
*newMatches = append(*newMatches, match)
|
||||
for _, match := range matches {
|
||||
if keepCompleteMatch := match.complete; keepCompleteMatch {
|
||||
newMatches = append(newMatches, match)
|
||||
continue
|
||||
}
|
||||
|
||||
// drop failed match
|
||||
currentMatchRune := match.body[currentRunePosition-match.start]
|
||||
if !s.initialismRuneEqual(currentMatchRune, currentRune) {
|
||||
continue
|
||||
}
|
||||
|
||||
// try to complete ongoing match
|
||||
if currentRunePosition-match.start == len(match.body)-1 {
|
||||
// we are close; the next step is to check the symbol ahead
|
||||
// if it is a small letter, then it is not the end of match
|
||||
// but beginning of the next word
|
||||
|
||||
if currentRunePosition < len(nameRunes)-1 {
|
||||
nextRune := nameRunes[currentRunePosition+1]
|
||||
if newWord := unicode.IsLower(nextRune); newWord {
|
||||
// oh ok, it was the start of a new word
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
match.complete = true
|
||||
match.end = currentRunePosition
|
||||
}
|
||||
|
||||
newMatches = append(newMatches, match)
|
||||
}
|
||||
|
||||
// check for new initialism matches
|
||||
for i := range s.initialisms {
|
||||
initialismRunes := s.initialismsRunes[i]
|
||||
if initialismRunes[0] == currentRune {
|
||||
*newMatches = append(*newMatches, initialismMatch{
|
||||
for _, initialism := range s.initialisms {
|
||||
initialismRunes := []rune(initialism)
|
||||
if s.initialismRuneEqual(initialismRunes[0], currentRune) {
|
||||
newMatches = append(newMatches, &initialismMatch{
|
||||
start: currentRunePosition,
|
||||
body: initialismRunes,
|
||||
complete: false,
|
||||
@ -285,28 +140,24 @@ func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
|
||||
}
|
||||
}
|
||||
|
||||
if matches != nil {
|
||||
poolOfMatches.RedeemMatches(matches)
|
||||
}
|
||||
matches = newMatches
|
||||
}
|
||||
|
||||
// up to the caller to redeem this last slice
|
||||
return matches
|
||||
}
|
||||
|
||||
func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
|
||||
nameLexems := poolOfLexems.BorrowLexems()
|
||||
func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
|
||||
nameLexems := make([]nameLexem, 0)
|
||||
|
||||
var lastAcceptedMatch initialismMatch
|
||||
for _, match := range *matches {
|
||||
var lastAcceptedMatch *initialismMatch
|
||||
for _, match := range matches {
|
||||
if !match.complete {
|
||||
continue
|
||||
}
|
||||
|
||||
if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
|
||||
s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
|
||||
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
|
||||
if firstMatch := lastAcceptedMatch == nil; firstMatch {
|
||||
nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
|
||||
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
|
||||
|
||||
lastAcceptedMatch = match
|
||||
|
||||
@ -318,66 +169,63 @@ func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMa
|
||||
}
|
||||
|
||||
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
|
||||
s.appendBrokenDownCasualString(nameLexems, middle)
|
||||
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
|
||||
nameLexems = append(nameLexems, s.breakCasualString(middle)...)
|
||||
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
|
||||
|
||||
lastAcceptedMatch = match
|
||||
}
|
||||
|
||||
// we have not found any accepted matches
|
||||
if lastAcceptedMatch.isZero() {
|
||||
*nameLexems = (*nameLexems)[:0]
|
||||
s.appendBrokenDownCasualString(nameLexems, nameRunes)
|
||||
} else if lastAcceptedMatch.end+1 != len(nameRunes) {
|
||||
rest := nameRunes[lastAcceptedMatch.end+1:]
|
||||
s.appendBrokenDownCasualString(nameLexems, rest)
|
||||
if lastAcceptedMatch == nil {
|
||||
return s.breakCasualString(nameRunes)
|
||||
}
|
||||
|
||||
poolOfMatches.RedeemMatches(matches)
|
||||
if lastAcceptedMatch.end+1 != len(nameRunes) {
|
||||
rest := nameRunes[lastAcceptedMatch.end+1:]
|
||||
nameLexems = append(nameLexems, s.breakCasualString(rest)...)
|
||||
}
|
||||
|
||||
return nameLexems
|
||||
}
|
||||
|
||||
func (s splitter) breakInitialism(original string) nameLexem {
|
||||
func (s *splitter) initialismRuneEqual(a, b rune) bool {
|
||||
return a == b
|
||||
}
|
||||
|
||||
func (s *splitter) breakInitialism(original string) nameLexem {
|
||||
return newInitialismNameLexem(original, original)
|
||||
}
|
||||
|
||||
func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
|
||||
currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused
|
||||
defer func() {
|
||||
poolOfBuffers.RedeemBuffer(currentSegment)
|
||||
}()
|
||||
func (s *splitter) breakCasualString(str []rune) []nameLexem {
|
||||
segments := make([]nameLexem, 0)
|
||||
currentSegment := ""
|
||||
|
||||
addCasualNameLexem := func(original string) {
|
||||
*segments = append(*segments, newCasualNameLexem(original))
|
||||
segments = append(segments, newCasualNameLexem(original))
|
||||
}
|
||||
|
||||
addInitialismNameLexem := func(original, match string) {
|
||||
*segments = append(*segments, newInitialismNameLexem(original, match))
|
||||
segments = append(segments, newInitialismNameLexem(original, match))
|
||||
}
|
||||
|
||||
var addNameLexem func(string)
|
||||
if s.postSplitInitialismCheck {
|
||||
addNameLexem = func(original string) {
|
||||
for i := range s.initialisms {
|
||||
if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
|
||||
addInitialismNameLexem(original, s.initialisms[i])
|
||||
|
||||
addNameLexem := func(original string) {
|
||||
if s.postSplitInitialismCheck {
|
||||
for _, initialism := range s.initialisms {
|
||||
if upper(initialism) == upper(original) {
|
||||
addInitialismNameLexem(original, initialism)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
addCasualNameLexem(original)
|
||||
}
|
||||
} else {
|
||||
addNameLexem = addCasualNameLexem
|
||||
|
||||
addCasualNameLexem(original)
|
||||
}
|
||||
|
||||
for _, rn := range str {
|
||||
if replace, found := nameReplaceTable(rn); found {
|
||||
if currentSegment.Len() > 0 {
|
||||
addNameLexem(currentSegment.String())
|
||||
currentSegment.Reset()
|
||||
for _, rn := range string(str) {
|
||||
if replace, found := nameReplaceTable[rn]; found {
|
||||
if currentSegment != "" {
|
||||
addNameLexem(currentSegment)
|
||||
currentSegment = ""
|
||||
}
|
||||
|
||||
if replace != "" {
|
||||
@ -388,121 +236,27 @@ func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune
|
||||
}
|
||||
|
||||
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
|
||||
if currentSegment.Len() > 0 {
|
||||
addNameLexem(currentSegment.String())
|
||||
currentSegment.Reset()
|
||||
if currentSegment != "" {
|
||||
addNameLexem(currentSegment)
|
||||
currentSegment = ""
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if unicode.IsUpper(rn) {
|
||||
if currentSegment.Len() > 0 {
|
||||
addNameLexem(currentSegment.String())
|
||||
if currentSegment != "" {
|
||||
addNameLexem(currentSegment)
|
||||
}
|
||||
currentSegment.Reset()
|
||||
currentSegment = ""
|
||||
}
|
||||
|
||||
currentSegment.WriteRune(rn)
|
||||
currentSegment += string(rn)
|
||||
}
|
||||
|
||||
if currentSegment.Len() > 0 {
|
||||
addNameLexem(currentSegment.String())
|
||||
if currentSegment != "" {
|
||||
addNameLexem(currentSegment)
|
||||
}
|
||||
}
|
||||
|
||||
// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
|
||||
// it ignores leading and trailing blank spaces in the compared
|
||||
// string.
|
||||
//
|
||||
// base is assumed to be composed of upper-cased runes, and be already
|
||||
// trimmed.
|
||||
//
|
||||
// This code is heavily inspired from strings.EqualFold.
|
||||
func isEqualFoldIgnoreSpace(base []rune, str string) bool {
|
||||
var i, baseIndex int
|
||||
// equivalent to b := []byte(str), but without data copy
|
||||
b := hackStringBytes(str)
|
||||
|
||||
for i < len(b) {
|
||||
if c := b[i]; c < utf8.RuneSelf {
|
||||
// fast path for ASCII
|
||||
if c != ' ' && c != '\t' {
|
||||
break
|
||||
}
|
||||
i++
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// unicode case
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if !unicode.IsSpace(r) {
|
||||
break
|
||||
}
|
||||
i += size
|
||||
}
|
||||
|
||||
if i >= len(b) {
|
||||
return len(base) == 0
|
||||
}
|
||||
|
||||
for _, baseRune := range base {
|
||||
if i >= len(b) {
|
||||
break
|
||||
}
|
||||
|
||||
if c := b[i]; c < utf8.RuneSelf {
|
||||
// single byte rune case (ASCII)
|
||||
if baseRune >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
|
||||
baseChar := byte(baseRune)
|
||||
if c != baseChar &&
|
||||
!('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
|
||||
return false
|
||||
}
|
||||
|
||||
baseIndex++
|
||||
i++
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// unicode case
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.ToUpper(r) != baseRune {
|
||||
return false
|
||||
}
|
||||
baseIndex++
|
||||
i += size
|
||||
}
|
||||
|
||||
if baseIndex != len(base) {
|
||||
return false
|
||||
}
|
||||
|
||||
// all passed: now we should only have blanks
|
||||
for i < len(b) {
|
||||
if c := b[i]; c < utf8.RuneSelf {
|
||||
// fast path for ASCII
|
||||
if c != ' ' && c != '\t' {
|
||||
return false
|
||||
}
|
||||
i++
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// unicode case
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if !unicode.IsSpace(r) {
|
||||
return false
|
||||
}
|
||||
|
||||
i += size
|
||||
}
|
||||
|
||||
return true
|
||||
|
||||
return segments
|
||||
}
|
||||
|
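One side of the `split.go` hunk above recycles match slices, buffers, lexem slices and splitters through `sync.Pool` wrappers with Borrow/Redeem helpers, so the name-splitting hot path allocates less. A generic stdlib-only sketch of that borrow/reset/redeem pattern; the function names are illustrative and not the package's:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
	"unicode"
)

// bufferPool mirrors the Borrow/Redeem style used in the pooled split.go:
// take a pooled object, reset it before use, and put it back when done so
// repeated calls do not allocate a fresh buffer each time.
var bufferPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func borrowBuffer(size int) *bytes.Buffer {
	b := bufferPool.Get().(*bytes.Buffer)
	b.Reset()
	if b.Cap() < size {
		b.Grow(size)
	}
	return b
}

func redeemBuffer(b *bytes.Buffer) {
	bufferPool.Put(b)
}

// camelizeWord only exists to exercise the pool; it upper-cases the first rune.
func camelizeWord(w string) string {
	b := borrowBuffer(len(w))
	defer redeemBuffer(b)
	for i, r := range w {
		if i == 0 {
			b.WriteRune(unicode.ToUpper(r))
			continue
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	fmt.Println(camelizeWord("pooled")) // Pooled
}
```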
8
vendor/github.com/go-openapi/swag/string_bytes.go
generated
vendored
8
vendor/github.com/go-openapi/swag/string_bytes.go
generated
vendored
@ -1,8 +0,0 @@
package swag

import "unsafe"

// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
func hackStringBytes(str string) []byte {
return unsafe.Slice(unsafe.StringData(str), len(str))
}
224
vendor/github.com/go-openapi/swag/util.go
generated
vendored
224
vendor/github.com/go-openapi/swag/util.go
generated
vendored
@ -18,25 +18,76 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// commonInitialisms are common acronyms that are kept as whole uppercased words.
|
||||
var commonInitialisms *indexOfInitialisms
|
||||
|
||||
// initialisms is a slice of sorted initialisms
|
||||
var initialisms []string
|
||||
|
||||
var isInitialism func(string) bool
|
||||
|
||||
// GoNamePrefixFunc sets an optional rule to prefix go names
|
||||
// which do not start with a letter.
|
||||
//
|
||||
// The prefix function is assumed to return a string that starts with an upper case letter.
|
||||
//
|
||||
// e.g. to help convert "123" into "{prefix}123"
|
||||
//
|
||||
// The default is to prefix with "X"
|
||||
var GoNamePrefixFunc func(string) string
|
||||
|
||||
func prefixFunc(name, in string) string {
|
||||
if GoNamePrefixFunc == nil {
|
||||
return "X" + in
|
||||
func init() {
|
||||
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
|
||||
var configuredInitialisms = map[string]bool{
|
||||
"ACL": true,
|
||||
"API": true,
|
||||
"ASCII": true,
|
||||
"CPU": true,
|
||||
"CSS": true,
|
||||
"DNS": true,
|
||||
"EOF": true,
|
||||
"GUID": true,
|
||||
"HTML": true,
|
||||
"HTTPS": true,
|
||||
"HTTP": true,
|
||||
"ID": true,
|
||||
"IP": true,
|
||||
"IPv4": true,
|
||||
"IPv6": true,
|
||||
"JSON": true,
|
||||
"LHS": true,
|
||||
"OAI": true,
|
||||
"QPS": true,
|
||||
"RAM": true,
|
||||
"RHS": true,
|
||||
"RPC": true,
|
||||
"SLA": true,
|
||||
"SMTP": true,
|
||||
"SQL": true,
|
||||
"SSH": true,
|
||||
"TCP": true,
|
||||
"TLS": true,
|
||||
"TTL": true,
|
||||
"UDP": true,
|
||||
"UI": true,
|
||||
"UID": true,
|
||||
"UUID": true,
|
||||
"URI": true,
|
||||
"URL": true,
|
||||
"UTF8": true,
|
||||
"VM": true,
|
||||
"XML": true,
|
||||
"XMPP": true,
|
||||
"XSRF": true,
|
||||
"XSS": true,
|
||||
}
|
||||
|
||||
return GoNamePrefixFunc(name) + in
|
||||
// a thread-safe index of initialisms
|
||||
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
|
||||
initialisms = commonInitialisms.sorted()
|
||||
|
||||
// a test function
|
||||
isInitialism = commonInitialisms.isInitialism
|
||||
}
|
||||
|
||||
const (
|
||||
@ -105,9 +156,25 @@ func SplitByFormat(data, format string) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
type byInitialism []string
|
||||
|
||||
func (s byInitialism) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s byInitialism) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
func (s byInitialism) Less(i, j int) bool {
|
||||
if len(s[i]) != len(s[j]) {
|
||||
return len(s[i]) < len(s[j])
|
||||
}
|
||||
|
||||
return strings.Compare(s[i], s[j]) > 0
|
||||
}
|
||||
|
||||
// Removes leading whitespaces
|
||||
func trim(str string) string {
|
||||
return strings.TrimSpace(str)
|
||||
return strings.Trim(str, " ")
|
||||
}
|
||||
|
||||
// Shortcut to strings.ToUpper()
|
||||
@ -121,20 +188,15 @@ func lower(str string) string {
|
||||
}
|
||||
|
||||
// Camelize an uppercased word
|
||||
func Camelize(word string) string {
|
||||
camelized := poolOfBuffers.BorrowBuffer(len(word))
|
||||
defer func() {
|
||||
poolOfBuffers.RedeemBuffer(camelized)
|
||||
}()
|
||||
|
||||
func Camelize(word string) (camelized string) {
|
||||
for pos, ru := range []rune(word) {
|
||||
if pos > 0 {
|
||||
camelized.WriteRune(unicode.ToLower(ru))
|
||||
camelized += string(unicode.ToLower(ru))
|
||||
} else {
|
||||
camelized.WriteRune(unicode.ToUpper(ru))
|
||||
camelized += string(unicode.ToUpper(ru))
|
||||
}
|
||||
}
|
||||
return camelized.String()
|
||||
return
|
||||
}
|
||||
|
||||
// ToFileName lowercases and underscores a go type name
|
||||
@ -162,40 +224,33 @@ func ToCommandName(name string) string {
|
||||
|
||||
// ToHumanNameLower represents a code name as a human series of words
|
||||
func ToHumanNameLower(name string) string {
|
||||
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
|
||||
in := s.split(name)
|
||||
poolOfSplitters.RedeemSplitter(s)
|
||||
out := make([]string, 0, len(*in))
|
||||
in := newSplitter(withPostSplitInitialismCheck).split(name)
|
||||
out := make([]string, 0, len(in))
|
||||
|
||||
for _, w := range *in {
|
||||
for _, w := range in {
|
||||
if !w.IsInitialism() {
|
||||
out = append(out, lower(w.GetOriginal()))
|
||||
} else {
|
||||
out = append(out, trim(w.GetOriginal()))
|
||||
out = append(out, w.GetOriginal())
|
||||
}
|
||||
}
|
||||
poolOfLexems.RedeemLexems(in)
|
||||
|
||||
return strings.Join(out, " ")
|
||||
}
|
||||
|
||||
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
|
||||
func ToHumanNameTitle(name string) string {
|
||||
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
|
||||
in := s.split(name)
|
||||
poolOfSplitters.RedeemSplitter(s)
|
||||
in := newSplitter(withPostSplitInitialismCheck).split(name)
|
||||
|
||||
out := make([]string, 0, len(*in))
|
||||
for _, w := range *in {
|
||||
original := trim(w.GetOriginal())
|
||||
out := make([]string, 0, len(in))
|
||||
for _, w := range in {
|
||||
original := w.GetOriginal()
|
||||
if !w.IsInitialism() {
|
||||
out = append(out, Camelize(original))
|
||||
} else {
|
||||
out = append(out, original)
|
||||
}
|
||||
}
|
||||
poolOfLexems.RedeemLexems(in)
|
||||
|
||||
return strings.Join(out, " ")
|
||||
}
|
||||
|
||||
@ -209,7 +264,7 @@ func ToJSONName(name string) string {
|
||||
out = append(out, lower(w))
|
||||
continue
|
||||
}
|
||||
out = append(out, Camelize(trim(w)))
|
||||
out = append(out, Camelize(w))
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
@ -228,70 +283,35 @@ func ToVarName(name string) string {
|
||||
|
||||
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
|
||||
func ToGoName(name string) string {
|
||||
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
|
||||
lexems := s.split(name)
|
||||
poolOfSplitters.RedeemSplitter(s)
|
||||
defer func() {
|
||||
poolOfLexems.RedeemLexems(lexems)
|
||||
}()
|
||||
lexemes := *lexems
|
||||
lexems := newSplitter(withPostSplitInitialismCheck).split(name)
|
||||
|
||||
if len(lexemes) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
result := poolOfBuffers.BorrowBuffer(len(name))
|
||||
defer func() {
|
||||
poolOfBuffers.RedeemBuffer(result)
|
||||
}()
|
||||
|
||||
// check if not starting with a letter, upper case
|
||||
firstPart := lexemes[0].GetUnsafeGoName()
|
||||
if lexemes[0].IsInitialism() {
|
||||
firstPart = upper(firstPart)
|
||||
}
|
||||
|
||||
if c := firstPart[0]; c < utf8.RuneSelf {
|
||||
// ASCII
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z':
|
||||
result.WriteString(firstPart)
|
||||
case 'a' <= c && c <= 'z':
|
||||
result.WriteByte(c - 'a' + 'A')
|
||||
result.WriteString(firstPart[1:])
|
||||
default:
|
||||
result.WriteString(prefixFunc(name, firstPart))
|
||||
// NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
|
||||
// assume this is always the case
|
||||
}
|
||||
} else {
|
||||
// unicode
|
||||
firstRune, _ := utf8.DecodeRuneInString(firstPart)
|
||||
switch {
|
||||
case !unicode.IsLetter(firstRune):
|
||||
result.WriteString(prefixFunc(name, firstPart))
|
||||
case !unicode.IsUpper(firstRune):
|
||||
result.WriteString(prefixFunc(name, firstPart))
|
||||
/*
|
||||
result.WriteRune(unicode.ToUpper(firstRune))
|
||||
result.WriteString(firstPart[offset:])
|
||||
*/
|
||||
default:
|
||||
result.WriteString(firstPart)
|
||||
}
|
||||
}
|
||||
|
||||
for _, lexem := range lexemes[1:] {
|
||||
result := ""
|
||||
for _, lexem := range lexems {
|
||||
goName := lexem.GetUnsafeGoName()
|
||||
|
||||
// to support old behavior
|
||||
if lexem.IsInitialism() {
|
||||
goName = upper(goName)
|
||||
}
|
||||
result.WriteString(goName)
|
||||
result += goName
|
||||
}
|
||||
|
||||
return result.String()
|
||||
if len(result) > 0 {
|
||||
// Only prefix with X when the first character isn't an ascii letter
|
||||
first := []rune(result)[0]
|
||||
if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
|
||||
if GoNamePrefixFunc == nil {
|
||||
return "X" + result
|
||||
}
|
||||
result = GoNamePrefixFunc(name) + result
|
||||
}
|
||||
first = []rune(result)[0]
|
||||
if unicode.IsLetter(first) && !unicode.IsUpper(first) {
|
||||
result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// ContainsStrings searches a slice of strings for a case-sensitive match
|
||||
@ -321,22 +341,13 @@ type zeroable interface {
|
||||
// IsZero returns true when the value passed into the function is a zero value.
|
||||
// This allows for safer checking of interface values.
|
||||
func IsZero(data interface{}) bool {
|
||||
v := reflect.ValueOf(data)
|
||||
// check for nil data
|
||||
switch v.Kind() { //nolint:exhaustive
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// check for things that have an IsZero method instead
|
||||
if vv, ok := data.(zeroable); ok {
|
||||
return vv.IsZero()
|
||||
}
|
||||
|
||||
// continue with slightly more complex reflection
|
||||
switch v.Kind() { //nolint:exhaustive
|
||||
v := reflect.ValueOf(data)
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
@ -347,13 +358,24 @@ func IsZero(data interface{}) bool {
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
case reflect.Struct, reflect.Array:
|
||||
return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
|
||||
case reflect.Invalid:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// AddInitialisms add additional initialisms
|
||||
func AddInitialisms(words ...string) {
|
||||
for _, word := range words {
|
||||
// commonInitialisms[upper(word)] = true
|
||||
commonInitialisms.add(upper(word))
|
||||
}
|
||||
// sort again
|
||||
initialisms = commonInitialisms.sorted()
|
||||
}
|
||||
|
||||
// CommandLineOptionsGroup represents a group of user-defined command line options
|
||||
|
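`util.go` above documents `GoNamePrefixFunc` as an optional hook that `ToGoName` consults when a name does not start with a letter, with an `X` prefix as the default. A hedged usage sketch (the commented outputs are indicative, not verified against this exact vendored revision):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Default: names that do not start with a letter are prefixed with "X".
	fmt.Println(swag.ToGoName("123 first name")) // e.g. X123FirstName

	// Optional hook: choose your own prefix for such names.
	swag.GoNamePrefixFunc = func(name string) string {
		return "Nr"
	}
	fmt.Println(swag.ToGoName("123 first name")) // e.g. Nr123FirstName
}
```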
39
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
39
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
@ -16,11 +16,8 @@ package swag
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/mailru/easyjson/jlexer"
|
||||
@ -51,7 +48,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
|
||||
return nil, err
|
||||
}
|
||||
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
|
||||
return nil, errors.New("only YAML documents that are objects are supported")
|
||||
return nil, fmt.Errorf("only YAML documents that are objects are supported")
|
||||
}
|
||||
return &document, nil
|
||||
}
|
||||
@ -150,7 +147,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
	case yamlTimestamp:
		return node.Value, nil
	case yamlNull:
		return nil, nil //nolint:nilnil
		return nil, nil
	default:
		return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
	}
@ -248,27 +245,7 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
|
||||
return yaml.Marshal(&n)
|
||||
}
|
||||
|
||||
func isNil(input interface{}) bool {
|
||||
if input == nil {
|
||||
return true
|
||||
}
|
||||
kind := reflect.TypeOf(input).Kind()
|
||||
switch kind { //nolint:exhaustive
|
||||
case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
|
||||
return reflect.ValueOf(input).IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func json2yaml(item interface{}) (*yaml.Node, error) {
|
||||
if isNil(item) {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: "null",
|
||||
}, nil
|
||||
}
|
||||
|
||||
switch val := item.(type) {
|
||||
case JSONMapSlice:
|
||||
var n yaml.Node
|
||||
@ -288,14 +265,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
|
||||
case map[string]interface{}:
|
||||
var n yaml.Node
|
||||
n.Kind = yaml.MappingNode
|
||||
keys := make([]string, 0, len(val))
|
||||
for k := range val {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
v := val[k]
|
||||
for k, v := range val {
|
||||
childNode, err := json2yaml(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -348,9 +318,8 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
|
||||
Tag: yamlBoolScalar,
|
||||
Value: strconv.FormatBool(val),
|
||||
}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unhandled type: %T", val)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
|
||||
|
180
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
Normal file
180
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const urlPrefix = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||
// Most type assertions should use the Is function instead.
|
||||
//
|
||||
// Deprecated: Call the any.MessageName method instead.
|
||||
func AnyMessageName(any *anypb.Any) (string, error) {
|
||||
name, err := anyMessageName(any)
|
||||
return string(name), err
|
||||
}
|
||||
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
||||
if any == nil {
|
||||
return "", fmt.Errorf("message is nil")
|
||||
}
|
||||
name := protoreflect.FullName(any.TypeUrl)
|
||||
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
||||
name = name[i+len("/"):]
|
||||
}
|
||||
if !name.IsValid() {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||
//
|
||||
// Deprecated: Call the anypb.New function instead.
|
||||
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
||||
switch dm := m.(type) {
|
||||
case DynamicAny:
|
||||
m = dm.Message
|
||||
case *DynamicAny:
|
||||
if dm == nil {
|
||||
return nil, proto.ErrNil
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
b, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
||||
}
|
||||
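As the deprecation notice above suggests, new code packs messages with anypb.New rather than ptypes.MarshalAny. A brief sketch of the suggested replacement (assuming the google.golang.org/protobuf/types/known/anypb package, which is not part of this file):

	import "google.golang.org/protobuf/types/known/anypb"

	// Pack a message into an Any; New sets the type URL and marshals the value.
	a, err := anypb.New(msg)
	if err != nil {
		// handle the marshal error
	}
	_ = a.TypeUrl // "type.googleapis.com/<full message name>"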
|
||||
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||
// be resolved in the global registry.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
|
||||
// to resolve the message name and create a new instance of it.
|
||||
func Empty(any *anypb.Any) (proto.Message, error) {
|
||||
name, err := anyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.MessageV1(mt.New().Interface()), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||
// into the provided message m. It returns an error if the target message
|
||||
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||
//
|
||||
// The target message m may be a *DynamicAny message. If the underlying message
|
||||
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||
//
|
||||
// Deprecated: Call the any.UnmarshalTo method instead.
|
||||
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
||||
if dm, ok := m.(*DynamicAny); ok {
|
||||
if dm.Message == nil {
|
||||
var err error
|
||||
dm.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
|
||||
anyName, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgName := proto.MessageName(m)
|
||||
if anyName != msgName {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, m)
|
||||
}
|
||||
|
||||
// Is reports whether the Any message contains a message of the specified type.
|
||||
//
|
||||
// Deprecated: Call the any.MessageIs method instead.
|
||||
func Is(any *anypb.Any, m proto.Message) bool {
|
||||
if any == nil || m == nil {
|
||||
return false
|
||||
}
|
||||
name := proto.MessageName(m)
|
||||
if !strings.HasSuffix(any.TypeUrl, name) {
|
||||
return false
|
||||
}
|
||||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||
// The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
//
|
||||
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
|
||||
// the any message contents into a new instance of the underlying message.
|
||||
type DynamicAny struct{ proto.Message }
|
||||
|
||||
func (m DynamicAny) String() string {
|
||||
if m.Message == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return m.Message.String()
|
||||
}
|
||||
func (m DynamicAny) Reset() {
|
||||
if m.Message == nil {
|
||||
return
|
||||
}
|
||||
m.Message.Reset()
|
||||
}
|
||||
func (m DynamicAny) ProtoMessage() {
|
||||
return
|
||||
}
|
||||
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
||||
if m.Message == nil {
|
||||
return nil
|
||||
}
|
||||
return dynamicAny{proto.MessageReflect(m.Message)}
|
||||
}
|
||||
|
||||
type dynamicAny struct{ protoreflect.Message }
|
||||
|
||||
func (m dynamicAny) Type() protoreflect.MessageType {
|
||||
return dynamicAnyType{m.Message.Type()}
|
||||
}
|
||||
func (m dynamicAny) New() protoreflect.Message {
|
||||
return dynamicAnyType{m.Message.Type()}.New()
|
||||
}
|
||||
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
||||
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
||||
}
|
||||
|
||||
type dynamicAnyType struct{ protoreflect.MessageType }
|
||||
|
||||
func (t dynamicAnyType) New() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.New()}
|
||||
}
|
||||
func (t dynamicAnyType) Zero() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.Zero()}
|
||||
}
|
62
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
Normal file
62
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||
|
||||
package any
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/any.proto.
|
||||
|
||||
type Any = anypb.Any
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
||||
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
||||
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
||||
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
||||
}
|
10
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
10
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ptypes provides functionality for interacting with well-known types.
|
||||
//
|
||||
// Deprecated: Well-known types have specialized functionality directly
|
||||
// injected into the generated packages for each message type.
|
||||
// See the deprecation notice for each function for the suggested alternative.
|
||||
package ptypes
|
76
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
Normal file
76
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||
// This is about 10,000 years in seconds.
|
||||
const (
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// Duration converts a durationpb.Duration to a time.Duration.
|
||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||
//
|
||||
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
|
||||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(dur); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(dur.Seconds) * time.Second
|
||||
if int64(d/time.Second) != dur.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
if dur.Nanos != 0 {
|
||||
d += time.Duration(dur.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (dur.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||
//
|
||||
// Deprecated: Call the durationpb.New function instead.
|
||||
func DurationProto(d time.Duration) *durationpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durationpb.Duration{
|
||||
Seconds: int64(secs),
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
||||
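The deprecation notes above point at durationpb's own helpers. A short sketch of the suggested replacements (assuming the google.golang.org/protobuf/types/known/durationpb package, which is not part of this file):

	import "google.golang.org/protobuf/types/known/durationpb"

	dur := durationpb.New(90 * time.Second) // replaces DurationProto
	if err := dur.CheckValid(); err != nil {
		// out-of-range or malformed duration
	}
	d := dur.AsDuration() // replaces Duration; d == 90*time.Second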
|
||||
// validateDuration determines whether the durationpb.Duration is valid
|
||||
// according to the definition in google/protobuf/duration.proto.
|
||||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||
// while the range of time.Duration is about 290 years.
|
||||
func validateDuration(dur *durationpb.Duration) error {
|
||||
if dur == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
||||
}
|
||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
||||
}
|
||||
return nil
|
||||
}
|
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/duration.proto.
|
||||
|
||||
type Duration = durationpb.Duration
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
||||
}
|
112
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
112
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in timestamp.proto.
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
||||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
|
||||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because that corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.Now function instead.
|
||||
func TimestampNow() *timestamppb.Timestamp {
|
||||
ts, err := TimestampProto(time.Now())
|
||||
if err != nil {
|
||||
panic("ptypes: time.Now() out of Timestamp range")
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.New function instead.
|
||||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
|
||||
ts := &timestamppb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
||||
// For invalid Timestamps, it returns an error message in parentheses.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime method instead,
|
||||
// followed by a call to the Format method on the time.Time value.
|
||||
func TimestampString(ts *timestamppb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
|
||||
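The ts.AsTime and ts.CheckValid replacements named in the deprecation notices above can be sketched as follows (assuming the google.golang.org/protobuf/types/known/timestamppb package, which is not part of this file):

	import "google.golang.org/protobuf/types/known/timestamppb"

	ts := timestamppb.Now() // replaces TimestampNow
	if err := ts.CheckValid(); err != nil {
		// invalid timestamp
	}
	t := ts.AsTime()               // replaces Timestamp; always UTC
	_ = t.Format(time.RFC3339Nano) // replaces TimestampString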
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
||||
// and has a Nanos field in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time,
|
||||
// but the converse is not true.
|
||||
func validateTimestamp(ts *timestamppb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
type Timestamp = timestamppb.Timestamp
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||
}
|
8
vendor/github.com/google/gnostic-models/compiler/extensions.go
generated
vendored
8
vendor/github.com/google/gnostic-models/compiler/extensions.go
generated
vendored
@ -20,8 +20,8 @@ import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
|
||||
extensions "github.com/google/gnostic-models/extensions"
|
||||
@ -33,7 +33,7 @@ type ExtensionHandler struct {
|
||||
}
|
||||
|
||||
// CallExtension calls a binary extension handler.
|
||||
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) {
|
||||
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
|
||||
if context == nil || context.ExtensionHandlers == nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl
|
||||
return handled, response, err
|
||||
}
|
||||
|
||||
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) {
|
||||
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
|
||||
if extensionHandlers.Name != "" {
|
||||
yamlData, _ := yaml.Marshal(in)
|
||||
request := &extensions.ExtensionHandlerRequest{
|
||||
|
96
vendor/github.com/google/gnostic-models/extensions/extension.pb.go
generated
vendored
96
vendor/github.com/google/gnostic-models/extensions/extension.pb.go
generated
vendored
@ -14,8 +14,8 @@
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc v4.23.4
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.19.3
|
||||
// source: extensions/extension.proto
|
||||
|
||||
package gnostic_extension_v1
|
||||
@ -51,9 +51,11 @@ type Version struct {
|
||||
|
||||
func (x *Version) Reset() {
|
||||
*x = Version{}
|
||||
mi := &file_extensions_extension_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Version) String() string {
|
||||
@ -64,7 +66,7 @@ func (*Version) ProtoMessage() {}
|
||||
|
||||
func (x *Version) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@ -121,9 +123,11 @@ type ExtensionHandlerRequest struct {
|
||||
|
||||
func (x *ExtensionHandlerRequest) Reset() {
|
||||
*x = ExtensionHandlerRequest{}
|
||||
mi := &file_extensions_extension_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerRequest) String() string {
|
||||
@ -134,7 +138,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@ -187,9 +191,11 @@ type ExtensionHandlerResponse struct {
|
||||
|
||||
func (x *ExtensionHandlerResponse) Reset() {
|
||||
*x = ExtensionHandlerResponse{}
|
||||
mi := &file_extensions_extension_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) String() string {
|
||||
@ -200,7 +206,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@ -251,9 +257,11 @@ type Wrapper struct {
|
||||
|
||||
func (x *Wrapper) Reset() {
|
||||
*x = Wrapper{}
|
||||
mi := &file_extensions_extension_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Wrapper) String() string {
|
||||
@ -264,7 +272,7 @@ func (*Wrapper) ProtoMessage() {}
|
||||
|
||||
func (x *Wrapper) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
@ -359,7 +367,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte {
|
||||
}
|
||||
|
||||
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_extensions_extension_proto_goTypes = []any{
|
||||
var file_extensions_extension_proto_goTypes = []interface{}{
|
||||
(*Version)(nil), // 0: gnostic.extension.v1.Version
|
||||
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
|
||||
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
|
||||
@ -382,6 +390,56 @@ func file_extensions_extension_proto_init() {
|
||||
if File_extensions_extension_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Version); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExtensionHandlerRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExtensionHandlerResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Wrapper); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
|
6
vendor/github.com/google/gnostic-models/extensions/extensions.go
generated
vendored
6
vendor/github.com/google/gnostic-models/extensions/extensions.go
generated
vendored
@ -19,8 +19,8 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
)
|
||||
|
||||
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
|
||||
@ -54,7 +54,7 @@ func Main(handler extensionHandler) {
|
||||
response.Errors = append(response.Errors, err.Error())
|
||||
} else if handled {
|
||||
response.Handled = true
|
||||
response.Value, err = anypb.New(output)
|
||||
response.Value, err = ptypes.MarshalAny(output)
|
||||
if err != nil {
|
||||
response.Errors = append(response.Errors, err.Error())
|
||||
}
|
||||
|
1349
vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
generated
vendored
1349
vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
generated
vendored
File diff suppressed because it is too large
1763
vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
generated
vendored
1763
vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
generated
vendored
File diff suppressed because it is too large
182
vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
generated
vendored
182
vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
generated
vendored
@ -1,182 +0,0 @@
|
||||
// Copyright 2022 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc v4.23.4
|
||||
// source: openapiv3/annotations.proto
|
||||
|
||||
package openapi_v3
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
|
||||
{
|
||||
ExtendedType: (*descriptorpb.FileOptions)(nil),
|
||||
ExtensionType: (*Document)(nil),
|
||||
Field: 1143,
|
||||
Name: "openapi.v3.document",
|
||||
Tag: "bytes,1143,opt,name=document",
|
||||
Filename: "openapiv3/annotations.proto",
|
||||
},
|
||||
{
|
||||
ExtendedType: (*descriptorpb.MethodOptions)(nil),
|
||||
ExtensionType: (*Operation)(nil),
|
||||
Field: 1143,
|
||||
Name: "openapi.v3.operation",
|
||||
Tag: "bytes,1143,opt,name=operation",
|
||||
Filename: "openapiv3/annotations.proto",
|
||||
},
|
||||
{
|
||||
ExtendedType: (*descriptorpb.MessageOptions)(nil),
|
||||
ExtensionType: (*Schema)(nil),
|
||||
Field: 1143,
|
||||
Name: "openapi.v3.schema",
|
||||
Tag: "bytes,1143,opt,name=schema",
|
||||
Filename: "openapiv3/annotations.proto",
|
||||
},
|
||||
{
|
||||
ExtendedType: (*descriptorpb.FieldOptions)(nil),
|
||||
ExtensionType: (*Schema)(nil),
|
||||
Field: 1143,
|
||||
Name: "openapi.v3.property",
|
||||
Tag: "bytes,1143,opt,name=property",
|
||||
Filename: "openapiv3/annotations.proto",
|
||||
},
|
||||
}
|
||||
|
||||
// Extension fields to descriptorpb.FileOptions.
|
||||
var (
|
||||
// optional openapi.v3.Document document = 1143;
|
||||
E_Document = &file_openapiv3_annotations_proto_extTypes[0]
|
||||
)
|
||||
|
||||
// Extension fields to descriptorpb.MethodOptions.
|
||||
var (
|
||||
// optional openapi.v3.Operation operation = 1143;
|
||||
E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
|
||||
)
|
||||
|
||||
// Extension fields to descriptorpb.MessageOptions.
|
||||
var (
|
||||
// optional openapi.v3.Schema schema = 1143;
|
||||
E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
|
||||
)
|
||||
|
||||
// Extension fields to descriptorpb.FieldOptions.
|
||||
var (
|
||||
// optional openapi.v3.Schema property = 1143;
|
||||
E_Property = &file_openapiv3_annotations_proto_extTypes[3]
|
||||
)
|
||||
|
||||
var File_openapiv3_annotations_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_openapiv3_annotations_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
|
||||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
|
||||
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65,
|
||||
0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
|
||||
0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
|
||||
0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
|
||||
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
|
||||
0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
|
||||
0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
|
||||
0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
|
||||
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
|
||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
|
||||
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
|
||||
0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f,
|
||||
0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
|
||||
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_openapiv3_annotations_proto_goTypes = []any{
|
||||
(*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
|
||||
(*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
|
||||
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
|
||||
(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
|
||||
(*Document)(nil), // 4: openapi.v3.Document
|
||||
(*Operation)(nil), // 5: openapi.v3.Operation
|
||||
(*Schema)(nil), // 6: openapi.v3.Schema
|
||||
}
|
||||
var file_openapiv3_annotations_proto_depIdxs = []int32{
|
||||
0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
|
||||
1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
|
||||
2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
|
||||
3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
|
||||
4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
|
||||
5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
|
||||
6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
|
||||
6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
|
||||
8, // [8:8] is the sub-list for method output_type
|
||||
8, // [8:8] is the sub-list for method input_type
|
||||
4, // [4:8] is the sub-list for extension type_name
|
||||
0, // [0:4] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_openapiv3_annotations_proto_init() }
|
||||
func file_openapiv3_annotations_proto_init() {
|
||||
if File_openapiv3_annotations_proto != nil {
|
||||
return
|
||||
}
|
||||
file_openapiv3_OpenAPIv3_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 4,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_openapiv3_annotations_proto_goTypes,
|
||||
DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
|
||||
ExtensionInfos: file_openapiv3_annotations_proto_extTypes,
|
||||
}.Build()
|
||||
File_openapiv3_annotations_proto = out.File
|
||||
file_openapiv3_annotations_proto_rawDesc = nil
|
||||
file_openapiv3_annotations_proto_goTypes = nil
|
||||
file_openapiv3_annotations_proto_depIdxs = nil
|
||||
}
|
56
vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
generated
vendored
56
vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
generated
vendored
@ -1,56 +0,0 @@
|
||||
// Copyright 2022 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package openapi.v3;
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
import "openapiv3/OpenAPIv3.proto";
|
||||
|
||||
// The Go package name.
|
||||
option go_package = "./openapiv3;openapi_v3";
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one-level of name nesting and be
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "AnnotationsProto";
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapi_v3";
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
option objc_class_prefix = "OAS";
|
||||
|
||||
extend google.protobuf.FileOptions {
|
||||
Document document = 1143;
|
||||
}
|
||||
|
||||
extend google.protobuf.MethodOptions {
|
||||
Operation operation = 1143;
|
||||
}
|
||||
|
||||
extend google.protobuf.MessageOptions {
|
||||
Schema schema = 1143;
|
||||
}
|
||||
|
||||
extend google.protobuf.FieldOptions {
|
||||
Schema property = 1143;
|
||||
}
|
59
vendor/github.com/onsi/gomega/CHANGELOG.md
generated
vendored
59
vendor/github.com/onsi/gomega/CHANGELOG.md
generated
vendored
@ -1,62 +1,3 @@
|
||||
## 1.36.0
|
||||
|
||||
### Features
|
||||
- new: make collection-related matchers Go 1.23 iterator aware [4c964c6]
|
||||
|
||||
### Maintenance
|
||||
- Replace min/max helpers with built-in min/max [ece6872]
|
||||
- Fix some typos in docs [8e924d7]
|
||||
|
||||
## 1.35.1
|
||||
|
||||
### Fixes
|
||||
- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1]
|
||||
|
||||
## 1.35.0
|
||||
|
||||
### Features
|
||||
|
||||
- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to explicitly add a timeout) [e4c4265]
|
||||
- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure (see the sketch below) [eeca931]
|
||||
|
||||
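A minimal sketch of the early-exit hook, assuming the `gomega` dot-import and hypothetical `done()` / `count()` helpers:

```go
Consistently(func() (int, error) {
	if done() {
		// Ends the Consistently poll early without failing the assertion.
		return count(), StopTrying("finished early").Successfully()
	}
	return count(), nil
}).Should(BeNumerically("<", 10))
```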
### Fixes
|
||||
|
||||
- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e]
|
||||
|
||||
### Maintenance
|
||||
|
||||
- Bump all dependencies [a05a416]
|
||||
|
||||
## 1.34.2
|
||||
|
||||
Require Go 1.22+
|
||||
|
||||
### Maintenance
|
||||
- bump ginkgo as well [c59c6dc]
|
||||
- bump to go 1.22 - remove x/exp dependency [8158b99]
|
||||
|
||||
## 1.34.1
|
||||
|
||||
### Maintenance
|
||||
- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd]
|
||||
|
||||
## 1.34.0
|
||||
|
||||
### Features
|
||||
- Add RoundTripper method to ghttp.Server [c549e0d]
|
||||
|
||||
### Fixes
|
||||
- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c]
|
||||
- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67]
|
||||
|
||||
### Maintenance
|
||||
- bump ginkgo [8af2ece]
|
||||
- Fix typo in docs [123a071]
|
||||
- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083]
|
||||
- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796]
|
||||
- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f]
|
||||
- Bump github-pages from 230 to 231 in /docs (#748) [892c303]
|
||||
|
||||
## 1.33.1
|
||||
|
||||
### Fixes
|
||||
|
26
vendor/github.com/onsi/gomega/gomega_dsl.go
generated
vendored
26
vendor/github.com/onsi/gomega/gomega_dsl.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const GOMEGA_VERSION = "1.36.0"
|
||||
const GOMEGA_VERSION = "1.33.1"
|
||||
|
||||
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
|
||||
If you're using Ginkgo then you probably forgot to put your assertion in an It().
|
||||
@ -319,19 +319,7 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in
|
||||
Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
|
||||
}, SpecTimeout(time.Second))
|
||||
|
||||
Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
|
||||
|
||||
By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
|
||||
|
||||
Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
|
||||
|
||||
now either the context cancellation or the timeout will cause Eventually to stop polling.
|
||||
|
||||
If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
|
||||
|
||||
EnforceDefaultTimeoutsWhenUsingContexts()
|
||||
|
||||
in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
|
||||
Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
|
||||
|
||||
**Category 3: Making assertions _in_ the function passed into Eventually**
|
||||
|
||||
@ -503,16 +491,6 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
|
||||
Default.SetDefaultConsistentlyPollingInterval(t)
|
||||
}
|
||||
|
||||
// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided.
|
||||
func EnforceDefaultTimeoutsWhenUsingContexts() {
|
||||
Default.EnforceDefaultTimeoutsWhenUsingContexts()
|
||||
}
|
||||
|
||||
// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`.
|
||||
func DisableDefaultTimeoutsWhenUsingContext() {
|
||||
Default.DisableDefaultTimeoutsWhenUsingContext()
|
||||
}
|
||||
|
||||
// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
|
||||
// the matcher passed to the Should and ShouldNot methods.
|
||||
//
|
||||
|
12
vendor/github.com/onsi/gomega/internal/async_assertion.go
generated
vendored
12
vendor/github.com/onsi/gomega/internal/async_assertion.go
generated
vendored
@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time {
|
||||
if assertion.asyncType == AsyncAssertionTypeConsistently {
|
||||
return time.After(assertion.g.DurationBundle.ConsistentlyDuration)
|
||||
} else {
|
||||
if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts {
|
||||
if assertion.ctx == nil {
|
||||
return time.After(assertion.g.DurationBundle.EventuallyTimeout)
|
||||
} else {
|
||||
return nil
|
||||
@ -496,15 +496,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
|
||||
for _, err := range []error{actualErr, matcherErr} {
|
||||
if pollingSignalErr, ok := AsPollingSignalError(err); ok {
|
||||
if pollingSignalErr.IsStopTrying() {
|
||||
if pollingSignalErr.IsSuccessful() {
|
||||
if assertion.asyncType == AsyncAssertionTypeEventually {
|
||||
fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)")
|
||||
} else {
|
||||
return true // early escape hatch for Consistently
|
||||
}
|
||||
} else {
|
||||
fail("Told to stop trying")
|
||||
}
|
||||
fail("Told to stop trying")
|
||||
return false
|
||||
}
|
||||
if pollingSignalErr.IsTryAgainAfter() {
|
||||
|
17
vendor/github.com/onsi/gomega/internal/duration_bundle.go
generated
vendored
17
vendor/github.com/onsi/gomega/internal/duration_bundle.go
generated
vendored
@ -8,11 +8,10 @@ import (
|
||||
)
|
||||
|
||||
type DurationBundle struct {
|
||||
EventuallyTimeout time.Duration
|
||||
EventuallyPollingInterval time.Duration
|
||||
ConsistentlyDuration time.Duration
|
||||
ConsistentlyPollingInterval time.Duration
|
||||
EnforceDefaultTimeoutsWhenUsingContexts bool
|
||||
EventuallyTimeout time.Duration
|
||||
EventuallyPollingInterval time.Duration
|
||||
ConsistentlyDuration time.Duration
|
||||
ConsistentlyPollingInterval time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
@ -21,19 +20,15 @@ const (
|
||||
|
||||
ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
|
||||
ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
|
||||
|
||||
EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS"
|
||||
)
|
||||
|
||||
func FetchDefaultDurationBundle() DurationBundle {
|
||||
_, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName)
|
||||
return DurationBundle{
|
||||
EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second),
|
||||
EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond),
|
||||
|
||||
ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
|
||||
ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
|
||||
EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts,
|
||||
ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
|
||||
ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
|
||||
}
|
||||
}
|
||||
|
||||
|
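As a small, hedged sketch of the env-driven defaults shown in the duration_bundle.go hunks above (an illustrative re-implementation, not the vendored helper itself; only the env var names that appear in the diff are used):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// durationFromEnv mirrors the pattern FetchDefaultDurationBundle relies on:
// fall back to a default when the variable is unset, otherwise parse it.
func durationFromEnv(name string, def time.Duration) time.Duration {
	raw, ok := os.LookupEnv(name)
	if !ok || raw == "" {
		return def
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		return def // the real helper may handle parse errors differently
	}
	return d
}

func main() {
	os.Setenv("GOMEGA_DEFAULT_CONSISTENTLY_DURATION", "250ms")
	fmt.Println(durationFromEnv("GOMEGA_DEFAULT_CONSISTENTLY_DURATION", 100*time.Millisecond))

	// Presence alone (any value) switches on context timeout enforcement,
	// matching the os.LookupEnv call in FetchDefaultDurationBundle above.
	os.Setenv("GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS", "1")
	_, enforce := os.LookupEnv("GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS")
	fmt.Println(enforce) // true
}
```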
8
vendor/github.com/onsi/gomega/internal/gomega.go
generated
vendored
@ -127,11 +127,3 @@ func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) {
|
||||
func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) {
|
||||
g.DurationBundle.ConsistentlyPollingInterval = t
|
||||
}
|
||||
|
||||
func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() {
|
||||
g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true
|
||||
}
|
||||
|
||||
func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() {
|
||||
g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false
|
||||
}
|
||||
|
11
vendor/github.com/onsi/gomega/internal/polling_signal_error.go
generated
vendored
@ -17,7 +17,6 @@ type PollingSignalError interface {
|
||||
error
|
||||
Wrap(err error) PollingSignalError
|
||||
Attach(description string, obj any) PollingSignalError
|
||||
Successfully() PollingSignalError
|
||||
Now()
|
||||
}
|
||||
|
||||
@ -46,7 +45,6 @@ type PollingSignalErrorImpl struct {
|
||||
wrappedErr error
|
||||
pollingSignalErrorType PollingSignalErrorType
|
||||
duration time.Duration
|
||||
successful bool
|
||||
Attachments []PollingSignalErrorAttachment
|
||||
}
|
||||
|
||||
@ -75,11 +73,6 @@ func (s *PollingSignalErrorImpl) Unwrap() error {
|
||||
return s.wrappedErr
|
||||
}
|
||||
|
||||
func (s *PollingSignalErrorImpl) Successfully() PollingSignalError {
|
||||
s.successful = true
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *PollingSignalErrorImpl) Now() {
|
||||
panic(s)
|
||||
}
|
||||
@ -88,10 +81,6 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool {
|
||||
return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying
|
||||
}
|
||||
|
||||
func (s *PollingSignalErrorImpl) IsSuccessful() bool {
|
||||
return s.successful
|
||||
}
|
||||
|
||||
func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool {
|
||||
return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter
|
||||
}
|
||||
|
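The polling-signal plumbing above (IsStopTrying/IsSuccessful) is driven from test code via `StopTrying(...)`. A hedged sketch of that usage, assuming a gomega version that supports `Successfully()`; the package name, test name, and sample values are invented:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestStopTryingSignals(t *testing.T) {
	g := NewWithT(t)

	polls := 0
	g.Consistently(func() int {
		polls++
		if polls == 3 {
			// Raises the polling signal inspected by IsStopTrying/IsSuccessful:
			// the Consistently window ends early and counts as a pass.
			StopTrying("seen enough samples").Successfully().Now()
		}
		return 7
	}).Should(Equal(7))
}
```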
16
vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
generated
vendored
@ -4,31 +4,17 @@ package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type BeEmptyMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
// short-circuit the iterator case, as we only need to see the first
|
||||
// element, if any.
|
||||
if miter.IsIter(actual) {
|
||||
var length int
|
||||
if miter.IsSeq2(actual) {
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool { length++; return false })
|
||||
} else {
|
||||
miter.IterateV(actual, func(v reflect.Value) bool { length++; return false })
|
||||
}
|
||||
return length == 0, nil
|
||||
}
|
||||
|
||||
length, ok := lengthOf(actual)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
|
||||
return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
return length == 0, nil
|
||||
|
32
vendor/github.com/onsi/gomega/matchers/consist_of.go
generated
vendored
@ -7,7 +7,6 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
|
||||
)
|
||||
|
||||
@ -18,8 +17,8 @@ type ConsistOfMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
|
||||
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) {
|
||||
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
matchers := matchers(matcher.Elements)
|
||||
@ -61,21 +60,10 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
|
||||
}
|
||||
|
||||
func flatten(elems []interface{}) []interface{} {
|
||||
if len(elems) != 1 ||
|
||||
!(isArrayOrSlice(elems[0]) ||
|
||||
(miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) {
|
||||
if len(elems) != 1 || !isArrayOrSlice(elems[0]) {
|
||||
return elems
|
||||
}
|
||||
|
||||
if miter.IsIter(elems[0]) {
|
||||
flattened := []any{}
|
||||
miter.IterateV(elems[0], func(v reflect.Value) bool {
|
||||
flattened = append(flattened, v.Interface())
|
||||
return true
|
||||
})
|
||||
return flattened
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(elems[0])
|
||||
flattened := make([]interface{}, value.Len())
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
@ -128,19 +116,7 @@ func presentable(elems []interface{}) interface{} {
|
||||
func valuesOf(actual interface{}) []interface{} {
|
||||
value := reflect.ValueOf(actual)
|
||||
values := []interface{}{}
|
||||
if miter.IsIter(actual) {
|
||||
if miter.IsSeq2(actual) {
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool {
|
||||
values = append(values, v.Interface())
|
||||
return true
|
||||
})
|
||||
} else {
|
||||
miter.IterateV(actual, func(v reflect.Value) bool {
|
||||
values = append(values, v.Interface())
|
||||
return true
|
||||
})
|
||||
}
|
||||
} else if isMap(actual) {
|
||||
if isMap(actual) {
|
||||
keys := value.MapKeys()
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
values = append(values, value.MapIndex(keys[i]).Interface())
|
||||
|
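For reference, a minimal usage sketch of the ConsistOf behavior implemented above (order-insensitive matching, flattening of a single slice argument, and map values via valuesOf); the test scaffolding and data are illustrative only:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestConsistOf(t *testing.T) {
	g := NewWithT(t)

	// Order does not matter for ConsistOf.
	g.Expect([]string{"b", "a", "c"}).To(ConsistOf("a", "b", "c"))

	// A single slice argument is flattened into its elements (the flatten()
	// helper above), so this is equivalent to the previous assertion.
	g.Expect([]string{"b", "a", "c"}).To(ConsistOf([]string{"a", "b", "c"}))

	// For maps, only the values are compared (see valuesOf above).
	g.Expect(map[string]int{"x": 1, "y": 2}).To(ConsistOf(1, 2))
}
```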
239
vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
generated
vendored
@ -8,7 +8,6 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type ContainElementMatcher struct {
|
||||
@ -17,18 +16,16 @@ type ContainElementMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
|
||||
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1))
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) {
|
||||
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
var actualT reflect.Type
|
||||
var result reflect.Value
|
||||
switch numResultArgs := len(matcher.Result); {
|
||||
case numResultArgs > 1:
|
||||
switch l := len(matcher.Result); {
|
||||
case l > 1:
|
||||
return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
|
||||
case numResultArgs == 1:
|
||||
// Check the optional result arg to point to a single value/array/slice/map
|
||||
// of a type compatible with the actual value.
|
||||
case l == 1:
|
||||
if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
|
||||
return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
|
||||
format.Object(matcher.Result[0], 1))
|
||||
@ -37,209 +34,93 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
|
||||
resultReference := matcher.Result[0]
|
||||
result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
|
||||
switch result.Kind() {
|
||||
case reflect.Array: // result arrays are not supported, as they cannot be dynamically sized.
|
||||
if miter.IsIter(actual) {
|
||||
_, actualvT := miter.IterKVTypes(actual)
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
reflect.SliceOf(actualvT), result.Type().String())
|
||||
}
|
||||
case reflect.Array:
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
|
||||
|
||||
case reflect.Slice: // result slice
|
||||
// can we assign elements in actual to elements in what the result
|
||||
// arg points to?
|
||||
// - ✔ actual is an array or slice
|
||||
// - ✔ actual is an iter.Seq producing "v" elements
|
||||
// - ✔ actual is an iter.Seq2 producing "v" elements, ignoring
|
||||
// the "k" elements.
|
||||
switch {
|
||||
case isArrayOrSlice(actual):
|
||||
if !actualT.Elem().AssignableTo(result.Type().Elem()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.String(), result.Type().String())
|
||||
}
|
||||
|
||||
case miter.IsIter(actual):
|
||||
_, actualvT := miter.IterKVTypes(actual)
|
||||
if !actualvT.AssignableTo(result.Type().Elem()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualvT.String(), result.Type().String())
|
||||
}
|
||||
|
||||
default: // incompatible result reference
|
||||
case reflect.Slice:
|
||||
if !isArrayOrSlice(actual) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
|
||||
}
|
||||
|
||||
case reflect.Map: // result map
|
||||
// can we assign elements in actual to elements in what the result
|
||||
// arg points to?
|
||||
// - ✔ actual is a map
|
||||
// - ✔ actual is an iter.Seq2 (iter.Seq doesn't fit though)
|
||||
switch {
|
||||
case isMap(actual):
|
||||
if !actualT.AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.String(), result.Type().String())
|
||||
}
|
||||
|
||||
case miter.IsIter(actual):
|
||||
actualkT, actualvT := miter.IterKVTypes(actual)
|
||||
if actualkT == nil {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
reflect.SliceOf(actualvT).String(), result.Type().String())
|
||||
}
|
||||
if !reflect.MapOf(actualkT, actualvT).AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
reflect.MapOf(actualkT, actualvT), result.Type().String())
|
||||
}
|
||||
|
||||
default: // incompatible result reference
|
||||
if !actualT.Elem().AssignableTo(result.Type().Elem()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.String(), result.Type().String())
|
||||
}
|
||||
case reflect.Map:
|
||||
if !isMap(actual) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.String(), result.Type().String())
|
||||
}
|
||||
if !actualT.AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.String(), result.Type().String())
|
||||
}
|
||||
|
||||
default:
|
||||
// can we assign a (single) element in actual to what the result arg
|
||||
// points to?
|
||||
switch {
|
||||
case miter.IsIter(actual):
|
||||
_, actualvT := miter.IterKVTypes(actual)
|
||||
if !actualvT.AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualvT.String(), result.Type().String())
|
||||
}
|
||||
default:
|
||||
if !actualT.Elem().AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.Elem().String(), result.Type().String())
|
||||
}
|
||||
if !actualT.Elem().AssignableTo(result.Type()) {
|
||||
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
|
||||
actualT.Elem().String(), result.Type().String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the supplied matcher isn't an Omega matcher, default to the Equal
|
||||
// matcher.
|
||||
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
|
||||
if !elementIsMatcher {
|
||||
elemMatcher = &EqualMatcher{Expected: matcher.Element}
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(actual)
|
||||
var valueAt func(int) interface{}
|
||||
|
||||
var getFindings func() reflect.Value // abstracts how the findings are collected and stored
|
||||
var lastError error
|
||||
var getFindings func() reflect.Value
|
||||
var foundAt func(int)
|
||||
|
||||
if !miter.IsIter(actual) {
|
||||
var valueAt func(int) interface{}
|
||||
var foundAt func(int)
|
||||
// We're dealing with an array/slice/map, so in all cases we can iterate
|
||||
// over the elements in actual using indices (that can be considered
|
||||
// keys in case of maps).
|
||||
if isMap(actual) {
|
||||
keys := value.MapKeys()
|
||||
valueAt = func(i int) interface{} {
|
||||
return value.MapIndex(keys[i]).Interface()
|
||||
}
|
||||
if result.Kind() != reflect.Invalid {
|
||||
fm := reflect.MakeMap(actualT)
|
||||
getFindings = func() reflect.Value { return fm }
|
||||
foundAt = func(i int) {
|
||||
fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
valueAt = func(i int) interface{} {
|
||||
return value.Index(i).Interface()
|
||||
}
|
||||
if result.Kind() != reflect.Invalid {
|
||||
var fsl reflect.Value
|
||||
if result.Kind() == reflect.Slice {
|
||||
fsl = reflect.MakeSlice(result.Type(), 0, 0)
|
||||
} else {
|
||||
fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
|
||||
}
|
||||
getFindings = func() reflect.Value { return fsl }
|
||||
foundAt = func(i int) {
|
||||
fsl = reflect.Append(fsl, value.Index(i))
|
||||
}
|
||||
}
|
||||
if isMap(actual) {
|
||||
keys := value.MapKeys()
|
||||
valueAt = func(i int) interface{} {
|
||||
return value.MapIndex(keys[i]).Interface()
|
||||
}
|
||||
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
elem := valueAt(i)
|
||||
success, err := elemMatcher.Match(elem)
|
||||
if err != nil {
|
||||
lastError = err
|
||||
continue
|
||||
if result.Kind() != reflect.Invalid {
|
||||
fm := reflect.MakeMap(actualT)
|
||||
getFindings = func() reflect.Value {
|
||||
return fm
|
||||
}
|
||||
if success {
|
||||
if result.Kind() == reflect.Invalid {
|
||||
return true, nil
|
||||
}
|
||||
foundAt(i)
|
||||
foundAt = func(i int) {
|
||||
fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// We're dealing with an iterator as a first-class construct, so things
|
||||
// are slightly different: there is no index defined as in case of
|
||||
// arrays/slices/maps, just "ooooorder"
|
||||
var found func(k, v reflect.Value)
|
||||
valueAt = func(i int) interface{} {
|
||||
return value.Index(i).Interface()
|
||||
}
|
||||
if result.Kind() != reflect.Invalid {
|
||||
if result.Kind() == reflect.Map {
|
||||
fm := reflect.MakeMap(result.Type())
|
||||
getFindings = func() reflect.Value { return fm }
|
||||
found = func(k, v reflect.Value) { fm.SetMapIndex(k, v) }
|
||||
var f reflect.Value
|
||||
if result.Kind() == reflect.Slice {
|
||||
f = reflect.MakeSlice(result.Type(), 0, 0)
|
||||
} else {
|
||||
var fsl reflect.Value
|
||||
if result.Kind() == reflect.Slice {
|
||||
fsl = reflect.MakeSlice(result.Type(), 0, 0)
|
||||
} else {
|
||||
fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
|
||||
}
|
||||
getFindings = func() reflect.Value { return fsl }
|
||||
found = func(_, v reflect.Value) { fsl = reflect.Append(fsl, v) }
|
||||
f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
|
||||
}
|
||||
getFindings = func() reflect.Value {
|
||||
return f
|
||||
}
|
||||
foundAt = func(i int) {
|
||||
f = reflect.Append(f, value.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
success := false
|
||||
actualkT, _ := miter.IterKVTypes(actual)
|
||||
if actualkT == nil {
|
||||
miter.IterateV(actual, func(v reflect.Value) bool {
|
||||
var err error
|
||||
success, err = elemMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
lastError = err
|
||||
return true // iterate on...
|
||||
}
|
||||
if success {
|
||||
if result.Kind() == reflect.Invalid {
|
||||
return false // a match and no result needed, so we're done
|
||||
}
|
||||
found(reflect.Value{}, v)
|
||||
}
|
||||
return true // iterate on...
|
||||
})
|
||||
} else {
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool {
|
||||
var err error
|
||||
success, err = elemMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
lastError = err
|
||||
return true // iterate on...
|
||||
}
|
||||
if success {
|
||||
if result.Kind() == reflect.Invalid {
|
||||
return false // a match and no result needed, so we're done
|
||||
}
|
||||
found(k, v)
|
||||
}
|
||||
return true // iterate on...
|
||||
})
|
||||
var lastError error
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
elem := valueAt(i)
|
||||
success, err := elemMatcher.Match(elem)
|
||||
if err != nil {
|
||||
lastError = err
|
||||
continue
|
||||
}
|
||||
if success && result.Kind() == reflect.Invalid {
|
||||
return true, nil
|
||||
if success {
|
||||
if result.Kind() == reflect.Invalid {
|
||||
return true, nil
|
||||
}
|
||||
foundAt(i)
|
||||
}
|
||||
}
|
||||
|
||||
|
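The long ContainElement section above is mostly about the optional findings pointer. A hedged sketch of how that is typically used, assuming a gomega version with the pointer-collecting behavior shown in the diff; names and data are invented for illustration:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestContainElementFindings(t *testing.T) {
	g := NewWithT(t)

	// Plain containment check.
	g.Expect([]int{1, 2, 3}).To(ContainElement(2))

	// With an optional pointer argument, the matcher stashes what it found
	// (the result/getFindings logic above). A pointer to a slice collects
	// every element that satisfied the sub-matcher.
	var big []int
	g.Expect([]int{1, 5, 2, 7}).To(ContainElement(BeNumerically(">", 4), &big))
	g.Expect(big).To(ConsistOf(5, 7))
}
```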
5
vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
generated
vendored
@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
|
||||
)
|
||||
|
||||
@ -14,8 +13,8 @@ type ContainElementsMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
|
||||
return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) {
|
||||
return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
matchers := matchers(matcher.Elements)
|
||||
|
40
vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
generated
vendored
@ -5,7 +5,6 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type HaveEachMatcher struct {
|
||||
@ -13,8 +12,8 @@ type HaveEachMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
|
||||
return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s",
|
||||
if !isArrayOrSlice(actual) && !isMap(actual) {
|
||||
return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
|
||||
format.Object(actual, 1))
|
||||
}
|
||||
|
||||
@ -23,38 +22,6 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
|
||||
elemMatcher = &EqualMatcher{Expected: matcher.Element}
|
||||
}
|
||||
|
||||
if miter.IsIter(actual) {
|
||||
// rejecting the non-elements case works different for iterators as we
|
||||
// don't want to fetch all elements into a slice first.
|
||||
count := 0
|
||||
var success bool
|
||||
var err error
|
||||
if miter.IsSeq2(actual) {
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool {
|
||||
count++
|
||||
success, err = elemMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return success
|
||||
})
|
||||
} else {
|
||||
miter.IterateV(actual, func(v reflect.Value) bool {
|
||||
count++
|
||||
success, err = elemMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return success
|
||||
})
|
||||
}
|
||||
if count == 0 {
|
||||
return false, fmt.Errorf("HaveEach matcher expects a non-empty iter.Seq/iter.Seq2. Got:\n%s",
|
||||
format.Object(actual, 1))
|
||||
}
|
||||
return success, err
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(actual)
|
||||
if value.Len() == 0 {
|
||||
return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
|
||||
@ -73,8 +40,7 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
|
||||
}
|
||||
}
|
||||
|
||||
// if we never failed then we succeed; the empty/nil cases have already been
|
||||
// rejected above.
|
||||
// if there are no elements, then HaveEach will match.
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
success, err := elemMatcher.Match(valueAt(i))
|
||||
if err != nil {
|
||||
|
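A short usage sketch of the HaveEach matcher whose Match method appears above (every element must satisfy the single sub-matcher or value, and empty collections are rejected with an error); the scaffolding and data are illustrative:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestHaveEach(t *testing.T) {
	g := NewWithT(t)

	// Every element must satisfy the sub-matcher.
	g.Expect([]int{2, 4, 6}).To(HaveEach(BeNumerically(">", 0)))

	// A plain value is wrapped in Equal, as the code above shows.
	g.Expect([]string{"a", "a"}).To(HaveEach("a"))
}
```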
58
vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
generated
vendored
@ -2,10 +2,8 @@ package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type mismatchFailure struct {
|
||||
@ -23,68 +21,24 @@ type HaveExactElementsMatcher struct {
|
||||
func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
matcher.resetState()
|
||||
|
||||
if isMap(actual) || miter.IsSeq2(actual) {
|
||||
return false, fmt.Errorf("HaveExactElements matcher doesn't work on map or iter.Seq2. Got:\n%s", format.Object(actual, 1))
|
||||
if isMap(actual) {
|
||||
return false, fmt.Errorf("error")
|
||||
}
|
||||
|
||||
matchers := matchers(matcher.Elements)
|
||||
lenMatchers := len(matchers)
|
||||
|
||||
success = true
|
||||
|
||||
if miter.IsIter(actual) {
|
||||
// In the worst case, we need to see everything before we can give our
|
||||
// verdict. The only exception is fast fail.
|
||||
i := 0
|
||||
miter.IterateV(actual, func(v reflect.Value) bool {
|
||||
if i >= lenMatchers {
|
||||
// the iterator produces more values than we got matchers: this
|
||||
// is not good.
|
||||
matcher.extraIndex = i
|
||||
success = false
|
||||
return false
|
||||
}
|
||||
|
||||
elemMatcher := matchers[i].(omegaMatcher)
|
||||
match, err := elemMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
|
||||
index: i,
|
||||
failure: err.Error(),
|
||||
})
|
||||
success = false
|
||||
} else if !match {
|
||||
matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
|
||||
index: i,
|
||||
failure: elemMatcher.FailureMessage(v.Interface()),
|
||||
})
|
||||
success = false
|
||||
}
|
||||
i++
|
||||
return true
|
||||
})
|
||||
if i < len(matchers) {
|
||||
// the iterator produced less values than we got matchers: this is
|
||||
// no good, no no no.
|
||||
matcher.missingIndex = i
|
||||
success = false
|
||||
}
|
||||
return success, nil
|
||||
}
|
||||
|
||||
values := valuesOf(actual)
|
||||
|
||||
lenMatchers := len(matchers)
|
||||
lenValues := len(values)
|
||||
|
||||
for i := 0; i < lenMatchers || i < lenValues; i++ {
|
||||
if i >= lenMatchers {
|
||||
matcher.extraIndex = i
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= lenValues {
|
||||
matcher.missingIndex = i
|
||||
success = false
|
||||
return
|
||||
}
|
||||
|
||||
@ -95,17 +49,15 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
|
||||
index: i,
|
||||
failure: err.Error(),
|
||||
})
|
||||
success = false
|
||||
} else if !match {
|
||||
matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
|
||||
index: i,
|
||||
failure: elemMatcher.FailureMessage(values[i]),
|
||||
})
|
||||
success = false
|
||||
}
|
||||
}
|
||||
|
||||
return success, nil
|
||||
return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
|
||||
}
|
||||
|
||||
func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
|
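A brief usage sketch of HaveExactElements as implemented above: order matters and lengths must line up exactly, with surplus or missing positions reported via extraIndex/missingIndex. Test names and data are illustrative:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestHaveExactElements(t *testing.T) {
	g := NewWithT(t)

	// Unlike ConsistOf, order matters and the element count must match exactly.
	g.Expect([]string{"a", "b", "c"}).To(HaveExactElements("a", "b", "c"))
	g.Expect([]string{"a", "b"}).NotTo(HaveExactElements("b", "a"))

	// Individual positions may use matchers instead of literal values.
	g.Expect([]int{1, 10}).To(HaveExactElements(Equal(1), BeNumerically(">", 5)))
}
```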
36
vendor/github.com/onsi/gomega/matchers/have_field.go
generated
vendored
@ -17,7 +17,7 @@ func (e missingFieldError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
func extractField(actual interface{}, field string, matchername string) (any, error) {
|
||||
func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
|
||||
fields := strings.SplitN(field, ".", 2)
|
||||
actualValue := reflect.ValueOf(actual)
|
||||
|
||||
@ -64,46 +64,36 @@ func extractField(actual interface{}, field string, matchername string) (any, er
|
||||
type HaveFieldMatcher struct {
|
||||
Field string
|
||||
Expected interface{}
|
||||
}
|
||||
|
||||
func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
|
||||
var isMatcher bool
|
||||
expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher)
|
||||
if !isMatcher {
|
||||
expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
|
||||
}
|
||||
return expectedMatcher
|
||||
extractedField interface{}
|
||||
expectedMatcher omegaMatcher
|
||||
}
|
||||
|
||||
func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
extractedField, err := extractField(actual, matcher.Field, "HaveField")
|
||||
matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return matcher.expectedMatcher().Match(extractedField)
|
||||
var isMatcher bool
|
||||
matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
|
||||
if !isMatcher {
|
||||
matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
|
||||
}
|
||||
|
||||
return matcher.expectedMatcher.Match(matcher.extractedField)
|
||||
}
|
||||
|
||||
func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
extractedField, err := extractField(actual, matcher.Field, "HaveField")
|
||||
if err != nil {
|
||||
// this really shouldn't happen
|
||||
return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err)
|
||||
}
|
||||
message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
|
||||
message += matcher.expectedMatcher().FailureMessage(extractedField)
|
||||
message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
extractedField, err := extractField(actual, matcher.Field, "HaveField")
|
||||
if err != nil {
|
||||
// this really shouldn't happen
|
||||
return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err)
|
||||
}
|
||||
message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
|
||||
message += matcher.expectedMatcher().NegatedFailureMessage(extractedField)
|
||||
message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
|
||||
|
||||
return message
|
||||
}
|
||||
|
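For context, a minimal sketch of the HaveField matcher whose extractField/Match reshuffle appears above; dotted field names walk nested structs, mirroring the strings.SplitN(field, ".", 2) step. The struct types and values are made up:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

type author struct{ LastName string }

type book struct {
	Title  string
	Author author
}

func TestHaveField(t *testing.T) {
	g := NewWithT(t)
	b := book{Title: "Oliver Twist", Author: author{LastName: "Dickens"}}

	// A plain value is compared with Equal; nested fields use dots.
	g.Expect(b).To(HaveField("Title", "Oliver Twist"))
	g.Expect(b).To(HaveField("Author.LastName", Equal("Dickens")))
}
```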
19
vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
generated
vendored
@ -7,7 +7,6 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type HaveKeyMatcher struct {
|
||||
@ -15,8 +14,8 @@ type HaveKeyMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isMap(actual) && !miter.IsSeq2(actual) {
|
||||
return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
|
||||
if !isMap(actual) {
|
||||
return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
|
||||
@ -24,20 +23,6 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
|
||||
keyMatcher = &EqualMatcher{Expected: matcher.Key}
|
||||
}
|
||||
|
||||
if miter.IsSeq2(actual) {
|
||||
var success bool
|
||||
var err error
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool {
|
||||
success, err = keyMatcher.Match(k.Interface())
|
||||
if err != nil {
|
||||
err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
|
||||
return false
|
||||
}
|
||||
return !success
|
||||
})
|
||||
return success, err
|
||||
}
|
||||
|
||||
keys := reflect.ValueOf(actual).MapKeys()
|
||||
for i := 0; i < len(keys); i++ {
|
||||
success, err := keyMatcher.Match(keys[i].Interface())
|
||||
|
26
vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
generated
vendored
@ -7,7 +7,6 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type HaveKeyWithValueMatcher struct {
|
||||
@ -16,8 +15,8 @@ type HaveKeyWithValueMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isMap(actual) && !miter.IsSeq2(actual) {
|
||||
return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
|
||||
if !isMap(actual) {
|
||||
return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
|
||||
@ -30,27 +29,6 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
|
||||
valueMatcher = &EqualMatcher{Expected: matcher.Value}
|
||||
}
|
||||
|
||||
if miter.IsSeq2(actual) {
|
||||
var success bool
|
||||
var err error
|
||||
miter.IterateKV(actual, func(k, v reflect.Value) bool {
|
||||
success, err = keyMatcher.Match(k.Interface())
|
||||
if err != nil {
|
||||
err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
|
||||
return false
|
||||
}
|
||||
if success {
|
||||
success, err = valueMatcher.Match(v.Interface())
|
||||
if err != nil {
|
||||
err = fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return !success
|
||||
})
|
||||
return success, err
|
||||
}
|
||||
|
||||
keys := reflect.ValueOf(actual).MapKeys()
|
||||
for i := 0; i < len(keys); i++ {
|
||||
success, err := keyMatcher.Match(keys[i].Interface())
|
||||
|
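A short usage sketch covering both HaveKey and HaveKeyWithValue as implemented in the two files above; keys and values can be literal values or sub-matchers. The map contents are illustrative:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestHaveKey(t *testing.T) {
	g := NewWithT(t)
	ages := map[string]int{"alice": 37, "bob": 41}

	g.Expect(ages).To(HaveKey("alice"))
	g.Expect(ages).To(HaveKey(MatchRegexp(`^b`))) // keys can be matched with sub-matchers
	g.Expect(ages).To(HaveKeyWithValue("bob", 41))
	g.Expect(ages).To(HaveKeyWithValue("alice", BeNumerically("<", 40)))
}
```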
2
vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
generated
vendored
@ -13,7 +13,7 @@ type HaveLenMatcher struct {
|
||||
func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
length, ok := lengthOf(actual)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
|
||||
return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
return length == matcher.Count, nil
|
||||
|
128
vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go
generated
vendored
@ -1,128 +0,0 @@
|
||||
//go:build go1.23
|
||||
|
||||
package miter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// HasIterators always returns false for Go versions before 1.23.
|
||||
func HasIterators() bool { return true }
|
||||
|
||||
// IsIter returns true if the specified value is a function type that can be
|
||||
// range-d over, otherwise false.
|
||||
//
|
||||
// We don't use reflect's CanSeq and CanSeq2 directly, as these would return
|
||||
// true also for other value types that are range-able, such as integers,
|
||||
// slices, et cetera. Here, we aim only at range-able (iterator) functions.
|
||||
func IsIter(it any) bool {
|
||||
if it == nil { // on purpose we only test for untyped nil.
|
||||
return false
|
||||
}
|
||||
// reject all non-iterator-func values, even if they're range-able.
|
||||
t := reflect.TypeOf(it)
|
||||
if t.Kind() != reflect.Func {
|
||||
return false
|
||||
}
|
||||
return t.CanSeq() || t.CanSeq2()
|
||||
}
|
||||
|
||||
// IterKVTypes returns the reflection types of an iterator's yield function's K
|
||||
// and optional V arguments, otherwise nil K and V reflection types.
|
||||
func IterKVTypes(it any) (k, v reflect.Type) {
|
||||
if it == nil {
|
||||
return
|
||||
}
|
||||
// reject all non-iterator-func values, even if they're range-able.
|
||||
t := reflect.TypeOf(it)
|
||||
if t.Kind() != reflect.Func {
|
||||
return
|
||||
}
|
||||
// get the reflection types for V, and where applicable, K.
|
||||
switch {
|
||||
case t.CanSeq():
|
||||
v = t. /*iterator fn*/ In(0). /*yield fn*/ In(0)
|
||||
case t.CanSeq2():
|
||||
yieldfn := t. /*iterator fn*/ In(0)
|
||||
k = yieldfn.In(0)
|
||||
v = yieldfn.In(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IsSeq2 returns true if the passed iterator function is compatible with
|
||||
// iter.Seq2, otherwise false.
|
||||
//
|
||||
// IsSeq2 hides the Go 1.23+ specific reflect.Type.CanSeq2 behind a facade which
|
||||
// is empty for Go versions before 1.23.
|
||||
func IsSeq2(it any) bool {
|
||||
if it == nil {
|
||||
return false
|
||||
}
|
||||
t := reflect.TypeOf(it)
|
||||
return t.Kind() == reflect.Func && t.CanSeq2()
|
||||
}
|
||||
|
||||
// isNilly returns true if v is either an untyped nil, or is a nil function (not
|
||||
// necessarily an iterator function).
|
||||
func isNilly(v any) bool {
|
||||
if v == nil {
|
||||
return true
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
return rv.Kind() == reflect.Func && rv.IsNil()
|
||||
}
|
||||
|
||||
// IterateV loops over the elements produced by an iterator function, passing
|
||||
// the elements to the specified yield function individually and stopping only
|
||||
// when either the iterator function runs out of elements or the yield function
|
||||
// tell us to stop it.
|
||||
//
|
||||
// IterateV works very much like reflect.Value.Seq but hides the Go 1.23+
|
||||
// specific parts behind a facade which is empty for Go versions before 1.23, in
|
||||
// order to simplify code maintenance for matchers when using older Go versions.
|
||||
func IterateV(it any, yield func(v reflect.Value) bool) {
|
||||
if isNilly(it) {
|
||||
return
|
||||
}
|
||||
// reject all non-iterator-func values, even if they're range-able.
|
||||
t := reflect.TypeOf(it)
|
||||
if t.Kind() != reflect.Func || !t.CanSeq() {
|
||||
return
|
||||
}
|
||||
// Call the specified iterator function, handing it our adaptor to call the
|
||||
// specified generic reflection yield function.
|
||||
reflectedYield := reflect.MakeFunc(
|
||||
t. /*iterator fn*/ In(0),
|
||||
func(args []reflect.Value) []reflect.Value {
|
||||
return []reflect.Value{reflect.ValueOf(yield(args[0]))}
|
||||
})
|
||||
reflect.ValueOf(it).Call([]reflect.Value{reflectedYield})
|
||||
}
|
||||
|
||||
// IterateKV loops over the key-value elements produced by an iterator function,
|
||||
// passing the elements to the specified yield function individually and
|
||||
// stopping only when either the iterator function runs out of elements or the
|
||||
// yield function tell us to stop it.
|
||||
//
|
||||
// IterateKV works very much like reflect.Value.Seq2 but hides the Go 1.23+
|
||||
// specific parts behind a facade which is empty for Go versions before 1.23, in
|
||||
// order to simplify code maintenance for matchers when using older Go versions.
|
||||
func IterateKV(it any, yield func(k, v reflect.Value) bool) {
|
||||
if isNilly(it) {
|
||||
return
|
||||
}
|
||||
// reject all non-iterator-func values, even if they're range-able.
|
||||
t := reflect.TypeOf(it)
|
||||
if t.Kind() != reflect.Func || !t.CanSeq2() {
|
||||
return
|
||||
}
|
||||
// Call the specified iterator function, handing it our adaptor to call the
|
||||
// specified generic reflection yield function.
|
||||
reflectedYield := reflect.MakeFunc(
|
||||
t. /*iterator fn*/ In(0),
|
||||
func(args []reflect.Value) []reflect.Value {
|
||||
return []reflect.Value{reflect.ValueOf(yield(args[0], args[1]))}
|
||||
})
|
||||
reflect.ValueOf(it).Call([]reflect.Value{reflectedYield})
|
||||
}
|
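To make the reflection logic above concrete, here is a hedged, standalone sketch of the kind of range-over-func iterator that IsIter/IterateV detect; it requires Go 1.23+ and the function name is invented:

```go
package main

import "fmt"

// evens is an iter.Seq-shaped function: it takes a yield callback and stops
// early when yield returns false. This is exactly the shape the CanSeq
// check above looks for.
func evens(n int) func(yield func(int) bool) {
	return func(yield func(int) bool) {
		for i := 0; i < n; i++ {
			if !yield(2 * i) {
				return
			}
		}
	}
}

func main() {
	// Requires Go 1.23+ range-over-func support.
	for v := range evens(4) {
		fmt.Println(v) // 0 2 4 6
	}
}
```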
44
vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go
generated
vendored
@ -1,44 +0,0 @@
|
||||
//go:build !go1.23
|
||||
|
||||
/*
|
||||
Gomega matchers
|
||||
|
||||
This package implements the Gomega matchers and does not typically need to be imported.
|
||||
See the docs for Gomega for documentation on the matchers
|
||||
|
||||
http://onsi.github.io/gomega/
|
||||
*/
|
||||
|
||||
package miter
|
||||
|
||||
import "reflect"
|
||||
|
||||
// HasIterators always returns false for Go versions before 1.23.
|
||||
func HasIterators() bool { return false }
|
||||
|
||||
// IsIter always returns false for Go versions before 1.23 as there is no
|
||||
// iterator (function) pattern defined yet; see also:
|
||||
// https://tip.golang.org/blog/range-functions.
|
||||
func IsIter(i any) bool { return false }
|
||||
|
||||
// IsSeq2 always returns false for Go versions before 1.23 as there is no
|
||||
// iterator (function) pattern defined yet; see also:
|
||||
// https://tip.golang.org/blog/range-functions.
|
||||
func IsSeq2(it any) bool { return false }
|
||||
|
||||
// IterKVTypes always returns nil reflection types for Go versions before 1.23
|
||||
// as there is no iterator (function) pattern defined yet; see also:
|
||||
// https://tip.golang.org/blog/range-functions.
|
||||
func IterKVTypes(i any) (k, v reflect.Type) {
|
||||
return
|
||||
}
|
||||
|
||||
// IterateV never loops over what has been passed to it as an iterator for Go
|
||||
// versions before 1.23 as there is no iterator (function) pattern defined yet;
|
||||
// see also: https://tip.golang.org/blog/range-functions.
|
||||
func IterateV(it any, yield func(v reflect.Value) bool) {}
|
||||
|
||||
// IterateKV never loops over what has been passed to it as an iterator for Go
|
||||
// versions before 1.23 as there is no iterator (function) pattern defined yet;
|
||||
// see also: https://tip.golang.org/blog/range-functions.
|
||||
func IterateKV(it any, yield func(k, v reflect.Value) bool) {}
|
@ -1,8 +1,6 @@
|
||||
package bipartitegraph
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
. "github.com/onsi/gomega/matchers/support/goraph/edge"
|
||||
. "github.com/onsi/gomega/matchers/support/goraph/node"
|
||||
"github.com/onsi/gomega/matchers/support/goraph/util"
|
||||
@ -159,11 +157,6 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
|
||||
if len(currentLayer) == 0 {
|
||||
return []NodeOrderedSet{}
|
||||
}
|
||||
if done { // if last layer - into last layer must be only 'free' nodes
|
||||
currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool {
|
||||
return !matching.Free(in)
|
||||
})
|
||||
}
|
||||
guideLayers = append(guideLayers, currentLayer)
|
||||
}
|
||||
|
||||
|
13
vendor/github.com/onsi/gomega/matchers/type_support.go
generated
vendored
@ -15,8 +15,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/matchers/internal/miter"
|
||||
)
|
||||
|
||||
type omegaMatcher interface {
|
||||
@ -154,17 +152,6 @@ func lengthOf(a interface{}) (int, bool) {
|
||||
switch reflect.TypeOf(a).Kind() {
|
||||
case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice:
|
||||
return reflect.ValueOf(a).Len(), true
|
||||
case reflect.Func:
|
||||
if !miter.IsIter(a) {
|
||||
return 0, false
|
||||
}
|
||||
var l int
|
||||
if miter.IsSeq2(a) {
|
||||
miter.IterateKV(a, func(k, v reflect.Value) bool { l++; return true })
|
||||
} else {
|
||||
miter.IterateV(a, func(v reflect.Value) bool { l++; return true })
|
||||
}
|
||||
return l, true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
|
2
vendor/github.com/onsi/gomega/types/types.go
generated
vendored
@ -29,8 +29,6 @@ type Gomega interface {
|
||||
SetDefaultEventuallyPollingInterval(time.Duration)
|
||||
SetDefaultConsistentlyDuration(time.Duration)
|
||||
SetDefaultConsistentlyPollingInterval(time.Duration)
|
||||
EnforceDefaultTimeoutsWhenUsingContexts()
|
||||
DisableDefaultTimeoutsWhenUsingContext()
|
||||
}
|
||||
|
||||
// All Gomega matchers must implement the GomegaMatcher interface
|
||||
|
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
@ -1,13 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.11.x
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
|
||||
script:
|
||||
- go test -short -coverprofile=coverage.txt -covermode=count ./...
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
@ -1,22 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
133
vendor/github.com/x448/float16/README.md
generated
vendored
@ -1,133 +0,0 @@
|
||||
# Float16 (Binary16) in Go/Golang
|
||||
[](https://travis-ci.org/x448/float16)
|
||||
[](https://codecov.io/gh/x448/float16)
|
||||
[](https://goreportcard.com/report/github.com/x448/float16)
|
||||
[](https://github.com/x448/float16/releases)
|
||||
[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
|
||||
|
||||
`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
|
||||
|
||||
IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
|
||||
|
||||
All possible 4+ billion floating-point conversions with this library are verified to be correct.
|
||||
|
||||
Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
|
||||
|
||||
## Features
|
||||
Current features include:
|
||||
|
||||
* float16 to float32 conversions use lossless conversion.
|
||||
* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
|
||||
* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
|
||||
* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
|
||||
* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
|
||||
* all functions in this library use zero allocs except String().
|
||||
|
||||
## Status
|
||||
This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
|
||||
|
||||
Current status:
|
||||
|
||||
* core API is done and breaking API changes are unlikely.
|
||||
* 100% of unit tests pass:
|
||||
* short mode (`go test -short`) tests around 65765 conversions in 0.005s.
|
||||
* normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
|
||||
* 100% code coverage with both short mode and normal mode.
|
||||
* tested on amd64 but it should work on all little-endian platforms supported by Go.
|
||||
|
||||
Roadmap:
|
||||
|
||||
* add functions for fast batch conversions leveraging SIMD when supported by hardware.
|
||||
* speed up unit test when verifying all possible 4+ billion conversions.
|
||||
* test on additional platforms.
|
||||
|
||||
## Float16 to Float32 Conversion
|
||||
Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
|
||||
|
||||
## Float32 to Float16 Conversion
|
||||
Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
|
||||
|
||||
Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
|
||||
|
||||
## Usage
|
||||
Install with `go get github.com/x448/float16`.
|
||||
```
|
||||
// Convert float32 to float16
|
||||
pi := float32(math.Pi)
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
|
||||
// Convert float16 to float32
|
||||
pi32 := pi16.Float32()
|
||||
|
||||
// PrecisionFromfloat32() is faster than the overhead of calling a function.
|
||||
// This example only converts if there's no data loss and input is not a subnormal.
|
||||
if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
}
|
||||
```
|
||||
|
||||
## Float16 Type and API
|
||||
Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
|
||||
```
|
||||
package float16 // import "github.com/x448/float16"
|
||||
|
||||
// Exported types and consts
|
||||
type Float16 uint16
|
||||
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||
|
||||
// Exported functions
|
||||
Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
|
||||
with identical results to AMD and Intel F16C hardware. NaN inputs
|
||||
are converted with quiet bit always set on, to be like F16C.
|
||||
|
||||
FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
|
||||
// The "ps" suffix means "preserve signaling".
|
||||
// Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
|
||||
|
||||
Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
|
||||
NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
|
||||
Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
|
||||
|
||||
PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
|
||||
// (inline and < 1 ns/op)
|
||||
// Exported methods
|
||||
(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
|
||||
(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
|
||||
(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
|
||||
(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
|
||||
(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
|
||||
(f Float16) IsFinite() bool // true if f is not infinite or NaN
|
||||
(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
|
||||
(f Float16) Signbit() bool // true if f is negative or negative zero
|
||||
(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
|
||||
```
|
||||
See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
|
||||
|
||||
## Benchmarks
|
||||
Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
|
||||
|
||||
```
|
||||
All functions have zero allocations except float16.String().
|
||||
|
||||
FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
|
||||
ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
|
||||
Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
|
||||
|
||||
PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
|
||||
* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
|
||||
|
||||
## Special Thanks
|
||||
Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
|
||||
|
||||
## License
|
||||
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
|
||||
Licensed under [MIT License](LICENSE)
|
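A self-contained round-trip sketch using the float16 API documented in the README above (the conversion back to float32 is lossless, and PrecisionFromfloat32 is a cheap pre-check for lossy conversions); this is an illustrative program, not part of the vendored sources:

```go
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	pi := float32(math.Pi)

	// float32 -> float16 uses IEEE 754 round-to-nearest-even;
	// float16 -> float32 is lossless.
	pi16 := float16.Fromfloat32(pi)
	fmt.Println(pi16.Bits(), pi16.String(), pi16.Float32())

	// Cheap precision check before converting.
	if float16.PrecisionFromfloat32(pi) != float16.PrecisionExact {
		fmt.Println("converting pi drops significand bits")
	}
}
```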
302
vendor/github.com/x448/float16/float16.go
generated
vendored
@ -1,302 +0,0 @@
|
||||
// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
//
|
||||
// Special thanks to Kathryn Long for her Rust implementation
|
||||
// of float16 at github.com/starkat99/half-rs (MIT license)
|
||||
|
||||
package float16
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
|
||||
type Float16 uint16
|
||||
|
||||
// Precision indicates whether the conversion to Float16 is
|
||||
// exact, subnormal without dropped bits, inexact, underflow, or overflow.
|
||||
type Precision int
|
||||
|
||||
const (
|
||||
|
||||
// PrecisionExact is for non-subnormals that don't drop bits during conversion.
|
||||
// All of these can round-trip. Should always convert to float16.
|
||||
PrecisionExact Precision = iota
|
||||
|
||||
// PrecisionUnknown is for subnormals that don't drop bits during conversion but
|
||||
// not all of these can round-trip so precision is unknown without more effort.
|
||||
// Only 2046 of these can round-trip and the rest cannot round-trip.
|
||||
PrecisionUnknown
|
||||
|
||||
// PrecisionInexact is for dropped significand bits and cannot round-trip.
|
||||
// Some of these are subnormals. Cannot round-trip float32->float16->float32.
|
||||
PrecisionInexact
|
||||
|
||||
	// PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
	PrecisionUnderflow

	// PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
	PrecisionOverflow
)

// PrecisionFromfloat32 returns Precision without performing
// the conversion. Conversions from both Infinity and NaN
// values will always report PrecisionExact even if NaN payload
// or NaN-Quiet-Bit is lost. This function is kept simple to
// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
func PrecisionFromfloat32(f32 float32) Precision {
	u32 := math.Float32bits(f32)

	if u32 == 0 || u32 == 0x80000000 {
		// +- zero will always be exact conversion
		return PrecisionExact
	}

	const COEFMASK uint32 = 0x7fffff // 23 least significant bits
	const EXPSHIFT uint32 = 23
	const EXPBIAS uint32 = 127
	const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
	const DROPMASK uint32 = COEFMASK >> 10

	exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
	coef := u32 & COEFMASK

	if exp == 128 {
		// +- infinity or NaN
		// apps may want to do extra checks for NaN separately
		return PrecisionExact
	}

	// https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
	// "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
	if exp < -24 {
		return PrecisionUnderflow
	}
	if exp > 15 {
		return PrecisionOverflow
	}
	if (coef & DROPMASK) != uint32(0) {
		// these include subnormals and non-subnormals that dropped bits
		return PrecisionInexact
	}

	if exp < -14 {
		// Subnormals. Caller may want to test these further.
		// There are 2046 subnormals that can successfully round-trip f32->f16->f32
		// and 20 of those 2046 have 32-bit input coef == 0.
		// RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
		// so some protocols and libraries will choose to handle subnormals differently
		// when deciding to encode them to CBOR float32 vs float16.
		return PrecisionUnknown
	}

	return PrecisionExact
}

// Frombits returns the float16 number corresponding to the IEEE 754 binary16
// representation u16, with the sign bit of u16 and the result in the same bit
// position. Frombits(Bits(x)) == x.
func Frombits(u16 uint16) Float16 {
	return Float16(u16)
}

// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
// IEEE default rounding (nearest int, with ties to even).
func Fromfloat32(f32 float32) Float16 {
	return Float16(f32bitsToF16bits(math.Float32bits(f32)))
}

// ErrInvalidNaNValue indicates a NaN was not received.
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")

type float16Error string

func (e float16Error) Error() string { return string(e) }

// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
// signaling and payload. Unlike Fromfloat32(), which can only return
// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
// If the result is infinity (sNaN with empty payload), then the
// lowest bit of payload is set to make the result a NaN.
// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
// This function was kept simple to be able to inline.
func FromNaN32ps(nan float32) (Float16, error) {
	const SNAN = Float16(uint16(0x7c01)) // signalling NaN

	u32 := math.Float32bits(nan)
	sign := u32 & 0x80000000
	exp := u32 & 0x7f800000
	coef := u32 & 0x007fffff

	if (exp != 0x7f800000) || (coef == 0) {
		return SNAN, ErrInvalidNaNValue
	}

	u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))

	if (u16 & 0x03ff) == 0 {
		// result became infinity, make it NaN by setting lowest bit in payload
		u16 = u16 | 0x0001
	}

	return Float16(u16), nil
}

// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
// Returned NaN value 0x7e01 has all exponent bits = 1 with the
// first and last bits = 1 in the significand. This is consistent
// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
func NaN() Float16 {
	return Float16(0x7e01)
}

// Inf returns a Float16 with an infinity value with the specified sign.
// A sign >= 0 returns positive infinity.
// A sign < 0 returns negative infinity.
func Inf(sign int) Float16 {
	if sign >= 0 {
		return Float16(0x7c00)
	}
	return Float16(0x8000 | 0x7c00)
}

// Float32 returns a float32 converted from f (Float16).
// This is a lossless conversion.
func (f Float16) Float32() float32 {
	u32 := f16bitsToF32bits(uint16(f))
	return math.Float32frombits(u32)
}

// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
// of f and the result in the same bit position. Bits(Frombits(x)) == x.
func (f Float16) Bits() uint16 {
	return uint16(f)
}

// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
func (f Float16) IsNaN() bool {
	return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
}

// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
// “not-a-number” value.
func (f Float16) IsQuietNaN() bool {
	return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
}

// IsInf reports whether f is an infinity (inf).
// A sign > 0 reports whether f is positive inf.
// A sign < 0 reports whether f is negative inf.
// A sign == 0 reports whether f is either inf.
func (f Float16) IsInf(sign int) bool {
	return ((f == 0x7c00) && sign >= 0) ||
		(f == 0xfc00 && sign <= 0)
}

// IsFinite returns true if f is neither infinite nor NaN.
func (f Float16) IsFinite() bool {
	return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
}

// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
func (f Float16) IsNormal() bool {
	exp := uint16(f) & uint16(0x7c00)
	return (exp != uint16(0x7c00)) && (exp != 0)
}

// Signbit reports whether f is negative or negative zero.
func (f Float16) Signbit() bool {
	return (uint16(f) & uint16(0x8000)) != 0
}

// String satisfies the fmt.Stringer interface.
func (f Float16) String() string {
	return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
}

// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
func f16bitsToF32bits(in uint16) uint32 {
	// All 65536 conversions with this were confirmed to be correct
	// by Montgomery Edwards⁴⁴⁸ (github.com/x448).

	sign := uint32(in&0x8000) << 16 // sign for 32-bit
	exp := uint32(in&0x7c00) >> 10  // exponent for 16-bit
	coef := uint32(in&0x03ff) << 13 // significand for 32-bit

	if exp == 0x1f {
		if coef == 0 {
			// infinity
			return sign | 0x7f800000 | coef
		}
		// NaN
		return sign | 0x7fc00000 | coef
	}

	if exp == 0 {
		if coef == 0 {
			// zero
			return sign
		}

		// normalize subnormal numbers
		exp++
		for coef&0x7f800000 == 0 {
			coef <<= 1
			exp--
		}
		coef &= 0x007fffff
	}

	return sign | ((exp + (0x7f - 0xf)) << 23) | coef
}

// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
// Conversion rounds to nearest integer with ties to even.
func f32bitsToF16bits(u32 uint32) uint16 {
	// Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
	// All 4294967296 conversions with this were confirmed to be correct by x448.
	// Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.

	sign := u32 & 0x80000000
	exp := u32 & 0x7f800000
	coef := u32 & 0x007fffff

	if exp == 0x7f800000 {
		// NaN or Infinity
		nanBit := uint32(0)
		if coef != 0 {
			nanBit = uint32(0x0200)
		}
		return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
	}

	halfSign := sign >> 16

	unbiasedExp := int32(exp>>23) - 127
	halfExp := unbiasedExp + 15

	if halfExp >= 0x1f {
		return uint16(halfSign | uint32(0x7c00))
	}

	if halfExp <= 0 {
		if 14-halfExp > 24 {
			return uint16(halfSign)
		}
		coef := coef | uint32(0x00800000)
		halfCoef := coef >> uint32(14-halfExp)
		roundBit := uint32(1) << uint32(13-halfExp)
		if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
			halfCoef++
		}
		return uint16(halfSign | halfCoef)
	}

	uHalfExp := uint32(halfExp) << 10
	halfCoef := coef >> 13
	roundBit := uint32(0x00001000)
	if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
		return uint16((halfSign | uHalfExp | halfCoef) + 1)
	}
	return uint16(halfSign | uHalfExp | halfCoef)
}
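The helpers above are meant to be used together: PrecisionFromfloat32 is the cheap pre-filter, and Fromfloat32/Float32 do the actual narrowing and lossless widening. A minimal sketch of that flow, assuming the vendored package is the upstream github.com/x448/float16 module that these comments reference (the import path is an assumption, not confirmed by this diff):

```go
package main

import (
	"fmt"

	"github.com/x448/float16" // assumed import path for the vendored float16 package
)

func main() {
	f32 := float32(1.5)

	// Cheap pre-check: will f32 survive a float32 -> float16 -> float32 round trip?
	if float16.PrecisionFromfloat32(f32) == float16.PrecisionExact {
		f16 := float16.Fromfloat32(f32) // IEEE round-to-nearest-even narrowing
		back := f16.Float32()           // lossless widening back to float32
		fmt.Printf("bits=0x%04x value=%v exact=%v\n", f16.Bits(), back, back == f32)
	}

	// Out-of-range magnitudes are reported before any conversion is attempted.
	fmt.Println(float16.PrecisionFromfloat32(1e10) == float16.PrecisionOverflow) // true
}
```

This is the pattern a CBOR encoder can use to decide whether a float32 may be stored as a float16 without losing value.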
4
vendor/golang.org/x/mod/LICENSE
generated
vendored
@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
4
vendor/golang.org/x/net/LICENSE
generated
vendored
@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
56
vendor/golang.org/x/net/context/context.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
//	func DoSomething(ctx context.Context, arg Arg) error {
//		// ... use ctx ...
//	}
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"

// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
	return background
}

// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
	return todo
}
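The package comment above spells out the calling conventions: the Context is the first parameter, it is never nil, and it carries only request-scoped data. A minimal sketch of those rules in use against this vendored package; fetchUser is a hypothetical helper standing in for real work:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// fetchUser follows the package's guidance: the Context comes first and is
// consulted before and during the work.
func fetchUser(ctx context.Context, id int) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond): // stand-in for a real lookup
		return fmt.Sprintf("user-%d", id), nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // always release the timer, even on the success path

	u, err := fetchUser(ctx, 42)
	fmt.Println(u, err)
}
```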
72
vendor/golang.org/x/net/context/go17.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7

package context

import (
	"context" // standard library's context, as of Go 1.7
	"time"
)

var (
	todo       = context.TODO()
	background = context.Background()
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	ctx, f := context.WithCancel(parent)
	return ctx, f
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	ctx, f := context.WithDeadline(parent, deadline)
	return ctx, f
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return context.WithValue(parent, key, val)
}
20
vendor/golang.org/x/net/context/go19.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
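Because these are type aliases, on Go 1.9+ a golang.org/x/net/context.Context and a standard library context.Context are literally the same type, so values move between the two packages with no conversion. A small sketch of what that buys you (the function name here is illustrative only):

```go
package main

import (
	stdctx "context"
	"fmt"

	netctx "golang.org/x/net/context"
)

// needsStd takes a standard-library context.
func needsStd(ctx stdctx.Context) { fmt.Println(ctx.Err()) }

func main() {
	// The alias means the standard Background() already satisfies the
	// x/net/context type; no wrapping or adaptation is required.
	var ctx netctx.Context = stdctx.Background()
	needsStd(ctx)
}
```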
300
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.7

package context

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int

func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

func (*emptyCtx) Done() <-chan struct{} {
	return nil
}

func (*emptyCtx) Err() error {
	return nil
}

func (*emptyCtx) Value(key interface{}) interface{} {
	return nil
}

func (e *emptyCtx) String() string {
	switch e {
	case background:
		return "context.Background"
	case todo:
		return "context.TODO"
	}
	return "unknown empty Context"
}

var (
	background = new(emptyCtx)
	todo       = new(emptyCtx)
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	c := newCancelCtx(parent)
	propagateCancel(parent, c)
	return c, func() { c.cancel(true, Canceled) }
}

// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
	return &cancelCtx{
		Context: parent,
		done:    make(chan struct{}),
	}
}

// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
	if parent.Done() == nil {
		return // parent is never canceled
	}
	if p, ok := parentCancelCtx(parent); ok {
		p.mu.Lock()
		if p.err != nil {
			// parent has already been canceled
			child.cancel(false, p.err)
		} else {
			if p.children == nil {
				p.children = make(map[canceler]bool)
			}
			p.children[child] = true
		}
		p.mu.Unlock()
	} else {
		go func() {
			select {
			case <-parent.Done():
				child.cancel(false, parent.Err())
			case <-child.Done():
			}
		}()
	}
}

// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
	for {
		switch c := parent.(type) {
		case *cancelCtx:
			return c, true
		case *timerCtx:
			return c.cancelCtx, true
		case *valueCtx:
			parent = c.Context
		default:
			return nil, false
		}
	}
}

// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
	p, ok := parentCancelCtx(parent)
	if !ok {
		return
	}
	p.mu.Lock()
	if p.children != nil {
		delete(p.children, child)
	}
	p.mu.Unlock()
}

// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
	cancel(removeFromParent bool, err error)
	Done() <-chan struct{}
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
	Context

	done chan struct{} // closed by the first cancel call.

	mu       sync.Mutex
	children map[canceler]bool // set to nil by the first cancel call
	err      error             // set to non-nil by the first cancel call
}

func (c *cancelCtx) Done() <-chan struct{} {
	return c.done
}

func (c *cancelCtx) Err() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.err
}

func (c *cancelCtx) String() string {
	return fmt.Sprintf("%v.WithCancel", c.Context)
}

// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
	if err == nil {
		panic("context: internal error: missing cancel error")
	}
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return // already canceled
	}
	c.err = err
	close(c.done)
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err)
	}
	c.children = nil
	c.mu.Unlock()

	if removeFromParent {
		removeChild(c.Context, c)
	}
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		cancelCtx: newCancelCtx(parent),
		deadline:  deadline,
	}
	propagateCancel(parent, c)
	d := deadline.Sub(time.Now())
	if d <= 0 {
		c.cancel(true, DeadlineExceeded) // deadline has already passed
		return c, func() { c.cancel(true, Canceled) }
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err == nil {
		c.timer = time.AfterFunc(d, func() {
			c.cancel(true, DeadlineExceeded)
		})
	}
	return c, func() { c.cancel(true, Canceled) }
}

// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	*cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}

func (c *timerCtx) cancel(removeFromParent bool, err error) {
	c.cancelCtx.cancel(false, err)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val interface{}
}

func (c *valueCtx) String() string {
	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}

func (c *valueCtx) Value(key interface{}) interface{} {
	if c.key == key {
		return c.val
	}
	return c.Context.Value(key)
}
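The fallback file above wires cancellation by hand: propagateCancel registers each child with its nearest *cancelCtx ancestor, and cancel closes the Done channel and fans out to the registered children. The observable behavior is the same whichever build-tagged file is selected, so a short sketch against the public API shows it; on a modern toolchain this actually exercises go17.go, but the semantics match the code above:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	child, cancelChild := context.WithCancel(parent)
	defer cancelChild()

	// Canceling the parent propagates to the child: the child was registered
	// with the parent, so its Done channel closes and Err reports Canceled.
	cancelParent()
	<-child.Done()
	fmt.Println(child.Err()) // context canceled
}
```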
109
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.9

package context

import "time"

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	//	// Stream generates values with DoSomething and sends them to out
	//	// until DoSomething returns an error or ctx.Done is closed.
	//	func Stream(ctx context.Context, out chan<- Value) error {
	//		for {
	//			v, err := DoSomething(ctx)
	//			if err != nil {
	//				return err
	//			}
	//			select {
	//			case <-ctx.Done():
	//				return ctx.Err()
	//			case out <- v:
	//			}
	//		}
	//	}
	//
	// See http://blog.golang.org/pipelines for more examples of how to use
	// a Done channel for cancelation.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stores using that key:
	//
	//	// Package user defines a User type that's stored in Contexts.
	//	package user
	//
	//	import "golang.org/x/net/context"
	//
	//	// User is the type of value stored in the Contexts.
	//	type User struct {...}
	//
	//	// key is an unexported type for keys defined in this package.
	//	// This prevents collisions with keys defined in other packages.
	//	type key int
	//
	//	// userKey is the key for user.User values in Contexts. It is
	//	// unexported; clients use user.NewContext and user.FromContext
	//	// instead of using this key directly.
	//	var userKey key = 0
	//
	//	// NewContext returns a new Context that carries value u.
	//	func NewContext(ctx context.Context, u *User) context.Context {
	//		return context.WithValue(ctx, userKey, u)
	//	}
	//
	//	// FromContext returns the User value stored in ctx, if any.
	//	func FromContext(ctx context.Context) (*User, bool) {
	//		u, ok := ctx.Value(userKey).(*User)
	//		return u, ok
	//	}
	Value(key interface{}) interface{}
}

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
9
vendor/golang.org/x/net/html/doc.go
generated
vendored
@ -78,11 +78,16 @@ example, to process each anchor node in depth-first order:
	if err != nil {
		// ...
	}
	for n := range doc.Descendants() {
	var f func(*html.Node)
	f = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			// Do something with n...
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			f(c)
		}
	}
	f(doc)

The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
@ -99,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML
parsing specification respectively. While the tokenizer parses and normalizes
individual HTML tokens, only the parser constructs the DOM tree from the
tokenized HTML, as described in the tree construction stage of the
specification, dynamically modifying or extending the document's DOM tree.
specification, dynamically modifying or extending the docuemnt's DOM tree.

If your use case requires semantically well-formed HTML documents, as defined by
the WHATWG specification, the parser should be used rather than the tokenizer.
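The hunk above swaps the documented recursive walk for the newer Node.Descendants iterator. A hedged sketch of the new style, assuming a Go 1.23+ toolchain and a golang.org/x/net/html version recent enough to provide Descendants:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(`<p><a href="/a">a</a><a href="/b">b</a></p>`))
	if err != nil {
		panic(err)
	}
	// Descendants yields every node under doc in depth-first order, replacing
	// the hand-rolled recursive walk shown in the old documentation example.
	for n := range doc.Descendants() {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					fmt.Println(a.Val)
				}
			}
		}
	}
}
```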
Some files were not shown because too many files have changed in this diff.