Merge pull request #800 from cynepco3hahue/k8s_1_13_4

Update vendor k8s dependencies to 1.13.4
This commit is contained in:
Artyom Lukianov 2019-05-16 11:35:34 +03:00 committed by GitHub
commit 83c12eaae2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1825 changed files with 281438 additions and 354399 deletions

View File

@ -2083,7 +2083,7 @@
}
},
"v1.OwnerReference": {
"description": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.",
"description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
"required": [
"apiVersion",
"kind",
@ -2151,7 +2151,7 @@
"type": "string"
},
"volumeMode": {
"description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.",
"description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
"$ref": "#/definitions/v1.PersistentVolumeMode"
},
"volumeName": {

View File

@ -32,16 +32,8 @@ func init() {
// flags
flag.StringVar(&configPath, "kubeconfig", os.Getenv("KUBECONFIG"), "(Optional) Overrides $KUBECONFIG")
flag.StringVar(&masterURL, "server", "", "(Optional) URL address of a remote api server. Do not set for local clusters.")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
// get the verbose level so it can be passed to the importer pod
defVerbose := fmt.Sprintf("%d", 1) // note flag values are strings

View File

@ -42,16 +42,8 @@ var (
func init() {
namedPipe = flag.String("pipedir", "nopipedir", "The name and directory of the named pipe to read from")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
prometheus.MustRegister(progress)
ownerUID, _ = util.ParseEnvVar(common.OwnerUID, false)

View File

@ -46,16 +46,8 @@ func init() {
// flags
flag.StringVar(&configPath, "kubeconfig", os.Getenv("KUBECONFIG"), "(Optional) Overrides $KUBECONFIG")
flag.StringVar(&masterURL, "server", "", "(Optional) URL address of a remote api server. Do not set for local clusters.")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
importerImage = getRequiredEnvVar("IMPORTER_IMAGE")
clonerImage = getRequiredEnvVar("CLONER_IMAGE")

View File

@ -33,16 +33,8 @@ import (
)
func init() {
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
}
func main() {

View File

@ -30,16 +30,8 @@ func init() {
// flags
flag.StringVar(&configPath, "kubeconfig", os.Getenv("KUBECONFIG"), "(Optional) Overrides $KUBECONFIG")
flag.StringVar(&masterURL, "server", "", "(Optional) URL address of a remote api server. Do not set for local clusters.")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
// get the verbose level so it can be passed to the importer pod
defVerbose := fmt.Sprintf("%d", 1) // note flag values are strings

View File

@ -37,17 +37,8 @@ const (
)
func init() {
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
}
func main() {

74
glide.lock generated
View File

@ -1,6 +1,8 @@
hash: 7a2f8c7580e6b8db6d097f7074939a303308b0d830fc09676996e6033297da4d
updated: 2019-05-13T17:16:56.5836088Z
hash: 017a56fcd0a1df5f134329ef8570a2728b823748621c728c13d287f70e91285e
updated: 2019-05-15T08:28:52.7344063Z
imports:
- name: github.com/appscode/jsonpatch
version: 7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2
- name: github.com/beorn7/perks
version: 3ac7bf7a47d159a033b107610db8a1b6575507a4
subpackages:
@ -19,8 +21,10 @@ imports:
- log
- name: github.com/emicklei/go-restful-openapi
version: b7062368c258c9e8f8cbe9dd2e6aebfa1b747be6
- name: github.com/evanphx/json-patch
version: 5858425f75500d40c52783dce87d085a483ce135
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
version: c7ce16629ff4cd059ed96ed06419dd3856fd3577
- name: github.com/go-ini/ini
version: 3be5ad479f69d4e08d7fe25edf79bf3346bd658e
- name: github.com/go-logr/logr
@ -28,20 +32,18 @@ imports:
- name: github.com/go-logr/zapr
version: 03f06a783fbb7dfaf3f629c7825480e43a7105e6
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
version: 8483a886a90412cd6858df4ea3483dce9c8e35a3
- name: github.com/go-openapi/spec
version: 1de3e0542de65ad8d75452a595886fdd0befb363
version: 5bae59e25b21498baea7f9d46e9c147ec106a42e
- name: github.com/go-openapi/swag
version: f3f9494671f93fcff853e3c6e9e948b3eb71e590
version: 5899d5c5e619fda5fa86e14795a835f473ca284c
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
subpackages:
- proto
- sortkeys
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/golang/groupcache
version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
subpackages:
@ -84,7 +86,7 @@ imports:
- name: github.com/imdario/mergo
version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58
- name: github.com/json-iterator/go
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
version: ab8a2e0c74be9d3be70b3184d9acc634935ded82
- name: github.com/kelseyhightower/envconfig
version: f611eb38b3875cc3bd991ca91c51d06446afa14c
- name: github.com/mailru/easyjson
@ -93,8 +95,6 @@ imports:
- buffer
- jlexer
- jwriter
- name: github.com/mattbaird/jsonpatch
version: 81af80346b1a01caae0cbc27fd3c1ba5b11e189f
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
@ -113,7 +113,7 @@ imports:
- name: github.com/modern-go/concurrent
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
- name: github.com/modern-go/reflect2
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
version: 94122c33edd36123c84d5368cfb2b69df93a0ec8
- name: github.com/onsi/ginkgo
version: 3774a09d95489ccaa16032e0770d08ea77ba6184
subpackages:
@ -152,12 +152,12 @@ imports:
- matchers/support/goraph/util
- types
- name: github.com/openshift/api
version: 8741ff068a473be041d7bafb4502c12e4f10aab5
version: 3a6077f1f910bfaec1f34ae9db3492a52e804ae0
subpackages:
- route/v1
- security/v1
- name: github.com/openshift/client-go
version: 431ec9a26e5021f35fa41ee9a89842db9bfdb370
version: 84c2b942258aea2462e675e03aeb8eb4cb5f3c29
subpackages:
- route/clientset/versioned
- route/clientset/versioned/fake
@ -213,11 +213,11 @@ imports:
- name: github.com/xi2/xz
version: 48954b6210f8d154cb5f8484d3a3e1f83489309e
- name: go.uber.org/atomic
version: df976f2515e274675050de7b3f42545de80594fd
version: 8dc6146f7569370a472715e178d8ae31172ee6da
- name: go.uber.org/multierr
version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
version: ddea229ff1dff9e6fe8a6c0344ac73b09e81fce5
- name: go.uber.org/zap
version: 8a2ee5670ced5d94154bf385dc6a362722945daf
version: 67bc79d13d155c02fd008f721863ff8cc5f30659
subpackages:
- buffer
- internal/bufferpool
@ -285,7 +285,7 @@ imports:
subpackages:
- rate
- name: google.golang.org/appengine
version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610
version: 311d3c5cf9373249645db030e53c37c209a8b378
subpackages:
- internal
- internal/base
@ -306,9 +306,9 @@ imports:
- name: gopkg.in/tomb.v1
version: c131134a1947e9afd9cecfe11f4c6dff0732ae58
- name: gopkg.in/yaml.v2
version: 670d4cfef0544295bc27a114dbac37980d83185a
version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
- name: k8s.io/api
version: 6db15a15d2d3874a6c3ddb2140ac9f3bc7058428
version: 5cb15d34447165a97c76ed5a60e4e99c8a01ecfe
subpackages:
- admission/v1beta1
- admissionregistration/v1alpha1
@ -316,6 +316,7 @@ imports:
- apps/v1
- apps/v1beta1
- apps/v1beta2
- auditregistration/v1alpha1
- authentication/v1
- authentication/v1beta1
- authorization/v1
@ -344,7 +345,7 @@ imports:
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apiextensions-apiserver
version: 853f76028711219c2fc251fd5184f23fd44a7aa0
version: d002e88f6236312f0289d9d1deab106751718ff0
subpackages:
- pkg/apis/apiextensions
- pkg/apis/apiextensions/v1beta1
@ -352,7 +353,7 @@ imports:
- pkg/client/clientset/clientset/scheme
- pkg/client/clientset/clientset/typed/apiextensions/v1beta1
- name: k8s.io/apimachinery
version: 49ce2735e5074ffc3f8190c8406cf51a96302dad
version: 86fb29eff6288413d76bd8506874fddd9fccdff0
subpackages:
- pkg/api/errors
- pkg/api/meta
@ -402,7 +403,7 @@ imports:
- third_party/forked/golang/netutil
- third_party/forked/golang/reflect
- name: k8s.io/client-go
version: 5e6a3d4e34f694e895b13ae728111e726a5b69df
version: b40b2a5939e43f7ffe0028ad67586b7ce50bb675
subpackages:
- discovery
- discovery/fake
@ -415,6 +416,8 @@ imports:
- informers/apps/v1
- informers/apps/v1beta1
- informers/apps/v1beta2
- informers/auditregistration
- informers/auditregistration/v1alpha1
- informers/autoscaling
- informers/autoscaling/v1
- informers/autoscaling/v2beta1
@ -464,6 +467,8 @@ imports:
- kubernetes/typed/apps/v1beta1/fake
- kubernetes/typed/apps/v1beta2
- kubernetes/typed/apps/v1beta2/fake
- kubernetes/typed/auditregistration/v1alpha1
- kubernetes/typed/auditregistration/v1alpha1/fake
- kubernetes/typed/authentication/v1
- kubernetes/typed/authentication/v1/fake
- kubernetes/typed/authentication/v1beta1
@ -521,6 +526,7 @@ imports:
- listers/apps/v1
- listers/apps/v1beta1
- listers/apps/v1beta2
- listers/auditregistration/v1alpha1
- listers/autoscaling/v1
- listers/autoscaling/v2beta1
- listers/autoscaling/v2beta2
@ -553,7 +559,6 @@ imports:
- restmapper
- testing
- tools/auth
- tools/bootstrap/token/api
- tools/cache
- tools/cache/testing
- tools/clientcmd
@ -571,7 +576,6 @@ imports:
- transport/spdy
- util/buffer
- util/cert
- util/cert/triple
- util/connrotation
- util/exec
- util/flowcontrol
@ -579,14 +583,18 @@ imports:
- util/integer
- util/retry
- util/workqueue
- name: k8s.io/cluster-bootstrap
version: e96ff33745e46d3558d9ec78cd7cd6200937febe
subpackages:
- token/api
- name: k8s.io/code-generator
version: b1289fc74931d4b6b04bd1a259acfc88a2cb0a66
version: c2090bec4d9b1fb25de3812f868accc2bc9ecbae
- name: k8s.io/gengo
version: e17681d19d3ac4837a019ece36c2a0ec31ffe985
- name: k8s.io/klog
version: e88f7305ac70285fef29b8acebb00ba53136eab4
version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
- name: k8s.io/kube-aggregator
version: e95ebe3de8a24c3da1ad7606ca5a2444a64f4c82
version: 3e0149950b0e22a3b8579db52bd50e40d0dac10e
subpackages:
- pkg/apis/apiregistration
- pkg/apis/apiregistration/v1
@ -599,7 +607,7 @@ imports:
- pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1
- pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake
- name: k8s.io/kube-openapi
version: 0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803
version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d
subpackages:
- pkg/common
- pkg/util/proto
@ -610,7 +618,7 @@ imports:
- pkg/ginkgo-reporters
- pkg/polarion-xml
- name: sigs.k8s.io/controller-runtime
version: f6f0bc9611363b43664d08fb097ab13243ef621d
version: 12d98582e72927b6cd0123e2b4e819f9341ce62c
subpackages:
- pkg/cache
- pkg/cache/internal
@ -649,4 +657,6 @@ imports:
- integration
- integration/addr
- integration/internal
- name: sigs.k8s.io/yaml
version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480
testImports: []

View File

@ -6,11 +6,17 @@ import:
- package: github.com/minio/minio-go
version: ^4.0.6
- package: k8s.io/apimachinery
version: kubernetes-1.12.4
version: kubernetes-1.13.4
- package: k8s.io/code-generator
version: kubernetes-1.12.4
version: kubernetes-1.13.4
- package: k8s.io/client-go
version: kubernetes-1.12.4
version: kubernetes-1.13.4
- package: k8s.io/cluster-bootstrap
version: kubernetes-1.13.4
- package: k8s.io/kube-aggregator
version: kubernetes-1.13.4
- package: k8s.io/apiextensions-apiserver
version: kubernetes-1.13.4
- package: k8s.io/gengo
- package: github.com/ulikunitz/xz
- package: kubevirt.io/qe-tools
@ -18,8 +24,6 @@ import:
repo: https://github.com/kubevirt/qe-tools.git
- package: github.com/spf13/pflag
- package: github.com/xi2/xz
- package: k8s.io/kube-aggregator
version: kubernetes-1.12.4
- package: github.com/emicklei/go-restful
version: v2.6.0
- package: gopkg.in/square/go-jose.v2
@ -29,12 +33,10 @@ import:
- package: github.com/emicklei/go-restful-openapi
version: ^1.0.0
- package: sigs.k8s.io/controller-runtime
version: v0.1.9
- package: k8s.io/apiextensions-apiserver
version: kubernetes-1.12.4
version: v0.1.10
- package: github.com/kelseyhightower/envconfig
version: ^v1.3.0
- package: github.com/openshift/api
version: release-3.11
version: rebase-1.13.4
- package: github.com/openshift/client-go
version: release-3.11
version: rebase-1.13.4

View File

@ -4,7 +4,7 @@ RUN dnf install -y qemu xz gzip git gradle gcc autoconf automake libtool python
RUN pip3 install j2cli && pip3 install operator-courier
ENV GIMME_GO_VERSION=1.11.5 GOPATH="/go" KUBEBUILDER_VERSION="1.0.7" ARCH="amd64"
ENV GIMME_GO_VERSION=1.11.5 GOPATH="/go" KUBEBUILDER_VERSION="1.0.8" ARCH="amd64"
RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh
@ -34,7 +34,7 @@ RUN \
git checkout kubernetes-1.12.4 && \
go install ./... ) && \
( cd $GOPATH/src/k8s.io/kube-openapi/cmd/openapi-gen && \
git checkout 0317810137be915b9cf888946c6e115c1bfac693 && \
git checkout a01b7d5d6c2258c80a4a10070f3dee9cd575d9c7 && \
go install ./... ) && \
(go get -u -d github.com/Masterminds/glide && \
cd $GOPATH/src/github.com/Masterminds/glide && \

View File

@ -2,4 +2,4 @@ FROM fedora:29
COPY ./cdi-operator /usr/bin/cdi-operator
ENTRYPOINT [ "/usr/bin/cdi-operator", "-alsologtostderr"]
ENTRYPOINT [ "/usr/bin/cdi-operator"]

View File

@ -57,6 +57,7 @@ func schema_pkg_apis_core_v1alpha1_CDI(ref common.ReferenceCallback) common.Open
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDI is the CDI Operator CRD",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -101,6 +102,7 @@ func schema_pkg_apis_core_v1alpha1_CDICondition(ref common.ReferenceCallback) co
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDICondition represents a condition of a CDI deployment",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
@ -150,6 +152,7 @@ func schema_pkg_apis_core_v1alpha1_CDIConfig(ref common.ReferenceCallback) commo
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIConfig provides a user configuration for CDI",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -194,6 +197,7 @@ func schema_pkg_apis_core_v1alpha1_CDIConfigList(ref common.ReferenceCallback) c
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIConfigList provides the needed parameters to do request a list of CDIConfigs from the system",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -241,6 +245,7 @@ func schema_pkg_apis_core_v1alpha1_CDIConfigSpec(ref common.ReferenceCallback) c
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIConfigSpec defines specification for user configuration",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"uploadProxyURLOverride": {
SchemaProps: spec.SchemaProps{
@ -257,7 +262,6 @@ func schema_pkg_apis_core_v1alpha1_CDIConfigSpec(ref common.ReferenceCallback) c
},
},
},
Dependencies: []string{},
}
}
@ -266,6 +270,7 @@ func schema_pkg_apis_core_v1alpha1_CDIConfigStatus(ref common.ReferenceCallback)
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIConfigStatus provides",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"uploadProxyURL": {
SchemaProps: spec.SchemaProps{
@ -282,7 +287,6 @@ func schema_pkg_apis_core_v1alpha1_CDIConfigStatus(ref common.ReferenceCallback)
},
},
},
Dependencies: []string{},
}
}
@ -291,6 +295,7 @@ func schema_pkg_apis_core_v1alpha1_CDIList(ref common.ReferenceCallback) common.
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIList provides the needed parameters to do request a list of CDIs from the system",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -338,6 +343,7 @@ func schema_pkg_apis_core_v1alpha1_CDISpec(ref common.ReferenceCallback) common.
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDISpec defines our specification for the CDI installation",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"imageRegistry": {
SchemaProps: spec.SchemaProps{
@ -360,7 +366,6 @@ func schema_pkg_apis_core_v1alpha1_CDISpec(ref common.ReferenceCallback) common.
},
},
},
Dependencies: []string{},
}
}
@ -369,6 +374,7 @@ func schema_pkg_apis_core_v1alpha1_CDIStatus(ref common.ReferenceCallback) commo
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CDIStatus defines the status of the CDI installation",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
@ -419,6 +425,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolume(ref common.ReferenceCallback) comm
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolume provides a representation of our data volume",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -463,10 +470,9 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeBlankImage(ref common.ReferenceCall
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC",
Properties: map[string]spec.Schema{},
Type: []string{"object"},
},
},
Dependencies: []string{},
}
}
@ -475,6 +481,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeList(ref common.ReferenceCallback)
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeList provides the needed parameters to do request a list of Data Volumes from the system",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -522,6 +529,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSource(ref common.ReferenceCallback
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSource represents the source for our Data Volume, this can be HTTP, S3, Registry or an existing PVC",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"http": {
SchemaProps: spec.SchemaProps{
@ -566,6 +574,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceHTTP(ref common.ReferenceCall
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSourceHTTP provides the parameters to create a Data Volume from an HTTP source",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"url": {
SchemaProps: spec.SchemaProps{
@ -591,7 +600,6 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceHTTP(ref common.ReferenceCall
},
},
},
Dependencies: []string{},
}
}
@ -600,6 +608,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourcePVC(ref common.ReferenceCallb
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"namespace": {
SchemaProps: spec.SchemaProps{
@ -616,7 +625,6 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourcePVC(ref common.ReferenceCallb
},
},
},
Dependencies: []string{},
}
}
@ -625,6 +633,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceRegistry(ref common.Reference
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSourceRegistry provides the parameters to create a Data Volume from an registry source",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"url": {
SchemaProps: spec.SchemaProps{
@ -650,7 +659,6 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceRegistry(ref common.Reference
},
},
},
Dependencies: []string{},
}
}
@ -659,6 +667,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceS3(ref common.ReferenceCallba
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"url": {
SchemaProps: spec.SchemaProps{
@ -677,7 +686,6 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceS3(ref common.ReferenceCallba
},
},
},
Dependencies: []string{},
}
}
@ -686,10 +694,9 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSourceUpload(ref common.ReferenceCa
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source",
Properties: map[string]spec.Schema{},
Type: []string{"object"},
},
},
Dependencies: []string{},
}
}
@ -698,6 +705,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeSpec(ref common.ReferenceCallback)
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeSpec defines our specification for a DataVolume type",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"source": {
SchemaProps: spec.SchemaProps{
@ -732,6 +740,7 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeStatus(ref common.ReferenceCallback
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DataVolumeStatus provides the parameters to store the phase of the Data Volume",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
@ -749,6 +758,5 @@ func schema_pkg_apis_core_v1alpha1_DataVolumeStatus(ref common.ReferenceCallback
},
},
},
Dependencies: []string{},
}
}

View File

@ -41,6 +41,7 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequest(ref common.ReferenceCall
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "UploadTokenRequest is the CR used to initiate a CDI upload",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -87,6 +88,7 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequestList(ref common.Reference
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "UploadTokenRequestList contains a list of UploadTokenRequests",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@ -134,6 +136,7 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequestSpec(ref common.Reference
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "UploadTokenRequestSpec defines the parameters of the token request",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pvcName": {
SchemaProps: spec.SchemaProps{
@ -146,7 +149,6 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequestSpec(ref common.Reference
Required: []string{"pvcName"},
},
},
Dependencies: []string{},
}
}
@ -155,6 +157,7 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequestStatus(ref common.Referen
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "UploadTokenRequestStatus stores the status of a token request",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"token": {
SchemaProps: spec.SchemaProps{
@ -166,6 +169,5 @@ func schema_pkg_apis_upload_v1alpha1_UploadTokenRequestStatus(ref common.Referen
},
},
},
Dependencies: []string{},
}
}

View File

@ -21,10 +21,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/cert/triple"
"k8s.io/klog"
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
cdicorev1alpha1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
cdiuploadv1alpha1 "kubevirt.io/containerized-data-importer/pkg/apis/upload/v1alpha1"
validatingwebhook "kubevirt.io/containerized-data-importer/pkg/apiserver/webhooks/validating-webhook"
@ -33,6 +33,7 @@ import (
"kubevirt.io/containerized-data-importer/pkg/keys"
"kubevirt.io/containerized-data-importer/pkg/operator"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
const (

View File

@ -13,8 +13,6 @@ import (
restful "github.com/emicklei/go-restful"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/cert/triple"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -26,6 +24,7 @@ import (
cdiuploadv1alpha1 "kubevirt.io/containerized-data-importer/pkg/apis/upload/v1alpha1"
"kubevirt.io/containerized-data-importer/pkg/keys/keystest"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
var foo aggregatorapifake.Clientset

View File

@ -19,6 +19,8 @@ limitations under the License.
package v1alpha1
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
@ -73,10 +75,15 @@ func (c *cDIs) Get(name string, options v1.GetOptions) (result *v1alpha1.CDI, er
// List takes label and field selectors, and returns the list of CDIs that match those selectors.
func (c *cDIs) List(opts v1.ListOptions) (result *v1alpha1.CDIList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.CDIList{}
err = c.client.Get().
Resource("cdis").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
@ -84,10 +91,15 @@ func (c *cDIs) List(opts v1.ListOptions) (result *v1alpha1.CDIList, err error) {
// Watch returns a watch.Interface that watches the requested cDIs.
func (c *cDIs) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("cdis").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
@ -141,9 +153,14 @@ func (c *cDIs) Delete(name string, options *v1.DeleteOptions) error {
// DeleteCollection deletes a collection of objects.
func (c *cDIs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("cdis").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()

View File

@ -19,6 +19,8 @@ limitations under the License.
package v1alpha1
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
@ -73,10 +75,15 @@ func (c *cDIConfigs) Get(name string, options v1.GetOptions) (result *v1alpha1.C
// List takes label and field selectors, and returns the list of CDIConfigs that match those selectors.
func (c *cDIConfigs) List(opts v1.ListOptions) (result *v1alpha1.CDIConfigList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.CDIConfigList{}
err = c.client.Get().
Resource("cdiconfigs").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
@ -84,10 +91,15 @@ func (c *cDIConfigs) List(opts v1.ListOptions) (result *v1alpha1.CDIConfigList,
// Watch returns a watch.Interface that watches the requested cDIConfigs.
func (c *cDIConfigs) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("cdiconfigs").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
@ -141,9 +153,14 @@ func (c *cDIConfigs) Delete(name string, options *v1.DeleteOptions) error {
// DeleteCollection deletes a collection of objects.
func (c *cDIConfigs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("cdiconfigs").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()

View File

@ -19,6 +19,8 @@ limitations under the License.
package v1alpha1
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
@ -76,11 +78,16 @@ func (c *dataVolumes) Get(name string, options v1.GetOptions) (result *v1alpha1.
// List takes label and field selectors, and returns the list of DataVolumes that match those selectors.
func (c *dataVolumes) List(opts v1.ListOptions) (result *v1alpha1.DataVolumeList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.DataVolumeList{}
err = c.client.Get().
Namespace(c.ns).
Resource("datavolumes").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
@ -88,11 +95,16 @@ func (c *dataVolumes) List(opts v1.ListOptions) (result *v1alpha1.DataVolumeList
// Watch returns a watch.Interface that watches the requested dataVolumes.
func (c *dataVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("datavolumes").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
@ -150,10 +162,15 @@ func (c *dataVolumes) Delete(name string, options *v1.DeleteOptions) error {
// DeleteCollection deletes a collection of objects.
func (c *dataVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("datavolumes").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()

View File

@ -123,7 +123,7 @@ func (c *FakeCDIs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
// Patch applies the patch and returns the patched cDI.
func (c *FakeCDIs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CDI, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(cdisResource, name, data, subresources...), &v1alpha1.CDI{})
Invokes(testing.NewRootPatchSubresourceAction(cdisResource, name, pt, data, subresources...), &v1alpha1.CDI{})
if obj == nil {
return nil, err
}

View File

@ -123,7 +123,7 @@ func (c *FakeCDIConfigs) DeleteCollection(options *v1.DeleteOptions, listOptions
// Patch applies the patch and returns the patched cDIConfig.
func (c *FakeCDIConfigs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CDIConfig, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(cdiconfigsResource, name, data, subresources...), &v1alpha1.CDIConfig{})
Invokes(testing.NewRootPatchSubresourceAction(cdiconfigsResource, name, pt, data, subresources...), &v1alpha1.CDIConfig{})
if obj == nil {
return nil, err
}

View File

@ -131,7 +131,7 @@ func (c *FakeDataVolumes) DeleteCollection(options *v1.DeleteOptions, listOption
// Patch applies the patch and returns the patched dataVolume.
func (c *FakeDataVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.DataVolume, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(datavolumesResource, c.ns, name, data, subresources...), &v1alpha1.DataVolume{})
Invokes(testing.NewPatchSubresourceAction(datavolumesResource, c.ns, name, pt, data, subresources...), &v1alpha1.DataVolume{})
if obj == nil {
return nil, err

View File

@ -131,7 +131,7 @@ func (c *FakeUploadTokenRequests) DeleteCollection(options *v1.DeleteOptions, li
// Patch applies the patch and returns the patched uploadTokenRequest.
func (c *FakeUploadTokenRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.UploadTokenRequest, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(uploadtokenrequestsResource, c.ns, name, data, subresources...), &v1alpha1.UploadTokenRequest{})
Invokes(testing.NewPatchSubresourceAction(uploadtokenrequestsResource, c.ns, name, pt, data, subresources...), &v1alpha1.UploadTokenRequest{})
if obj == nil {
return nil, err

View File

@ -19,6 +19,8 @@ limitations under the License.
package v1alpha1
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
@ -76,11 +78,16 @@ func (c *uploadTokenRequests) Get(name string, options v1.GetOptions) (result *v
// List takes label and field selectors, and returns the list of UploadTokenRequests that match those selectors.
func (c *uploadTokenRequests) List(opts v1.ListOptions) (result *v1alpha1.UploadTokenRequestList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.UploadTokenRequestList{}
err = c.client.Get().
Namespace(c.ns).
Resource("uploadtokenrequests").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
@ -88,11 +95,16 @@ func (c *uploadTokenRequests) List(opts v1.ListOptions) (result *v1alpha1.Upload
// Watch returns a watch.Interface that watches the requested uploadTokenRequests.
func (c *uploadTokenRequests) Watch(opts v1.ListOptions) (watch.Interface, error) {
	// Translate the client-side TimeoutSeconds option into a request timeout.
	timeout := time.Duration(0)
	if secs := opts.TimeoutSeconds; secs != nil {
		timeout = time.Duration(*secs) * time.Second
	}
	opts.Watch = true
	req := c.client.Get().
		Namespace(c.ns).
		Resource("uploadtokenrequests").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout)
	return req.Watch()
}
@ -150,10 +162,15 @@ func (c *uploadTokenRequests) Delete(name string, options *v1.DeleteOptions) err
// DeleteCollection deletes a collection of objects.
func (c *uploadTokenRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("uploadtokenrequests").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()

View File

@ -27,6 +27,7 @@ import (
versioned "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@ -35,4 +36,5 @@ type SharedInformerFactory interface {
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)

View File

@ -29,13 +29,13 @@ import (
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/cert/triple"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"kubevirt.io/containerized-data-importer/pkg/keys"
"kubevirt.io/containerized-data-importer/pkg/util"
clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned"
"kubevirt.io/containerized-data-importer/pkg/keys"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
const (

View File

@ -21,20 +21,19 @@ import (
"testing"
"time"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/util/cert/triple"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
cdifake "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/fake"
"kubevirt.io/containerized-data-importer/pkg/common"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
const (

View File

@ -20,14 +20,15 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/util/cert/triple"
"k8s.io/klog"
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned"
"kubevirt.io/containerized-data-importer/pkg/common"
"kubevirt.io/containerized-data-importer/pkg/keys"
"kubevirt.io/containerized-data-importer/pkg/operator"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
const (

View File

@ -17,9 +17,9 @@ import (
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
k8sfake "k8s.io/client-go/kubernetes/fake"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
cdifake "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/fake"

View File

@ -20,10 +20,10 @@ import (
. "github.com/onsi/gomega"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/cert/triple"
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
"kubevirt.io/containerized-data-importer/pkg/util"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
var (

View File

@ -20,13 +20,13 @@ import (
"crypto/rsa"
"crypto/x509"
"k8s.io/client-go/util/cert/triple"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/cert"
"kubevirt.io/containerized-data-importer/pkg/common"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
// NewTLSSecret returns a new TLS secret from objects

View File

@ -29,10 +29,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/cert/triple"
"k8s.io/klog"
"kubevirt.io/containerized-data-importer/pkg/common"
"kubevirt.io/containerized-data-importer/pkg/operator"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
const (

View File

@ -33,8 +33,9 @@ import (
"k8s.io/apimachinery/pkg/util/diff"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/util/cert/triple"
"kubevirt.io/containerized-data-importer/pkg/keys/keystest"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
func tlsSecretCreateAction(namespace, secretName string, keyPair *triple.KeyPair, caCert *x509.Certificate) core.Action {

View File

@ -10,11 +10,10 @@ import (
"testing"
"time"
"k8s.io/client-go/util/cert/triple"
"k8s.io/client-go/util/cert"
"kubevirt.io/containerized-data-importer/pkg/apiserver"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
type httpClientConfig struct {

View File

@ -31,7 +31,7 @@ import (
"time"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/cert/triple"
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
)
func newServer() *uploadServerApp {

View File

@ -1,12 +1,9 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ -27,11 +24,13 @@ import (
certutil "k8s.io/client-go/util/cert"
)
// KeyPair contains a private key and certificate
type KeyPair struct {
	// Key is the RSA private key half of the pair.
	Key *rsa.PrivateKey
	// Cert is the x509 certificate half of the pair.
	Cert *x509.Certificate
}
// NewCA creates a new CA
func NewCA(name string) (*KeyPair, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
@ -53,6 +52,7 @@ func NewCA(name string) (*KeyPair, error) {
}, nil
}
// NewServerKeyPair creates a new server key pair
func NewServerKeyPair(ca *KeyPair, commonName, svcName, svcNamespace, dnsDomain string, ips, hostnames []string) (*KeyPair, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
@ -93,6 +93,7 @@ func NewServerKeyPair(ca *KeyPair, commonName, svcName, svcNamespace, dnsDomain
}, nil
}
// NewClientKeyPair creates a new client key pair
func NewClientKeyPair(ca *KeyPair, commonName string, organizations []string) (*KeyPair, error) {
key, err := certutil.NewPrivateKey()
if err != nil {

View File

@ -36,16 +36,8 @@ func main() {
certDir := flag.String("certDir", "", "")
inFile := flag.String("inFile", "", "")
outDir := flag.String("outDir", "", "")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
klog.Info("Generating test files")
ft := &formatTable{

View File

@ -37,16 +37,8 @@ func main() {
certDir := flag.String("certDir", "", "")
inFile := flag.String("inFile", "", "")
outDir := flag.String("outDir", "", "")
klog.InitFlags(nil)
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
f2 := klogFlags.Lookup(f1.Name)
if f2 != nil {
value := f1.Value.String()
f2.Value.Set(value)
}
})
ft := &formatTable{
[]string{""},

View File

@ -22,3 +22,5 @@ _testmain.go
*.exe
*.test
*.prof
.idea/

10
vendor/github.com/appscode/jsonpatch/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,10 @@
language: go
go:
- 1.x
- tip
env:
- GO111MODULE=on
script:
- go test -v

View File

@ -1,10 +1,15 @@
# jsonpatch
As per http://jsonpatch.com/ JSON Patch is specified in RFC 6902 from the IETF.
[![Build Status](https://travis-ci.org/appscode/jsonpatch.svg?branch=master)](https://travis-ci.org/appscode/jsonpatch)
[![Go Report Card](https://goreportcard.com/badge/appscode/jsonpatch "Go Report Card")](https://goreportcard.com/report/appscode/jsonpatch)
[![GoDoc](https://godoc.org/github.com/appscode/jsonpatch?status.svg "GoDoc")](https://godoc.org/github.com/appscode/jsonpatch)
As per http://jsonpatch.com JSON Patch is specified in RFC 6902 from the IETF.
JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs.
```bash
go get github.com/mattbaird/jsonpatch
```console
go get github.com/appscode/jsonpatch
```
I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and
@ -19,13 +24,15 @@ generate format like jsonpatch.com specifies. Here's an example of the patch for
```
The API is super simple
#example
## example
```go
package main
import (
"fmt"
"github.com/mattbaird/jsonpatch"
"github.com/appscode/jsonpatch"
)
var simpleA = `{"a":100, "b":200, "c":"hello"}`

8
vendor/github.com/appscode/jsonpatch/go.mod generated vendored Normal file
View File

@ -0,0 +1,8 @@
module github.com/appscode/jsonpatch
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v4.0.0+incompatible
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

8
vendor/github.com/appscode/jsonpatch/go.sum generated vendored Normal file
View File

@ -0,0 +1,8 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc=
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

View File

@ -8,20 +8,22 @@ import (
"strings"
)
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
var errBadJSONDoc = fmt.Errorf("invalid JSON Document")
type JsonPatchOperation struct {
type JsonPatchOperation = Operation
// Operation is a single JSON Patch (RFC 6902) operation; this package
// only ever produces the "add", "remove" and "replace" verbs.
type Operation struct {
	// Operation is the patch verb, serialized as "op".
	Operation string `json:"op"`
	// Path is the JSON Pointer the operation applies to.
	Path string `json:"path"`
	// Value is the operand for "add"/"replace"; omitted for "remove".
	Value interface{} `json:"value,omitempty"`
}
func (j *JsonPatchOperation) Json() string {
// Json renders the operation as its JSON text representation.
func (j *Operation) Json() string {
	// A marshal failure yields the empty string.
	data, _ := json.Marshal(j)
	return string(data)
}
func (j *JsonPatchOperation) MarshalJSON() ([]byte, error) {
func (j *Operation) MarshalJSON() ([]byte, error) {
var b bytes.Buffer
b.WriteString("{")
b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation))
@ -39,14 +41,14 @@ func (j *JsonPatchOperation) MarshalJSON() ([]byte, error) {
return b.Bytes(), nil
}
type ByPath []JsonPatchOperation
// ByPath implements sort.Interface, ordering patch operations
// lexicographically by their Path.
type ByPath []Operation

func (a ByPath) Len() int           { return len(a) }
func (a ByPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
func NewPatch(operation, path string, value interface{}) JsonPatchOperation {
return JsonPatchOperation{Operation: operation, Path: path, Value: value}
// NewPatch constructs a single patch Operation from its verb, target
// path and (possibly nil) value.
func NewPatch(operation, path string, value interface{}) Operation {
	return Operation{Operation: operation, Path: path, Value: value}
}
// CreatePatch creates a patch as specified in http://jsonpatch.com/
@ -55,7 +57,7 @@ func NewPatch(operation, path string, value interface{}) JsonPatchOperation {
// The function will return an array of JsonPatchOperations
//
// An error will be returned if any of the two documents are invalid.
func CreatePatch(a, b []byte) ([]JsonPatchOperation, error) {
func CreatePatch(a, b []byte) ([]Operation, error) {
aI := map[string]interface{}{}
bI := map[string]interface{}{}
err := json.Unmarshal(a, &aI)
@ -66,7 +68,7 @@ func CreatePatch(a, b []byte) ([]JsonPatchOperation, error) {
if err != nil {
return nil, errBadJSONDoc
}
return diff(aI, bI, "", []JsonPatchOperation{})
return diff(aI, bI, "", []Operation{})
}
// Returns true if the values matches (must be json types)
@ -78,22 +80,25 @@ func matchesValue(av, bv interface{}) bool {
}
switch at := av.(type) {
case string:
bt := bv.(string)
if bt == at {
bt, ok := bv.(string)
if ok && bt == at {
return true
}
case float64:
bt := bv.(float64)
if bt == at {
bt, ok := bv.(float64)
if ok && bt == at {
return true
}
case bool:
bt := bv.(bool)
if bt == at {
bt, ok := bv.(bool)
if ok && bt == at {
return true
}
case map[string]interface{}:
bt := bv.(map[string]interface{})
bt, ok := bv.(map[string]interface{})
if !ok {
return false
}
for key := range at {
if !matchesValue(at[key], bt[key]) {
return false
@ -106,7 +111,10 @@ func matchesValue(av, bv interface{}) bool {
}
return true
case []interface{}:
bt := bv.([]interface{})
bt, ok := bv.([]interface{})
if !ok {
return false
}
if len(bt) != len(at) {
return false
}
@ -148,7 +156,7 @@ func makePath(path string, newPart interface{}) string {
}
// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations.
func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation) ([]JsonPatchOperation, error) {
func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) {
for key, bv := range b {
p := makePath(path, key)
av, ok := a[key]
@ -157,11 +165,6 @@ func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation)
patch = append(patch, NewPatch("add", p, bv))
continue
}
// If types have changed, replace completely
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
patch = append(patch, NewPatch("replace", p, bv))
continue
}
// Types are the same, compare values
var err error
patch, err = handleValues(av, bv, p, patch)
@ -181,7 +184,21 @@ func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation)
return patch, nil
}
func handleValues(av, bv interface{}, p string, patch []JsonPatchOperation) ([]JsonPatchOperation, error) {
func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) {
{
at := reflect.TypeOf(av)
bt := reflect.TypeOf(bv)
if at == nil && bt == nil {
// do nothing
return patch, nil
} else if at == nil && bt != nil {
return append(patch, NewPatch("add", p, bv)), nil
} else if at != bt {
// If types have changed, replace completely (preserves null in destination)
return append(patch, NewPatch("replace", p, bv)), nil
}
}
var err error
switch at := av.(type) {
case map[string]interface{}:
@ -195,63 +212,125 @@ func handleValues(av, bv interface{}, p string, patch []JsonPatchOperation) ([]J
patch = append(patch, NewPatch("replace", p, bv))
}
case []interface{}:
bt, ok := bv.([]interface{})
if !ok {
// array replaced by non-array
patch = append(patch, NewPatch("replace", p, bv))
} else if len(at) != len(bt) {
// arrays are not the same length
patch = append(patch, compareArray(at, bt, p)...)
bt := bv.([]interface{})
if isSimpleArray(at) && isSimpleArray(bt) {
patch = append(patch, compareEditDistance(at, bt, p)...)
} else {
for i := range bt {
n := min(len(at), len(bt))
for i := len(at) - 1; i >= n; i-- {
patch = append(patch, NewPatch("remove", makePath(p, i), nil))
}
for i := n; i < len(bt); i++ {
patch = append(patch, NewPatch("add", makePath(p, i), bt[i]))
}
for i := 0; i < n; i++ {
var err error
patch, err = handleValues(at[i], bt[i], makePath(p, i), patch)
if err != nil {
return nil, err
}
}
}
case nil:
switch bv.(type) {
case nil:
// Both nil, fine.
default:
patch = append(patch, NewPatch("add", p, bv))
}
default:
panic(fmt.Sprintf("Unknown type:%T ", av))
}
return patch, nil
}
func compareArray(av, bv []interface{}, p string) []JsonPatchOperation {
retval := []JsonPatchOperation{}
// var err error
for i, v := range av {
found := false
for _, v2 := range bv {
if reflect.DeepEqual(v, v2) {
found = true
break
}
}
if !found {
retval = append(retval, NewPatch("remove", makePath(p, i), nil))
}
func isBasicType(a interface{}) bool {
switch a.(type) {
case string, float64, bool:
default:
return false
}
for i, v := range bv {
found := false
for _, v2 := range av {
if reflect.DeepEqual(v, v2) {
found = true
break
}
}
if !found {
retval = append(retval, NewPatch("add", makePath(p, i), v))
}
}
return retval
return true
}
// isSimpleArray reports whether the array's elements are "simple": each
// element is a JSON basic value (string, float64, bool) or a map whose
// values are all basic. Simple arrays are diffed with the edit-distance
// algorithm; anything else falls back to index-wise diffing.
func isSimpleArray(a []interface{}) bool {
	for i := range a {
		switch a[i].(type) {
		case string, float64, bool:
			// Basic value: keep scanning.
		default:
			val := reflect.ValueOf(a[i])
			if val.Kind() == reflect.Map {
				for _, k := range val.MapKeys() {
					av := val.MapIndex(k)
					// Unwrap pointer/interface entries; nil entries count as basic.
					if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface {
						if av.IsNil() {
							continue
						}
						av = av.Elem()
					}
					if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool {
						return false
					}
				}
				// NOTE(review): this returns after validating only the current
				// (first non-basic) map element, so later elements are never
				// inspected — presumably intentional upstream; confirm before
				// relying on it.
				return true
			}
			return false
		}
	}
	return true
}
// compareEditDistance diffs two simple arrays via the Wagner–Fischer
// edit-distance algorithm and converts the resulting edit script into
// patch operations rooted at path p.
// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm
// Adapted from https://github.com/texttheater/golang-levenshtein
func compareEditDistance(s, t []interface{}, p string) []Operation {
	rows, cols := len(s), len(t)

	// dist[i][j] is the edit distance between s[:i] and t[:j].
	dist := make([][]int, rows+1)
	for i := range dist {
		dist[i] = make([]int, cols+1)
		dist[i][0] = i
	}
	for j := 0; j <= cols; j++ {
		dist[0][j] = j
	}

	for j := 1; j <= cols; j++ {
		for i := 1; i <= rows; i++ {
			if reflect.DeepEqual(s[i-1], t[j-1]) {
				// Elements match: carry the diagonal, no edit needed.
				dist[i][j] = dist[i-1][j-1]
				continue
			}
			del := dist[i-1][j] + 1
			add := dist[i][j-1] + 1
			rep := dist[i-1][j-1] + 1
			dist[i][j] = min(rep, min(add, del))
		}
	}

	return backtrace(s, t, p, rows, cols, dist)
}
// min returns the smaller of its two arguments.
func min(x int, y int) int {
	if x <= y {
		return x
	}
	return y
}
// backtrace walks the Wagner–Fischer distance matrix from cell (i, j)
// back toward the origin, emitting one patch operation per edit in the
// optimal edit script that turns s into t. p is the JSON Pointer prefix
// for the array being diffed.
func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation {
	// Deletion: s[i-1] was removed.
	if i > 0 && matrix[i-1][j]+1 == matrix[i][j] {
		op := NewPatch("remove", makePath(p, i-1), nil)
		return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...)
	}
	// Insertion: t[j-1] was added at position i.
	if j > 0 && matrix[i][j-1]+1 == matrix[i][j] {
		op := NewPatch("add", makePath(p, i), t[j-1])
		return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...)
	}
	// Substitution: s[i-1] was replaced by t[j-1].
	if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] {
		if isBasicType(s[0]) {
			op := NewPatch("replace", makePath(p, i-1), t[j-1])
			return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...)
		}
		// BUG FIX: the substitution at cell (i, j) edits the SOURCE element
		// s[i-1], not s[j-1]. The old index compared the wrong element
		// whenever i != j and could panic (index out of range) when
		// j > len(s).
		p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{})
		return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...)
	}
	// Match: no edit at this cell, keep walking the diagonal.
	if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] {
		return backtrace(s, t, p, i-1, j-1, matrix)
	}
	return []Operation{}
}

View File

@ -1,19 +1,21 @@
package jsonpatch
package jsonpatch_test
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/appscode/jsonpatch"
"github.com/stretchr/testify/assert"
)
func TestMarshalNullableValue(t *testing.T) {
p1 := JsonPatchOperation{
p1 := jsonpatch.Operation{
Operation: "replace",
Path: "/a1",
Value: nil,
}
assert.JSONEq(t, `{"op":"replace", "path":"/a1","value":null}`, p1.Json())
p2 := JsonPatchOperation{
p2 := jsonpatch.Operation{
Operation: "replace",
Path: "/a2",
Value: "v2",
@ -22,7 +24,7 @@ func TestMarshalNullableValue(t *testing.T) {
}
func TestMarshalNonNullableValue(t *testing.T) {
p1 := JsonPatchOperation{
p1 := jsonpatch.Operation{
Operation: "remove",
Path: "/a1",
}

View File

@ -1,10 +1,202 @@
package jsonpatch
package jsonpatch_test
import (
"github.com/stretchr/testify/assert"
"encoding/json"
"testing"
"github.com/appscode/jsonpatch"
jp "github.com/evanphx/json-patch"
"github.com/stretchr/testify/assert"
)
var simpleA = `{"a":100, "b":200, "c":"hello"}`
var simpleB = `{"a":100, "b":200, "c":"goodbye"}`
var simpleC = `{"a":100, "b":100, "c":"hello"}`
var simpleD = `{"a":100, "b":200, "c":"hello", "d":"foo"}`
var simpleE = `{"a":100, "b":200}`
var simplef = `{"a":100, "b":100, "d":"foo"}`
var simpleG = `{"a":100, "b":null, "d":"foo"}`
var empty = `{}`
var arraySrc = `
{
"spec": {
"loadBalancerSourceRanges": [
"192.101.0.0/16",
"192.0.0.0/24"
]
}
}
`
var arrayDst = `
{
"spec": {
"loadBalancerSourceRanges": [
"192.101.0.0/24"
]
}
}
`
var complexBase = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexA = `{"a":100, "b":[{"c1":"goodbye", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexB = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":100, "g":"h", "i":"j"}}`
var complexC = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}, "k":[{"l":"m"}, {"l":"o"}]}`
var complexD = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"}, {"c3":"hello3", "d3":"foo3"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexE = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var point = `{"type":"Point", "coordinates":[0.0, 1.0]}`
var lineString = `{"type":"LineString", "coordinates":[[0.0, 1.0], [2.0, 3.0]]}`
var hyperComplexBase = `
{
"goods": [
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]
}`
var hyperComplexA = `
{
"goods": [
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Strawberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Vanilla" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5004", "type": "Maple" }
]
}
]
}`
var superComplexBase = `
{
"annotations": {
@ -489,18 +681,127 @@ var superComplexA = `
}
}`
func TestSuperComplexSame(t *testing.T) {
patch, e := CreatePatch([]byte(superComplexBase), []byte(superComplexBase))
assert.NoError(t, e)
assert.Equal(t, 0, len(patch), "they should be equal")
var (
oldDeployment = `{
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
"metadata": {
"annotations": {
"k8s.io/app": "busy-dep"
}
}
}`
newDeployment = `{
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
"metadata": {
"annotations": {
"k8s.io/app": "busy-dep",
"docker.com/commit": "github.com/myrepo#xyz"
}
}
}`
)
var (
oldNestedObj = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"doNotPause": true,
"version": "5.6"
}
}`
newNestedObj = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"doNotPause": true,
"version": "5.6",
"storageType": "Durable",
"updateStrategy": {
"type": "RollingUpdate"
},
"terminationPolicy": "Pause"
}
}`
)
// TestCreatePatch table-drives CreatePatch over representative document
// pairs; every case is exercised in both directions (src->dst and
// dst->src) via check, which round-trips the generated patch through an
// independent patch-application library.
func TestCreatePatch(t *testing.T) {
	cases := []struct {
		name string
		src  string
		dst  string
	}{
		// simple
		{"Simple:OneNullReplace", simplef, simpleG},
		{"Simple:Same", simpleA, simpleA},
		{"Simple:OneStringReplace", simpleA, simpleB},
		{"Simple:OneIntReplace", simpleA, simpleC},
		{"Simple:OneAdd", simpleA, simpleD},
		{"Simple:OneRemove", simpleA, simpleE},
		{"Simple:VsEmpty", simpleA, empty},
		// array types
		{"Array:Same", arraySrc, arraySrc},
		{"Array:BoolReplace", arraySrc, arrayDst},
		{"Array:AlmostSame", `{"Lines":[1,2,3,4,5,6,7,8,9,10]}`, `{"Lines":[2,3,4,5,6,7,8,9,10,11]}`},
		{"Array:Remove", `{"x":["A", "B", "C"]}`, `{"x":["D"]}`},
		{"Array:EditDistance", `{"letters":["A","B","C","D","E","F","G","H","I","J","K"]}`, `{"letters":["L","M","N"]}`},
		// complex types
		{"Complex:Same", complexBase, complexBase},
		{"Complex:OneStringReplaceInArray", complexBase, complexA},
		{"Complex:OneIntReplace", complexBase, complexB},
		{"Complex:OneAdd", complexBase, complexC},
		{"Complex:OneAddToArray", complexBase, complexC},
		{"Complex:VsEmpty", complexBase, empty},
		// geojson
		{"GeoJson:PointLineStringReplace", point, lineString},
		{"GeoJson:LineStringPointReplace", lineString, point},
		// HyperComplex
		{"HyperComplex:Same", hyperComplexBase, hyperComplexBase},
		{"HyperComplex:BoolReplace", hyperComplexBase, hyperComplexA},
		// SuperComplex
		{"SuperComplex:Same", superComplexBase, superComplexBase},
		{"SuperComplex:BoolReplace", superComplexBase, superComplexA},
		// map
		{"Kubernetes:Annotations", oldDeployment, newDeployment},
		// crd with nested object
		{"Nested Member Object", oldNestedObj, newNestedObj},
	}
	for _, c := range cases {
		// Each pair must round-trip in both directions.
		t.Run(c.name+"[src->dst]", func(t *testing.T) {
			check(t, c.src, c.dst)
		})
		t.Run(c.name+"[dst->src]", func(t *testing.T) {
			check(t, c.dst, c.src)
		})
	}
}
func TestSuperComplexBoolReplace(t *testing.T) {
patch, e := CreatePatch([]byte(superComplexBase), []byte(superComplexA))
assert.NoError(t, e)
assert.Equal(t, 1, len(patch), "they should be equal")
change := patch[0]
assert.Equal(t, "replace", change.Operation, "they should be equal")
assert.Equal(t, "/attributes/attribute-key/36/properties/visible", change.Path, "they should be equal")
assert.Equal(t, false, change.Value, "they should be equal")
// check verifies round-trip correctness: the patch generated from src to
// dst, serialized and then applied to src by the independent
// evanphx/json-patch library, must yield a document JSON-equal to dst.
func check(t *testing.T, src, dst string) {
	patch, err := jsonpatch.CreatePatch([]byte(src), []byte(dst))
	assert.Nil(t, err)

	data, err := json.Marshal(patch)
	assert.Nil(t, err)

	// Decode and apply with a second implementation to avoid self-confirmation.
	p2, err := jp.DecodePatch(data)
	assert.Nil(t, err)

	d2, err := p2.Apply([]byte(src))
	assert.Nil(t, err)

	assert.JSONEq(t, dst, string(d2))
}

16
vendor/github.com/evanphx/json-patch/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
language: go
go:
- 1.8
- 1.7
install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- go get github.com/jessevdk/go-flags
script:
- go get
- go test -cover ./...
notifications:
email: false

25
vendor/github.com/evanphx/json-patch/LICENSE generated vendored Normal file
View File

@ -0,0 +1,25 @@
Copyright (c) 2014, Evan Phoenix
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

297
vendor/github.com/evanphx/json-patch/README.md generated vendored Normal file
View File

@ -0,0 +1,297 @@
# JSON-Patch
`jsonpatch` is a library which provides functionality for both applying
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
# Get It!
**Latest and greatest**:
```bash
go get -u github.com/evanphx/json-patch
```
**Stable Versions**:
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
(previous versions below `v3` are unavailable)
# Use It!
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
* [Comparing JSON documents](#comparing-json-documents)
* [Combine merge patches](#combine-merge-patches)
# Configuration
* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
This defaults to `true` and enables the non-standard practice of allowing
negative indices to mean indices starting at the end of an array. This
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
false`.
* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.
## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
It can describe the changes needed to convert from the original to the
modified JSON document.
Once you have a merge patch, you can apply it to other JSON documents using the
`jsonpatch.MergePatch(document, patch)` function.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
// Let's create a merge patch from these two documents...
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
target := []byte(`{"name": "Jane", "age": 24}`)
patch, err := jsonpatch.CreateMergePatch(original, target)
if err != nil {
panic(err)
}
// Now lets apply the patch against a different JSON document...
alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
fmt.Printf("patch document: %s\n", patch)
fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
}
```
When run, you get the following output:
```bash
$ go run main.go
patch document: {"height":null,"name":"Jane"}
updated alternative doc: {"age":28,"name":"Jane"}
```
## Create and apply a JSON Patch
You can create patch objects using `DecodePatch([]byte)`, which can then
be applied against JSON documents.
The following is an example of creating a patch from two operations, and
applying it against a JSON document.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
patchJSON := []byte(`[
{"op": "replace", "path": "/name", "value": "Jane"},
{"op": "remove", "path": "/height"}
]`)
patch, err := jsonpatch.DecodePatch(patchJSON)
if err != nil {
panic(err)
}
modified, err := patch.Apply(original)
if err != nil {
panic(err)
}
fmt.Printf("Original document: %s\n", original)
fmt.Printf("Modified document: %s\n", modified)
}
```
When run, you get the following output:
```bash
$ go run main.go
Original document: {"name": "John", "age": 24, "height": 3.21}
Modified document: {"age":24,"name":"Jane"}
```
## Comparing JSON documents
Due to potential whitespace and ordering differences, one cannot simply compare
JSON strings or byte-arrays directly.
As such, you can instead use `jsonpatch.Equal(document1, document2)` to
determine if two JSON documents are _structurally_ equal. This ignores
whitespace differences, and key-value ordering.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
similar := []byte(`
{
"age": 24,
"height": 3.21,
"name": "John"
}
`)
different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
if jsonpatch.Equal(original, similar) {
fmt.Println(`"original" is structurally equal to "similar"`)
}
if !jsonpatch.Equal(original, different) {
fmt.Println(`"original" is _not_ structurally equal to "similar"`)
}
}
```
When run, you get the following output:
```bash
$ go run main.go
"original" is structurally equal to "similar"
"original" is _not_ structurally equal to "similar"
```
## Combine merge patches
Given two JSON merge patch documents, it is possible to combine them into a
single merge patch which can describe both set of changes.
The resulting merge patch can be used such that applying it results in a
document structurally similar as merging each merge patch to the document
in succession.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
// Let's combine these merge patch documents...
combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
if err != nil {
panic(err)
}
// Apply each patch individually against the original document
withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
if err != nil {
panic(err)
}
withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
if err != nil {
panic(err)
}
// Apply the combined patch against the original document
withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
if err != nil {
panic(err)
}
// Do both result in the same thing? They should!
if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
fmt.Println("Both JSON documents are structurally the same!")
}
fmt.Printf("combined merge patch: %s", combinedPatch)
}
```
When run, you get the following output:
```bash
$ go run main.go
Both JSON documents are structurally the same!
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
```
# CLI for comparing JSON documents
You can install the commandline program `json-patch`.
This program can take multiple JSON patch documents as arguments,
and fed a JSON document from `stdin`. It will apply the patch(es) against
the document and output the modified doc.
**patch.1.json**
```json
[
{"op": "replace", "path": "/name", "value": "Jane"},
{"op": "remove", "path": "/height"}
]
```
**patch.2.json**
```json
[
{"op": "add", "path": "/address", "value": "123 Main St"},
{"op": "replace", "path": "/age", "value": "21"}
]
```
**document.json**
```json
{
"name": "John",
"age": 24,
"height": 3.21
}
```
You can then run:
```bash
$ go install github.com/evanphx/json-patch/cmd/json-patch
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
{"address":"123 Main St","age":"21","name":"Jane"}
```
# Help It!
Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
or [create a PR](https://github.com/evanphx/json-patch/compare).
Before creating a pull request, we'd ask that you make sure tests are passing
and that you have added new tests when applicable.
Contributors can run tests using:
```bash
go test -cover ./...
```
Builds for pull requests are tested automatically
using [TravisCI](https://travis-ci.org/evanphx/json-patch).

View File

@ -0,0 +1,39 @@
package main
// Borrowed from Concourse: https://github.com/concourse/atc/blob/master/atccmd/file_flag.go
import (
"fmt"
"os"
"path/filepath"
)
// FileFlag is a flag for passing a path to a file on disk. The file is
// expected to be a file, not a directory, that actually exists.
type FileFlag string

// UnmarshalFlag implements go-flag's Unmarshaler interface. It validates
// that value names an existing non-directory path and stores the absolute
// form of that path in f.
func (f *FileFlag) UnmarshalFlag(value string) error {
	info, err := os.Stat(value)
	if err != nil {
		// Path does not exist (or is otherwise unstatable).
		return err
	}

	if info.IsDir() {
		return fmt.Errorf("path '%s' is a directory, not a file", value)
	}

	resolved, err := filepath.Abs(value)
	if err != nil {
		return err
	}

	*f = FileFlag(resolved)

	return nil
}

// Path is the path to the file.
func (f FileFlag) Path() string {
	return string(f)
}

View File

@ -0,0 +1,56 @@
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
jsonpatch "github.com/evanphx/json-patch"
flags "github.com/jessevdk/go-flags"
)
// opts holds the command-line options parsed by go-flags.
type opts struct {
	// PatchFilePaths collects every -p/--patch-file argument; each file is
	// expected to contain a JSON Patch document.
	PatchFilePaths []FileFlag `long:"patch-file" short:"p" value-name:"PATH" description:"Path to file with one or more operations"`
}
// main reads a JSON document from stdin, applies every patch file given via
// -p/--patch-file in order, and prints the resulting document to stdout.
// Any failure (flag parsing, file read, patch decode, patch apply) is fatal.
func main() {
	var o opts
	_, err := flags.Parse(&o)
	if err != nil {
		log.Fatalf("error: %s\n", err)
	}

	// Decode every patch file up front so a bad patch fails before we
	// consume stdin.
	patches := make([]jsonpatch.Patch, len(o.PatchFilePaths))
	for i, patchFilePath := range o.PatchFilePaths {
		var bs []byte
		bs, err = ioutil.ReadFile(patchFilePath.Path())
		if err != nil {
			log.Fatalf("error reading patch file: %s", err)
		}

		var patch jsonpatch.Patch
		patch, err = jsonpatch.DecodePatch(bs)
		if err != nil {
			log.Fatalf("error decoding patch file: %s", err)
		}

		patches[i] = patch
	}

	doc, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("error reading from stdin: %s", err)
	}

	// Apply the patches in the order they were given on the command line.
	mdoc := doc
	for _, patch := range patches {
		mdoc, err = patch.Apply(mdoc)
		if err != nil {
			log.Fatalf("error applying patch: %s", err)
		}
	}

	fmt.Printf("%s", mdoc)
}

38
vendor/github.com/evanphx/json-patch/errors.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package jsonpatch
import "fmt"
// AccumulatedCopySizeError is an error type returned when the accumulated size
// increase caused by copy operations in a patch operation has exceeded the
// limit.
type AccumulatedCopySizeError struct {
	limit       int64
	accumulated int64
}

// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
	e := AccumulatedCopySizeError{limit: l, accumulated: a}
	return &e
}

// Error implements the error interface.
func (a *AccumulatedCopySizeError) Error() string {
	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
}
// ArraySizeError is an error type returned when the array size has exceeded
// the limit.
type ArraySizeError struct {
	limit int
	size  int
}

// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
	e := ArraySizeError{limit: l, size: s}
	return &e
}

// Error implements the error interface.
func (a *ArraySizeError) Error() string {
	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
}

383
vendor/github.com/evanphx/json-patch/merge.go generated vendored Normal file
View File

@ -0,0 +1,383 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// merge combines patch into cur and returns the merged node. When cur is
// not a JSON object, the patch (with nulls pruned) replaces it wholesale;
// when the patch is not an object, it replaces cur verbatim. Otherwise the
// two objects are merged key by key via mergeDocs.
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
	target, curErr := cur.intoDoc()
	if curErr != nil {
		pruneNulls(patch)
		return patch
	}

	source, patchErr := patch.intoDoc()
	if patchErr != nil {
		return patch
	}

	mergeDocs(target, source, mergeMerge)

	return cur
}
// mergeDocs merges patch into doc in place. A null patch value deletes the
// key (RFC 7396) unless mergeMerge is set, in which case the explicit null
// is preserved so that combined patches still delete when later applied.
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
	for key, val := range *patch {
		if val == nil {
			if mergeMerge {
				(*doc)[key] = nil
			} else {
				delete(*doc, key)
			}
			continue
		}

		existing, present := (*doc)[key]
		if !present || existing == nil {
			// New (or null) key: adopt the patch value, minus its nulls.
			pruneNulls(val)
			(*doc)[key] = val
			continue
		}

		(*doc)[key] = merge(existing, val, mergeMerge)
	}
}
// pruneNulls removes null members from n in place, recursing into objects
// and arrays. Scalar nodes are left untouched.
func pruneNulls(n *lazyNode) {
	if doc, err := n.intoDoc(); err == nil {
		pruneDocNulls(doc)
		return
	}
	if ary, err := n.intoAry(); err == nil {
		pruneAryNulls(ary)
	}
}
// pruneDocNulls deletes null-valued keys from doc in place and recurses
// into the remaining values. It returns doc for call-chaining.
func pruneDocNulls(doc *partialDoc) *partialDoc {
	for key, val := range *doc {
		if val != nil {
			pruneNulls(val)
			continue
		}
		delete(*doc, key)
	}

	return doc
}
// pruneAryNulls drops nil elements from ary in place and recursively prunes
// the survivors. It returns ary for call-chaining.
func pruneAryNulls(ary *partialArray) *partialArray {
	kept := make([]*lazyNode, 0, len(*ary))
	for _, elem := range *ary {
		if elem == nil {
			continue
		}
		pruneNulls(elem)
		kept = append(kept, elem)
	}

	*ary = kept

	return ary
}
// errBadJSONDoc is returned when a document payload is not usable JSON.
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")

// errBadJSONPatch is returned when a patch payload is not usable JSON.
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")

// errBadMergeTypes is returned when the original and modified payloads are
// not the same JSON kind (one resembles an array, the other does not).
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
// MergeMergePatches merges two merge patches together, such that
// applying this resulting merged merge patch to a document yields the same
// as merging each merge patch to the document in succession.
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
	return doMergePatch(patch1Data, patch2Data, true)
}

// MergePatch merges the patchData into the docData, following RFC 7396
// semantics (null patch members delete the corresponding document members).
func MergePatch(docData, patchData []byte) ([]byte, error) {
	return doMergePatch(docData, patchData, false)
}
// doMergePatch implements both MergePatch (mergeMerge == false) and
// MergeMergePatches (mergeMerge == true) over raw JSON payloads.
//
// Both inputs are first decoded as JSON objects. A syntax error maps to
// errBadJSONDoc / errBadJSONPatch, and a successfully decoded JSON "null"
// is rejected as well. When either side is valid JSON but not an object,
// the patch replaces the document wholesale (nulls pruned unless patches
// are being combined); a non-object patch is additionally retried as an
// array before giving up.
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
	doc := &partialDoc{}
	docErr := json.Unmarshal(docData, doc)

	patch := &partialDoc{}
	patchErr := json.Unmarshal(patchData, patch)

	// A *json.SyntaxError means the payload is not JSON at all; any other
	// unmarshal error just means it is not an object and is handled below.
	if _, ok := docErr.(*json.SyntaxError); ok {
		return nil, errBadJSONDoc
	}

	if _, ok := patchErr.(*json.SyntaxError); ok {
		return nil, errBadJSONPatch
	}

	// JSON "null" decodes without error but leaves *doc nil: reject it.
	if docErr == nil && *doc == nil {
		return nil, errBadJSONDoc
	}

	if patchErr == nil && *patch == nil {
		return nil, errBadJSONPatch
	}

	if docErr != nil || patchErr != nil {
		// Not an error, just not a doc, so we turn straight into the patch
		if patchErr == nil {
			if mergeMerge {
				doc = patch
			} else {
				doc = pruneDocNulls(patch)
			}
		} else {
			// Patch is not an object; try it as an array of values.
			patchAry := &partialArray{}
			patchErr = json.Unmarshal(patchData, patchAry)

			if patchErr != nil {
				return nil, errBadJSONPatch
			}

			pruneAryNulls(patchAry)

			// NOTE: ":=" intentionally shadows patchErr in this scope.
			out, patchErr := json.Marshal(patchAry)

			if patchErr != nil {
				return nil, errBadJSONPatch
			}

			return out, nil
		}
	} else {
		mergeDocs(doc, patch, mergeMerge)
	}

	return json.Marshal(doc)
}
// resemblesJSONArray reports whether input looks like a JSON array: after
// trimming surrounding whitespace it must begin with '[' and end with ']'.
// False positives are possible, as the interior of the slice is never
// inspected; only the outer syntax is checked.
func resemblesJSONArray(input []byte) bool {
	trimmed := bytes.TrimSpace(input)
	return bytes.HasPrefix(trimmed, []byte("[")) && bytes.HasSuffix(trimmed, []byte("]"))
}
// CreateMergePatch will return a merge patch document capable of converting
// the original document(s) to the modified document(s).
// The parameters can be bytes of either two JSON Documents, or two arrays of
// JSON documents.
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
	origIsArray := resemblesJSONArray(originalJSON)
	modIsArray := resemblesJSONArray(modifiedJSON)

	switch {
	case origIsArray && modIsArray:
		// Both look like arrays: diff them element-wise.
		return createArrayMergePatch(originalJSON, modifiedJSON)
	case !origIsArray && !modIsArray:
		// Neither looks like an array: treat both as JSON objects.
		return createObjectMergePatch(originalJSON, modifiedJSON)
	default:
		// One array, one object: mismatched types.
		return nil, errBadMergeTypes
	}
}
// createObjectMergePatch will return a merge-patch document capable of
// converting the original document to the modified document.
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
	var original, modified map[string]interface{}

	if err := json.Unmarshal(originalJSON, &original); err != nil {
		return nil, errBadJSONDoc
	}
	if err := json.Unmarshal(modifiedJSON, &modified); err != nil {
		return nil, errBadJSONDoc
	}

	diff, err := getDiff(original, modified)
	if err != nil {
		return nil, err
	}

	return json.Marshal(diff)
}
// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
// Arrays of mismatched sizes will result in an error.
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
	var originals, modifieds []json.RawMessage

	if err := json.Unmarshal(originalJSON, &originals); err != nil {
		return nil, errBadJSONDoc
	}
	if err := json.Unmarshal(modifiedJSON, &modifieds); err != nil {
		return nil, errBadJSONDoc
	}

	// Element counts must line up, since patches are produced pairwise.
	if len(originals) != len(modifieds) {
		return nil, errBadJSONDoc
	}

	patches := make([]json.RawMessage, 0, len(originals))
	for i := range originals {
		patch, err := createObjectMergePatch(originals[i], modifieds[i])
		if err != nil {
			return nil, err
		}

		patches = append(patches, json.RawMessage(patch))
	}

	return json.Marshal(patches)
}
// matchesArray returns true if the arrays match element-for-element (values
// must be JSON-decoded types). As is idiomatic for Go, an empty array is not
// considered equal to a nil array.
func matchesArray(a, b []interface{}) bool {
	if len(a) != len(b) {
		return false
	}
	if (a == nil) != (b == nil) {
		return false
	}
	for i, av := range a {
		if !matchesValue(av, b[i]) {
			return false
		}
	}
	return true
}
// matchesValue returns true if the two values match (values must be
// JSON-decoded types). The dynamic types must be identical, otherwise it
// always returns false. Two map[string]interface{} values match only when
// every key present in either map has matching values in both.
func matchesValue(av, bv interface{}) bool {
	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
		return false
	}

	switch at := av.(type) {
	case string:
		return at == bv.(string)
	case float64:
		return at == bv.(float64)
	case bool:
		return at == bv.(bool)
	case nil:
		// Both nil, fine.
		return true
	case map[string]interface{}:
		bt := bv.(map[string]interface{})
		// Check both key sets; a missing key yields a nil lookup, which
		// only matches an explicit nil on the other side.
		for key := range at {
			if !matchesValue(at[key], bt[key]) {
				return false
			}
		}
		for key := range bt {
			if !matchesValue(at[key], bt[key]) {
				return false
			}
		}
		return true
	case []interface{}:
		return matchesArray(at, bv.([]interface{}))
	}

	return false
}
// getDiff returns the (recursive) difference between a and b as a
// map[string]interface{}, usable as an RFC 7396 merge patch: keys added or
// changed in b carry their new value, and keys removed from b map to nil.
// Both maps must contain only JSON-decoded value types (string, float64,
// bool, nil, map[string]interface{}, []interface{}); any other type panics.
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
	into := map[string]interface{}{}
	for key, bv := range b {
		av, ok := a[key]
		// value was added
		if !ok {
			into[key] = bv
			continue
		}
		// If types have changed, replace completely
		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
			into[key] = bv
			continue
		}
		// Types are the same, compare values
		switch at := av.(type) {
		case map[string]interface{}:
			// Recurse into nested objects; record only non-empty sub-diffs.
			// (Previously a map was pre-allocated here and immediately
			// discarded by the getDiff reassignment — dead code removed.)
			bt := bv.(map[string]interface{})
			dst, err := getDiff(at, bt)
			if err != nil {
				return nil, err
			}
			if len(dst) > 0 {
				into[key] = dst
			}
		case string, float64, bool:
			if !matchesValue(av, bv) {
				into[key] = bv
			}
		case []interface{}:
			// Arrays are replaced wholesale when they differ at all.
			bt := bv.([]interface{})
			if !matchesArray(at, bt) {
				into[key] = bv
			}
		case nil:
			switch bv.(type) {
			case nil:
				// Both nil, fine.
			default:
				into[key] = bv
			}
		default:
			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
		}
	}
	// Now add all deleted values as nil
	for key := range a {
		_, found := b[key]
		if !found {
			into[key] = nil
		}
	}
	return into, nil
}

585
vendor/github.com/evanphx/json-patch/merge_test.go generated vendored Normal file
View File

@ -0,0 +1,585 @@
package jsonpatch
import (
"strings"
"testing"
)
// mergePatch is a test helper: it applies patch to doc via MergePatch and
// panics on any error, returning the merged document as a string.
func mergePatch(doc, patch string) string {
	merged, err := MergePatch([]byte(doc), []byte(patch))
	if err != nil {
		panic(err)
	}
	return string(merged)
}
// TestMergePatchReplaceKey verifies that a patch value overwrites the
// matching key in the document.
func TestMergePatchReplaceKey(t *testing.T) {
	doc := `{ "title": "hello" }`
	pat := `{ "title": "goodbye" }`

	res := mergePatch(doc, pat)

	if !compareJSON(pat, res) {
		t.Fatalf("Key was not replaced")
	}
}

// TestMergePatchIgnoresOtherValues verifies that keys absent from the patch
// are left untouched in the document.
func TestMergePatchIgnoresOtherValues(t *testing.T) {
	doc := `{ "title": "hello", "age": 18 }`
	pat := `{ "title": "goodbye" }`

	res := mergePatch(doc, pat)

	exp := `{ "title": "goodbye", "age": 18 }`

	if !compareJSON(exp, res) {
		t.Fatalf("Key was not replaced")
	}
}

// TestMergePatchNilDoc verifies that a null document value can be replaced
// by an object from the patch.
func TestMergePatchNilDoc(t *testing.T) {
	doc := `{ "title": null }`
	pat := `{ "title": {"foo": "bar"} }`

	res := mergePatch(doc, pat)

	exp := `{ "title": {"foo": "bar"} }`

	if !compareJSON(exp, res) {
		t.Fatalf("Key was not replaced")
	}
}

// TestMergePatchRecursesIntoObjects verifies that nested objects are merged
// key-by-key rather than replaced wholesale.
func TestMergePatchRecursesIntoObjects(t *testing.T) {
	doc := `{ "person": { "title": "hello", "age": 18 } }`
	pat := `{ "person": { "title": "goodbye" } }`

	res := mergePatch(doc, pat)

	exp := `{ "person": { "title": "goodbye", "age": 18 } }`

	if !compareJSON(exp, res) {
		t.Fatalf("Key was not replaced")
	}
}
// nonObjectCases pairs a document and a patch with the expected merge result.
type nonObjectCases struct {
	doc, pat, res string
}

// TestMergePatchReplacesNonObjectsWholesale verifies RFC 7396 semantics:
// when either side is not a JSON object, the patch replaces the target
// entirely instead of being merged into it.
func TestMergePatchReplacesNonObjectsWholesale(t *testing.T) {
	a1 := `[1]`
	a2 := `[2]`
	o1 := `{ "a": 1 }`
	o2 := `{ "a": 2 }`
	o3 := `{ "a": 1, "b": 1 }`
	o4 := `{ "a": 2, "b": 1 }`

	cases := []nonObjectCases{
		{a1, a2, a2},
		{o1, a2, a2},
		{a1, o1, o1},
		{o3, o2, o4},
	}

	for _, c := range cases {
		act := mergePatch(c.doc, c.pat)

		if !compareJSON(c.res, act) {
			t.Errorf("whole object replacement failed")
		}
	}
}
// TestMergePatchReturnsErrorOnBadJSON verifies that syntactically invalid
// JSON on either side of the merge produces an error.
func TestMergePatchReturnsErrorOnBadJSON(t *testing.T) {
	_, err := MergePatch([]byte(`[[[[`), []byte(`1`))

	if err == nil {
		t.Errorf("Did not return an error for bad json: %s", err)
	}

	_, err = MergePatch([]byte(`1`), []byte(`[[[[`))

	if err == nil {
		t.Errorf("Did not return an error for bad json: %s", err)
	}
}
// TestMergePatchReturnsEmptyArrayOnEmptyArray verifies that patching with an
// empty array yields an empty array (not null and not the original contents).
func TestMergePatchReturnsEmptyArrayOnEmptyArray(t *testing.T) {
	doc := `{ "array": ["one", "two"] }`
	pat := `{ "array": [] }`

	exp := `{ "array": [] }`

	res, err := MergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		// Fixed failure message (was the garbled
		// "Emtpy array did not return not return as empty array").
		t.Fatalf("Empty array did not return as empty array")
	}
}
// rfcTests lists the example cases from RFC 7386 (JSON Merge Patch),
// Appendix A. The two commented-out entries involve scalar/null patches,
// which this implementation rejects (see TestMergePatchFailRFCCases).
var rfcTests = []struct {
	target   string
	patch    string
	expected string
}{
	// test cases from https://tools.ietf.org/html/rfc7386#appendix-A
	{target: `{"a":"b"}`, patch: `{"a":"c"}`, expected: `{"a":"c"}`},
	{target: `{"a":"b"}`, patch: `{"b":"c"}`, expected: `{"a":"b","b":"c"}`},
	{target: `{"a":"b"}`, patch: `{"a":null}`, expected: `{}`},
	{target: `{"a":"b","b":"c"}`, patch: `{"a":null}`, expected: `{"b":"c"}`},
	{target: `{"a":["b"]}`, patch: `{"a":"c"}`, expected: `{"a":"c"}`},
	{target: `{"a":"c"}`, patch: `{"a":["b"]}`, expected: `{"a":["b"]}`},
	{target: `{"a":{"b": "c"}}`, patch: `{"a": {"b": "d","c": null}}`, expected: `{"a":{"b":"d"}}`},
	{target: `{"a":[{"b":"c"}]}`, patch: `{"a":[1]}`, expected: `{"a":[1]}`},
	{target: `["a","b"]`, patch: `["c","d"]`, expected: `["c","d"]`},
	{target: `{"a":"b"}`, patch: `["c"]`, expected: `["c"]`},
	// {target: `{"a":"foo"}`, patch: `null`, expected: `null`},
	// {target: `{"a":"foo"}`, patch: `"bar"`, expected: `"bar"`},
	{target: `{"e":null}`, patch: `{"a":1}`, expected: `{"a":1,"e":null}`},
	{target: `[1,2]`, patch: `{"a":"b","c":null}`, expected: `{"a":"b"}`},
	{target: `{}`, patch: `{"a":{"bb":{"ccc":null}}}`, expected: `{"a":{"bb":{}}}`},
}

// TestMergePatchRFCCases runs every RFC appendix example through mergePatch.
func TestMergePatchRFCCases(t *testing.T) {
	for i, c := range rfcTests {
		out := mergePatch(c.target, c.patch)

		if !compareJSON(out, c.expected) {
			t.Errorf("case[%d], patch '%s' did not apply properly to '%s'. expected:\n'%s'\ngot:\n'%s'", i, c.patch, c.target, c.expected, out)
		}
	}
}
// rfcFailTests lists "doc | patch" pairs whose patch side is a scalar or
// null; this implementation rejects those with errBadJSONPatch.
var rfcFailTests = `
{"a":"foo"} | null
{"a":"foo"} | "bar"
`

// TestMergePatchFailRFCCases verifies each pair above fails with exactly
// errBadJSONPatch.
func TestMergePatchFailRFCCases(t *testing.T) {
	tests := strings.Split(rfcFailTests, "\n")

	for _, c := range tests {
		if strings.TrimSpace(c) == "" {
			continue
		}

		parts := strings.SplitN(c, "|", 2)

		doc := strings.TrimSpace(parts[0])
		pat := strings.TrimSpace(parts[1])

		out, err := MergePatch([]byte(doc), []byte(pat))

		if err != errBadJSONPatch {
			t.Errorf("error not returned properly: %s, %s", err, string(out))
		}
	}
}
// TestResembleJSONArray exercises resemblesJSONArray against malformed,
// valid, and whitespace-padded inputs. The check is syntactic only, so
// `[not valid syntax]` still counts as an array.
func TestResembleJSONArray(t *testing.T) {
	testCases := []struct {
		input    []byte
		expected bool
	}{
		// Failure cases
		{input: []byte(``), expected: false},
		{input: []byte(`not an array`), expected: false},
		{input: []byte(`{"foo": "bar"}`), expected: false},
		{input: []byte(`{"fizz": ["buzz"]}`), expected: false},
		{input: []byte(`[bad suffix`), expected: false},
		{input: []byte(`bad prefix]`), expected: false},
		{input: []byte(`][`), expected: false},

		// Valid cases
		{input: []byte(`[]`), expected: true},
		{input: []byte(`["foo", "bar"]`), expected: true},
		{input: []byte(`[["foo", "bar"]]`), expected: true},
		{input: []byte(`[not valid syntax]`), expected: true},

		// Valid cases with whitespace
		{input: []byte(` []`), expected: true},
		{input: []byte(`[] `), expected: true},
		{input: []byte(` [] `), expected: true},
		{input: []byte(` [ ] `), expected: true},
		{input: []byte("\t[]"), expected: true},
		{input: []byte("[]\n"), expected: true},
		{input: []byte("\n\t\r[]"), expected: true},
	}

	for _, test := range testCases {
		result := resemblesJSONArray(test.input)
		if result != test.expected {
			t.Errorf(
				`expected "%t" but received "%t" for case: "%s"`,
				test.expected,
				result,
				string(test.input),
			)
		}
	}
}
// TestCreateMergePatchReplaceKey verifies that only the changed nested key
// appears in the generated patch.
func TestCreateMergePatchReplaceKey(t *testing.T) {
	doc := `{ "title": "hello", "nested": {"one": 1, "two": 2} }`
	pat := `{ "title": "goodbye", "nested": {"one": 2, "two": 2} }`

	exp := `{ "title": "goodbye", "nested": {"one": 2} }`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Key was not replaced")
	}
}

// TestCreateMergePatchGetArray verifies that a changed array is emitted
// wholesale while an unchanged array is omitted from the patch.
func TestCreateMergePatchGetArray(t *testing.T) {
	doc := `{ "title": "hello", "array": ["one", "two"], "notmatch": [1, 2, 3] }`
	pat := `{ "title": "hello", "array": ["one", "two", "three"], "notmatch": [1, 2, 3] }`

	exp := `{ "array": ["one", "two", "three"] }`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Array was not added")
	}
}

// TestCreateMergePatchGetObjArray is the object-array variant of the test
// above: changed arrays of objects are replaced wholesale.
func TestCreateMergePatchGetObjArray(t *testing.T) {
	doc := `{ "title": "hello", "array": [{"banana": true}, {"evil": false}], "notmatch": [{"one":1}, {"two":2}, {"three":3}] }`
	pat := `{ "title": "hello", "array": [{"banana": false}, {"evil": true}], "notmatch": [{"one":1}, {"two":2}, {"three":3}] }`

	exp := `{ "array": [{"banana": false}, {"evil": true}] }`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Object array was not added")
	}
}
// TestCreateMergePatchDeleteKey verifies that a removed key shows up in the
// patch as an explicit null.
func TestCreateMergePatchDeleteKey(t *testing.T) {
	doc := `{ "title": "hello", "nested": {"one": 1, "two": 2} }`
	pat := `{ "title": "hello", "nested": {"one": 1} }`

	exp := `{"nested":{"two":null}}`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	// We cannot use "compareJSON", since Equals does not report a difference if the value is null
	if exp != string(res) {
		t.Fatalf("Key was not removed")
	}
}

// TestCreateMergePatchEmptyArray verifies that null -> [] produces a patch
// carrying the empty array.
func TestCreateMergePatchEmptyArray(t *testing.T) {
	doc := `{ "array": null }`
	pat := `{ "array": [] }`

	exp := `{"array":[]}`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	// We cannot use "compareJSON", since Equals does not report a difference if the value is null
	if exp != string(res) {
		t.Fatalf("Key was not removed")
	}
}
// TestCreateMergePatchNil verifies that diffing a document (containing
// explicit nulls) against itself yields an empty patch.
func TestCreateMergePatchNil(t *testing.T) {
	doc := `{ "title": "hello", "nested": {"one": 1, "two": [{"one":null}, {"two":null}, {"three":null}]} }`
	pat := doc

	exp := `{}`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Object array was not added")
	}
}

// TestCreateMergePatchObjArray verifies that diffing an identical array of
// nested objects yields an empty patch.
func TestCreateMergePatchObjArray(t *testing.T) {
	doc := `{ "array": [ {"a": {"b": 2}}, {"a": {"b": 3}} ]}`
	exp := `{}`

	res, err := CreateMergePatch([]byte(doc), []byte(doc))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	// We cannot use "compareJSON", since Equals does not report a difference if the value is null
	if exp != string(res) {
		t.Fatalf("Array was not empty, was " + string(res))
	}
}
// TestCreateMergePatchSameOuterArray verifies that identical top-level
// arrays produce an array of empty per-element patches.
func TestCreateMergePatchSameOuterArray(t *testing.T) {
	doc := `[{"foo": "bar"}]`
	pat := doc
	exp := `[{}]`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Outer array was not unmodified")
	}
}

// TestCreateMergePatchModifiedOuterArray verifies per-element diffing of
// top-level arrays: only changed elements get non-empty patches.
func TestCreateMergePatchModifiedOuterArray(t *testing.T) {
	doc := `[{"name": "John"}, {"name": "Will"}]`
	pat := `[{"name": "Jane"}, {"name": "Will"}]`
	exp := `[{"name": "Jane"}, {}]`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(exp, string(res)) {
		t.Fatalf("Expected %s but received %s", exp, res)
	}
}

// TestCreateMergePatchMismatchedOuterArray verifies that top-level arrays of
// different lengths are rejected.
func TestCreateMergePatchMismatchedOuterArray(t *testing.T) {
	doc := `[{"name": "John"}, {"name": "Will"}]`
	pat := `[{"name": "Jane"}]`

	_, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err == nil {
		t.Errorf("Expected error due to array length differences but received none")
	}
}

// TestCreateMergePatchMismatchedOuterTypes verifies that an array/object
// top-level mismatch is rejected.
func TestCreateMergePatchMismatchedOuterTypes(t *testing.T) {
	doc := `[{"name": "John"}]`
	pat := `{"name": "Jane"}`

	_, err := CreateMergePatch([]byte(doc), []byte(pat))

	if err == nil {
		t.Errorf("Expected error due to mismatched types but received none")
	}
}
func TestCreateMergePatchNoDifferences(t *testing.T) {
doc := `{ "title": "hello", "nested": {"one": 1, "two": 2} }`
pat := doc
exp := `{}`
res, err := CreateMergePatch([]byte(doc), []byte(pat))
if err != nil {
t.Errorf("Unexpected error: %s, %s", err, string(res))
}
if !compareJSON(exp, string(res)) {
t.Fatalf("Key was not replaced")
}
}
func TestCreateMergePatchComplexMatch(t *testing.T) {
doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }`
empty := `{}`
res, err := CreateMergePatch([]byte(doc), []byte(doc))
if err != nil {
t.Errorf("Unexpected error: %s, %s", err, string(res))
}
// We cannot use "compareJSON", since Equals does not report a difference if the value is null
if empty != string(res) {
t.Fatalf("Did not get empty result, was:%s", string(res))
}
}
// TestCreateMergePatchComplexAddAll diffs {} against a full document; the
// resulting patch must recreate the entire document.
func TestCreateMergePatchComplexAddAll(t *testing.T) {
	doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }`
	empty := `{}`
	res, err := CreateMergePatch([]byte(empty), []byte(doc))
	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if !compareJSON(doc, string(res)) {
		t.Fatalf("Did not get everything as, it was:\n%s", string(res))
	}
}
// TestCreateMergePatchComplexRemoveAll diffs a full document against {};
// the patch must null every top-level key (merge-patch deletion semantics).
// The expected string is compared byte-for-byte because compareJSON cannot
// distinguish explicit nulls.
func TestCreateMergePatchComplexRemoveAll(t *testing.T) {
	doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }`
	exp := `{"a":null,"f":null,"hello":null,"i":null,"n":null,"nested":null,"pi":null,"t":null}`
	empty := `{}`
	res, err := CreateMergePatch([]byte(doc), []byte(empty))
	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}

	if exp != string(res) {
		t.Fatalf("Did not get result, was:%s", string(res))
	}

	// FIXME: Crashes if using compareJSON like this:
	/*
		if !compareJSON(doc, string(res)) {
			t.Fatalf("Did not get everything as, it was:\n%s", string(res))
		}
	*/
}
// TestCreateMergePatchObjectWithInnerArray guards against a regression where
// nested arrays inside identical documents produced a non-empty patch.
func TestCreateMergePatchObjectWithInnerArray(t *testing.T) {
	stateString := `{
	  "OuterArray": [
	    {
		  "InnerArray": [
	        {
	          "StringAttr": "abc123"
	        }
	      ],
	      "StringAttr": "def456"
	    }
	  ]
	}`

	patch, err := CreateMergePatch([]byte(stateString), []byte(stateString))
	if err != nil {
		t.Fatal(err)
	}

	if string(patch) != "{}" {
		t.Fatalf("Patch should have been {} but was: %v", string(patch))
	}
}
// TestCreateMergePatchReplaceKeyNotEscape verifies that keys containing '/'
// are NOT JSON-Pointer-escaped in merge patches (RFC 7386 uses plain keys,
// unlike RFC 6902 paths).
func TestCreateMergePatchReplaceKeyNotEscape(t *testing.T) {
	doc := `{ "title": "hello", "nested": {"title/escaped": 1, "two": 2} }`
	pat := `{ "title": "goodbye", "nested": {"title/escaped": 2, "two": 2} }`

	exp := `{ "title": "goodbye", "nested": {"title/escaped": 2} }`

	res, err := CreateMergePatch([]byte(doc), []byte(pat))
	if err != nil {
		t.Errorf("Unexpected error: %s, %s", err, string(res))
	}
	if !compareJSON(exp, string(res)) {
		t.Log(string(res))
		t.Fatalf("Key was not replaced")
	}
}
// TestMergePatchReplaceKeyNotEscaping verifies the application side of the
// same property: a merge patch with a '/'-bearing key replaces that literal
// key rather than being interpreted as a JSON-Pointer path.
func TestMergePatchReplaceKeyNotEscaping(t *testing.T) {
	doc := `{ "obj": { "title/escaped": "hello" } }`
	pat := `{ "obj": { "title/escaped": "goodbye" } }`
	exp := `{ "obj": { "title/escaped": "goodbye" } }`

	res := mergePatch(doc, pat)
	if !compareJSON(exp, res) {
		t.Fatalf("Key was not replaced")
	}
}
// TestMergeMergePatches checks MergeMergePatches: composing two merge patches
// must be equivalent to applying them in sequence, including null (deletion)
// handling and type overrides at the top level.
func TestMergeMergePatches(t *testing.T) {
	cases := []struct {
		demonstrates string
		p1           string
		p2           string
		exp          string
	}{
		{
			demonstrates: "simple patches are merged normally",
			p1:           `{"add1": 1}`,
			p2:           `{"add2": 2}`,
			exp:          `{"add1": 1, "add2": 2}`,
		},
		{
			demonstrates: "nulls are kept",
			p1:           `{"del1": null}`,
			p2:           `{"del2": null}`,
			exp:          `{"del1": null, "del2": null}`,
		},
		{
			demonstrates: "a key added then deleted is kept deleted",
			p1:           `{"add_then_delete": "atd"}`,
			p2:           `{"add_then_delete": null}`,
			exp:          `{"add_then_delete": null}`,
		},
		{
			demonstrates: "a key deleted then added is kept added",
			p1:           `{"delete_then_add": null}`,
			p2:           `{"delete_then_add": "dta"}`,
			exp:          `{"delete_then_add": "dta"}`,
		},
		{
			demonstrates: "object overrides array",
			p1:           `[]`,
			p2:           `{"del": null, "add": "a"}`,
			exp:          `{"del": null, "add": "a"}`,
		},
		{
			demonstrates: "array overrides object",
			p1:           `{"del": null, "add": "a"}`,
			p2:           `[]`,
			exp:          `[]`,
		},
	}

	for _, c := range cases {
		out, err := MergeMergePatches([]byte(c.p1), []byte(c.p2))
		if err != nil {
			// Fix: this previously called panic(err), which aborts the whole
			// test binary instead of failing this test with context.
			t.Fatalf("Unexpected error merging patches (%v): %v", c.demonstrates, err)
		}

		if !compareJSON(c.exp, string(out)) {
			t.Logf("Error while trying to demonstrate: %v", c.demonstrates)
			t.Logf("Got %v", string(out))
			t.Logf("Expected %v", c.exp)
			t.Fatalf("Merged merge patch is incorrect")
		}
	}
}

696
vendor/github.com/evanphx/json-patch/patch.go generated vendored Normal file
View File

@ -0,0 +1,696 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
)
// Decoding states for lazyNode.which: raw/undecoded, decoded object, decoded array.
const (
	eRaw = iota
	eDoc
	eAry
)

var (
	// SupportNegativeIndices decides whether to support non-standard practice of
	// allowing negative indices to mean indices starting at the end of an array.
	// Default to true.
	SupportNegativeIndices bool = true
	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
	// "copy" operations in a patch. Zero means unlimited.
	AccumulatedCopySizeLimit int64 = 0
)

// lazyNode defers JSON decoding of a value until needed, caching the decoded
// object or array representation alongside the raw bytes.
type lazyNode struct {
	raw   *json.RawMessage
	doc   partialDoc
	ary   partialArray
	which int
}

// operation is a single RFC 6902 patch operation with its members kept raw.
type operation map[string]*json.RawMessage

// Patch is an ordered collection of operations.
type Patch []operation

// partialDoc is a lazily-decoded JSON object; partialArray a lazily-decoded array.
type partialDoc map[string]*lazyNode
type partialArray []*lazyNode

// container abstracts the key-addressed access shared by objects and arrays;
// for arrays the key is a decimal index (or "-" for append in add).
type container interface {
	get(key string) (*lazyNode, error)
	set(key string, val *lazyNode) error
	add(key string, val *lazyNode) error
	remove(key string) error
}
// newLazyNode wraps raw JSON bytes in an undecoded (eRaw) lazyNode.
func newLazyNode(raw *json.RawMessage) *lazyNode {
	n := &lazyNode{}
	n.raw = raw
	n.which = eRaw
	return n
}
// MarshalJSON serializes whichever representation the node currently holds.
func (n *lazyNode) MarshalJSON() ([]byte, error) {
	if n.which == eRaw {
		return json.Marshal(n.raw)
	}
	if n.which == eDoc {
		return json.Marshal(n.doc)
	}
	if n.which == eAry {
		return json.Marshal(n.ary)
	}
	return nil, fmt.Errorf("Unknown type")
}
// UnmarshalJSON stores a private copy of the raw bytes; decoding into an
// object or array is deferred until intoDoc/intoAry/tryDoc/tryAry.
func (n *lazyNode) UnmarshalJSON(data []byte) error {
	buf := make(json.RawMessage, len(data))
	copy(buf, data)
	n.which = eRaw
	n.raw = &buf
	return nil
}
// deepCopy clones src via a marshal round-trip, returning the copy and its
// serialized size in bytes (used for AccumulatedCopySizeLimit accounting).
// A nil src yields (nil, 0, nil).
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
	if src == nil {
		return nil, 0, nil
	}
	data, err := src.MarshalJSON()
	if err != nil {
		return nil, 0, err
	}
	dup := make(json.RawMessage, len(data))
	copy(dup, data)
	return newLazyNode(&dup), len(data), nil
}
// intoDoc decodes the node as a JSON object, caching the result so repeated
// calls are cheap.
func (n *lazyNode) intoDoc() (*partialDoc, error) {
	if n.which != eDoc {
		if n.raw == nil {
			return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
		}
		if err := json.Unmarshal(*n.raw, &n.doc); err != nil {
			return nil, err
		}
		n.which = eDoc
	}
	return &n.doc, nil
}
// intoAry decodes the node as a JSON array, caching the result so repeated
// calls are cheap.
func (n *lazyNode) intoAry() (*partialArray, error) {
	if n.which != eAry {
		if n.raw == nil {
			return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
		}
		if err := json.Unmarshal(*n.raw, &n.ary); err != nil {
			return nil, err
		}
		n.which = eAry
	}
	return &n.ary, nil
}
// compact returns the node's raw JSON with insignificant whitespace removed.
// It returns nil when there is no raw form, and falls back to the raw bytes
// if compaction fails.
func (n *lazyNode) compact() []byte {
	if n.raw == nil {
		return nil
	}
	var out bytes.Buffer
	if json.Compact(&out, *n.raw) != nil {
		return *n.raw
	}
	return out.Bytes()
}
// tryDoc attempts to decode the raw bytes as a JSON object, reporting whether
// it succeeded; on success the node's state switches to eDoc.
func (n *lazyNode) tryDoc() bool {
	if n.raw == nil {
		return false
	}
	if json.Unmarshal(*n.raw, &n.doc) != nil {
		return false
	}
	n.which = eDoc
	return true
}
// tryAry attempts to decode the raw bytes as a JSON array, reporting whether
// it succeeded; on success the node's state switches to eAry.
func (n *lazyNode) tryAry() bool {
	if n.raw == nil {
		return false
	}
	if json.Unmarshal(*n.raw, &n.ary) != nil {
		return false
	}
	n.which = eAry
	return true
}
// equal reports deep structural equality between two lazy nodes, decoding
// each side only as far as needed. Scalars compare by compacted raw bytes.
func (n *lazyNode) equal(o *lazyNode) bool {
	if n.which == eRaw {
		// n could not be decoded as object or array: it is a scalar/null,
		// so o must also still be raw and byte-compare equal after compaction.
		if !n.tryDoc() && !n.tryAry() {
			if o.which != eRaw {
				return false
			}

			return bytes.Equal(n.compact(), o.compact())
		}
	}

	if n.which == eDoc {
		if o.which == eRaw {
			if !o.tryDoc() {
				return false
			}
		}

		if o.which != eDoc {
			return false
		}

		// NOTE(review): only keys of n are checked against o; there is no
		// length comparison, so o having extra keys would still compare
		// equal here — confirm callers never rely on strict equality for
		// asymmetric key sets.
		for k, v := range n.doc {
			ov, ok := o.doc[k]

			if !ok {
				return false
			}

			if v == nil && ov == nil {
				continue
			}

			if !v.equal(ov) {
				return false
			}
		}

		return true
	}

	// n is (or decodes to) an array; o must too, with equal length and
	// element-wise equality.
	if o.which != eAry && !o.tryAry() {
		return false
	}

	if len(n.ary) != len(o.ary) {
		return false
	}

	for idx, val := range n.ary {
		if !val.equal(o.ary[idx]) {
			return false
		}
	}

	return true
}
// kind returns the operation's "op" member, or "unknown" when it is absent,
// null, or not a JSON string.
func (o operation) kind() string {
	obj, ok := o["op"]
	if !ok || obj == nil {
		return "unknown"
	}
	var op string
	if json.Unmarshal(*obj, &op) != nil {
		return "unknown"
	}
	return op
}
// path returns the operation's "path" member, or "unknown" when it is absent,
// null, or not a JSON string.
func (o operation) path() string {
	obj, ok := o["path"]
	if !ok || obj == nil {
		return "unknown"
	}
	var p string
	if json.Unmarshal(*obj, &p) != nil {
		return "unknown"
	}
	return p
}
// from returns the operation's "from" member (used by move/copy), or
// "unknown" when it is absent, null, or not a JSON string.
func (o operation) from() string {
	obj, ok := o["from"]
	if !ok || obj == nil {
		return "unknown"
	}
	var f string
	if json.Unmarshal(*obj, &f) != nil {
		return "unknown"
	}
	return f
}
// value returns the operation's "value" member wrapped as a lazyNode, or nil
// when the member is absent. A present-but-null member yields a lazyNode
// whose raw pointer is nil.
func (o operation) value() *lazyNode {
	obj, ok := o["value"]
	if !ok {
		return nil
	}
	return newLazyNode(obj)
}
// isArray reports whether buf's first significant byte opens a JSON array.
// Leading JSON whitespace is skipped.
// Fix: '\r' is valid JSON whitespace (RFC 8259 §2) but was not skipped, so a
// document beginning with CR before '[' was misclassified as an object.
func isArray(buf []byte) bool {
	for _, c := range buf {
		switch c {
		case ' ', '\n', '\t', '\r':
			continue
		case '[':
			return true
		default:
			return false
		}
	}

	return false
}
// findObject walks the JSON-Pointer path through *pd and returns the
// container holding the final segment, together with that segment's decoded
// key. It returns (nil, "") when the path has no segments or when any
// intermediate segment is missing or fails to decode.
func findObject(pd *container, path string) (container, string) {
	doc := *pd

	split := strings.Split(path, "/")

	if len(split) < 2 {
		return nil, ""
	}

	// All segments except the last address intermediate containers; the last
	// is the key returned to the caller.
	parts := split[1 : len(split)-1]

	key := split[len(split)-1]

	var err error

	for _, part := range parts {
		next, ok := doc.get(decodePatchKey(part))

		if next == nil || ok != nil {
			return nil, ""
		}

		// Decide array vs object decoding by peeking at the raw bytes.
		if isArray(*next.raw) {
			doc, err = next.intoAry()

			if err != nil {
				return nil, ""
			}
		} else {
			doc, err = next.intoDoc()

			if err != nil {
				return nil, ""
			}
		}
	}

	return doc, decodePatchKey(key)
}
// set replaces (or inserts) key in the object; it never fails.
func (d *partialDoc) set(key string, val *lazyNode) error {
	(*d)[key] = val
	return nil
}

// add inserts key into the object; for objects, add and set are identical.
func (d *partialDoc) add(key string, val *lazyNode) error {
	(*d)[key] = val
	return nil
}

// get returns the value for key; an absent key yields (nil, nil).
func (d *partialDoc) get(key string) (*lazyNode, error) {
	return (*d)[key], nil
}

// remove deletes key, erroring when it does not exist (RFC 6902 requires
// "remove" targets to be present).
func (d *partialDoc) remove(key string) error {
	_, ok := (*d)[key]
	if !ok {
		return fmt.Errorf("Unable to remove nonexistent key: %s", key)
	}

	delete(*d, key)
	return nil
}
// set should only be used to implement the "replace" operation, so "key" must
// be an already existing index in "d".
// NOTE(review): the index is not range-checked here; an out-of-range or
// negative index would panic. This appears safe only because replace calls
// get (which validates) before set — confirm no other callers exist.
func (d *partialArray) set(key string, val *lazyNode) error {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}
	(*d)[idx] = val
	return nil
}
// add inserts val before index key; "-" appends (RFC 6902). When
// SupportNegativeIndices is enabled, negative indices count from the end of
// the grown array.
func (d *partialArray) add(key string, val *lazyNode) error {
	if key == "-" {
		*d = append(*d, val)
		return nil
	}

	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}

	// sz is the post-insertion length; checking idx against it allows
	// idx == len(*d), i.e. insertion at the very end.
	sz := len(*d) + 1

	ary := make([]*lazyNode, sz)

	cur := *d

	if idx >= len(ary) {
		return fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	if SupportNegativeIndices {
		if idx < -len(ary) {
			return fmt.Errorf("Unable to access invalid index: %d", idx)
		}

		if idx < 0 {
			idx += len(ary)
		}
	}

	// Shift the tail right by one and drop val into the gap.
	copy(ary[0:idx], cur[0:idx])
	ary[idx] = val
	copy(ary[idx+1:], cur[idx:])

	*d = ary
	return nil
}
// get returns the element at the decimal index key. When
// SupportNegativeIndices is enabled, negative indices count from the end of
// the array, mirroring add and remove.
// Fix: previously a negative index passed the idx >= len check and panicked
// on the slice access; it is now either resolved (negative-index mode) or
// rejected with the same error the other accessors return.
func (d *partialArray) get(key string) (*lazyNode, error) {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return nil, err
	}

	cur := *d

	if idx >= len(cur) {
		return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	if SupportNegativeIndices {
		if idx < -len(cur) {
			return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
		}

		if idx < 0 {
			idx += len(cur)
		}
	}

	if idx < 0 {
		// Negative index with SupportNegativeIndices disabled.
		return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	return cur[idx], nil
}
// remove deletes the element at the decimal index key, shifting the tail
// left. Negative indices count from the end when SupportNegativeIndices is
// enabled.
func (d *partialArray) remove(key string) error {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}

	cur := *d

	if idx >= len(cur) {
		return fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	if SupportNegativeIndices {
		if idx < -len(cur) {
			return fmt.Errorf("Unable to access invalid index: %d", idx)
		}

		if idx < 0 {
			idx += len(cur)
		}
	}

	// Rebuild without the removed element.
	ary := make([]*lazyNode, len(cur)-1)

	copy(ary[0:idx], cur[0:idx])
	copy(ary[idx:], cur[idx+1:])

	*d = ary
	return nil
}
// add applies an "add" operation, inserting the operation's value at its path.
func (p Patch) add(doc *container, op operation) error {
	target := op.path()

	con, key := findObject(doc, target)
	if con == nil {
		return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", target)
	}

	return con.add(key, op.value())
}
// remove applies a "remove" operation, deleting the value at the operation's path.
func (p Patch) remove(doc *container, op operation) error {
	target := op.path()

	con, key := findObject(doc, target)
	if con == nil {
		return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", target)
	}

	return con.remove(key)
}
// replace applies a "replace" operation. Per RFC 6902 the target location
// must already exist, which the preliminary get enforces.
func (p Patch) replace(doc *container, op operation) error {
	target := op.path()

	con, key := findObject(doc, target)
	if con == nil {
		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", target)
	}

	if _, err := con.get(key); err != nil {
		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", target)
	}

	return con.set(key, op.value())
}
// move applies a "move" operation: the value is removed from "from" and then
// added at "path". The order matters — removal happens first, so a move
// within the same array addresses the destination using post-removal indices.
func (p Patch) move(doc *container, op operation) error {
	from := op.from()

	con, key := findObject(doc, from)

	if con == nil {
		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
	}

	val, err := con.get(key)
	if err != nil {
		return err
	}

	err = con.remove(key)
	if err != nil {
		return err
	}

	// The destination is resolved only after removal.
	path := op.path()

	con, key = findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
	}

	return con.add(key, val)
}
// test applies a "test" operation, comparing the document value at the
// operation's path against the operation's value. An absent document value
// matches a null (or absent) operation value.
// Fix: op.value() returns nil when the operation has no "value" member;
// dereferencing op.value().raw unconditionally panicked in that case.
func (p Patch) test(doc *container, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
	}

	val, err := con.get(key)
	if err != nil {
		return err
	}

	if val == nil {
		// Document value is absent/null: match only a null or absent
		// operation value.
		if op.value() == nil || op.value().raw == nil {
			return nil
		}
		return fmt.Errorf("Testing value %s failed", path)
	} else if op.value() == nil {
		return fmt.Errorf("Testing value %s failed", path)
	}

	if val.equal(op.value()) {
		return nil
	}

	return fmt.Errorf("Testing value %s failed", path)
}
// copy applies a "copy" operation, deep-copying the value at "from" to
// "path". The serialized size of each copy accumulates into
// *accumulatedCopySize, and the patch fails once AccumulatedCopySizeLimit
// (when positive) is exceeded — a guard against amplification attacks.
func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
	from := op.from()

	con, key := findObject(doc, from)

	if con == nil {
		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
	}

	val, err := con.get(key)
	if err != nil {
		return err
	}

	path := op.path()

	con, key = findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
	}

	valCopy, sz, err := deepCopy(val)
	if err != nil {
		return err
	}

	(*accumulatedCopySize) += int64(sz)
	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
	}

	return con.add(key, valCopy)
}
// Equal indicates if 2 JSON documents have the same structural equality.
func Equal(a, b []byte) bool {
	rawA := make(json.RawMessage, len(a))
	copy(rawA, a)

	rawB := make(json.RawMessage, len(b))
	copy(rawB, b)

	return newLazyNode(&rawA).equal(newLazyNode(&rawB))
}
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
	var p Patch
	if err := json.Unmarshal(buf, &p); err != nil {
		return nil, err
	}
	return p, nil
}
// Apply mutates a JSON document according to the patch, and returns the new
// document. Equivalent to ApplyIndent with no indentation (compact output).
func (p Patch) Apply(doc []byte) ([]byte, error) {
	return p.ApplyIndent(doc, "")
}
// ApplyIndent mutates a JSON document according to the patch, and returns the
// new document indented with the given string ("" yields compact output).
// Fix: an empty document used to panic on the doc[0] sniff; it is now
// returned unchanged. Also dropped a dead "err = nil" assignment.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
	if len(doc) == 0 {
		return doc, nil
	}

	// Pick the container type by sniffing the first byte.
	var pd container
	if doc[0] == '[' {
		pd = &partialArray{}
	} else {
		pd = &partialDoc{}
	}

	err := json.Unmarshal(doc, pd)
	if err != nil {
		return nil, err
	}

	var accumulatedCopySize int64

	for _, op := range p {
		switch op.kind() {
		case "add":
			err = p.add(&pd, op)
		case "remove":
			err = p.remove(&pd, op)
		case "replace":
			err = p.replace(&pd, op)
		case "move":
			err = p.move(&pd, op)
		case "test":
			err = p.test(&pd, op)
		case "copy":
			err = p.copy(&pd, op, &accumulatedCopySize)
		default:
			err = fmt.Errorf("Unexpected kind: %s", op.kind())
		}

		if err != nil {
			return nil, err
		}
	}

	if indent != "" {
		return json.MarshalIndent(pd, "", indent)
	}

	return json.Marshal(pd)
}
// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence.  This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.

var (
	// rfc6901Decoder unescapes JSON-Pointer reference tokens; the ~1-before-~0
	// order is mandated by the RFC.
	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
)

// decodePatchKey unescapes a single JSON-Pointer reference token.
func decodePatchKey(k string) string {
	return rfc6901Decoder.Replace(k)
}

479
vendor/github.com/evanphx/json-patch/patch_test.go generated vendored Normal file
View File

@ -0,0 +1,479 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"testing"
)
// reformatJSON pretty-prints j with one-space indentation for readable test
// failure output.
// Fix: the json.Indent error was silently ignored, which could return a
// truncated buffer for invalid input; the input is now returned unchanged
// in that case.
func reformatJSON(j string) string {
	buf := new(bytes.Buffer)
	if err := json.Indent(buf, []byte(j), "", " "); err != nil {
		return j
	}
	return buf.String()
}
// compareJSON reports semantic equality of two JSON object documents.
// NOTE(review): both inputs are unmarshalled into map[string]interface{} and
// Unmarshal errors are ignored, so any two non-object inputs (top-level
// arrays, invalid JSON) leave both maps nil and compare equal — confirm
// callers only rely on this for objects.
func compareJSON(a, b string) bool {
	// return Equal([]byte(a), []byte(b))

	var objA, objB map[string]interface{}
	json.Unmarshal([]byte(a), &objA)
	json.Unmarshal([]byte(b), &objB)

	// fmt.Printf("Comparing %#v\nagainst %#v\n", objA, objB)
	return reflect.DeepEqual(objA, objB)
}
// applyPatch decodes patch and applies it to doc, returning the patched
// document. Malformed patch JSON panics, since the fixtures are expected to
// always be syntactically valid; application errors are returned.
func applyPatch(doc, patch string) (string, error) {
	decoded, err := DecodePatch([]byte(patch))
	if err != nil {
		panic(err)
	}

	result, err := decoded.Apply([]byte(doc))
	if err != nil {
		return "", err
	}

	return string(result), nil
}
// Case pairs a document and a patch with the expected patched result.
type Case struct {
	doc, patch, result string
}
// repeatedA returns a string of r copies of "A" (empty for r <= 0), used to
// build payloads for AccumulatedCopySizeLimit cases.
// Fix: the original += loop was quadratic; bytes.Repeat builds the string in
// one allocation, with a guard since bytes.Repeat panics on negative counts.
func repeatedA(r int) string {
	if r <= 0 {
		return ""
	}
	return string(bytes.Repeat([]byte{'A'}, r))
}
var Cases = []Case{
{
`{ "foo": "bar"}`,
`[
{ "op": "add", "path": "/baz", "value": "qux" }
]`,
`{
"baz": "qux",
"foo": "bar"
}`,
},
{
`{ "foo": [ "bar", "baz" ] }`,
`[
{ "op": "add", "path": "/foo/1", "value": "qux" }
]`,
`{ "foo": [ "bar", "qux", "baz" ] }`,
},
{
`{ "foo": [ "bar", "baz" ] }`,
`[
{ "op": "add", "path": "/foo/-1", "value": "qux" }
]`,
`{ "foo": [ "bar", "baz", "qux" ] }`,
},
{
`{ "baz": "qux", "foo": "bar" }`,
`[ { "op": "remove", "path": "/baz" } ]`,
`{ "foo": "bar" }`,
},
{
`{ "foo": [ "bar", "qux", "baz" ] }`,
`[ { "op": "remove", "path": "/foo/1" } ]`,
`{ "foo": [ "bar", "baz" ] }`,
},
{
`{ "baz": "qux", "foo": "bar" }`,
`[ { "op": "replace", "path": "/baz", "value": "boo" } ]`,
`{ "baz": "boo", "foo": "bar" }`,
},
{
`{
"foo": {
"bar": "baz",
"waldo": "fred"
},
"qux": {
"corge": "grault"
}
}`,
`[ { "op": "move", "from": "/foo/waldo", "path": "/qux/thud" } ]`,
`{
"foo": {
"bar": "baz"
},
"qux": {
"corge": "grault",
"thud": "fred"
}
}`,
},
{
`{ "foo": [ "all", "grass", "cows", "eat" ] }`,
`[ { "op": "move", "from": "/foo/1", "path": "/foo/3" } ]`,
`{ "foo": [ "all", "cows", "eat", "grass" ] }`,
},
{
`{ "foo": [ "all", "grass", "cows", "eat" ] }`,
`[ { "op": "move", "from": "/foo/1", "path": "/foo/2" } ]`,
`{ "foo": [ "all", "cows", "grass", "eat" ] }`,
},
{
`{ "foo": "bar" }`,
`[ { "op": "add", "path": "/child", "value": { "grandchild": { } } } ]`,
`{ "foo": "bar", "child": { "grandchild": { } } }`,
},
{
`{ "foo": ["bar"] }`,
`[ { "op": "add", "path": "/foo/-", "value": ["abc", "def"] } ]`,
`{ "foo": ["bar", ["abc", "def"]] }`,
},
{
`{ "foo": "bar", "qux": { "baz": 1, "bar": null } }`,
`[ { "op": "remove", "path": "/qux/bar" } ]`,
`{ "foo": "bar", "qux": { "baz": 1 } }`,
},
{
`{ "foo": "bar" }`,
`[ { "op": "add", "path": "/baz", "value": null } ]`,
`{ "baz": null, "foo": "bar" }`,
},
{
`{ "foo": ["bar"]}`,
`[ { "op": "replace", "path": "/foo/0", "value": "baz"}]`,
`{ "foo": ["baz"]}`,
},
{
`{ "foo": ["bar","baz"]}`,
`[ { "op": "replace", "path": "/foo/0", "value": "bum"}]`,
`{ "foo": ["bum","baz"]}`,
},
{
`{ "foo": ["bar","qux","baz"]}`,
`[ { "op": "replace", "path": "/foo/1", "value": "bum"}]`,
`{ "foo": ["bar", "bum","baz"]}`,
},
{
`[ {"foo": ["bar","qux","baz"]}]`,
`[ { "op": "replace", "path": "/0/foo/0", "value": "bum"}]`,
`[ {"foo": ["bum","qux","baz"]}]`,
},
{
`[ {"foo": ["bar","qux","baz"], "bar": ["qux","baz"]}]`,
`[ { "op": "copy", "from": "/0/foo/0", "path": "/0/bar/0"}]`,
`[ {"foo": ["bar","qux","baz"], "bar": ["bar", "baz"]}]`,
},
{
`[ {"foo": ["bar","qux","baz"], "bar": ["qux","baz"]}]`,
`[ { "op": "copy", "from": "/0/foo/0", "path": "/0/bar"}]`,
`[ {"foo": ["bar","qux","baz"], "bar": ["bar", "qux", "baz"]}]`,
},
{
`[ { "foo": {"bar": ["qux","baz"]}, "baz": {"qux": "bum"}}]`,
`[ { "op": "copy", "from": "/0/foo/bar", "path": "/0/baz/bar"}]`,
`[ { "baz": {"bar": ["qux","baz"], "qux":"bum"}, "foo": {"bar": ["qux","baz"]}}]`,
},
{
`{ "foo": ["bar"]}`,
`[{"op": "copy", "path": "/foo/0", "from": "/foo"}]`,
`{ "foo": [["bar"], "bar"]}`,
},
{
`{ "foo": ["bar","qux","baz"]}`,
`[ { "op": "remove", "path": "/foo/-2"}]`,
`{ "foo": ["bar", "baz"]}`,
},
{
`{ "foo": []}`,
`[ { "op": "add", "path": "/foo/-1", "value": "qux"}]`,
`{ "foo": ["qux"]}`,
},
{
`{ "bar": [{"baz": null}]}`,
`[ { "op": "replace", "path": "/bar/0/baz", "value": 1 } ]`,
`{ "bar": [{"baz": 1}]}`,
},
{
`{ "bar": [{"baz": 1}]}`,
`[ { "op": "replace", "path": "/bar/0/baz", "value": null } ]`,
`{ "bar": [{"baz": null}]}`,
},
{
`{ "bar": [null]}`,
`[ { "op": "replace", "path": "/bar/0", "value": 1 } ]`,
`{ "bar": [1]}`,
},
{
`{ "bar": [1]}`,
`[ { "op": "replace", "path": "/bar/0", "value": null } ]`,
`{ "bar": [null]}`,
},
{
fmt.Sprintf(`{ "foo": ["A", %q] }`, repeatedA(48)),
// The wrapping quotes around 'A's are included in the copy
// size, so each copy operation increases the size by 50 bytes.
`[ { "op": "copy", "path": "/foo/-", "from": "/foo/1" },
{ "op": "copy", "path": "/foo/-", "from": "/foo/1" }]`,
fmt.Sprintf(`{ "foo": ["A", %q, %q, %q] }`, repeatedA(48), repeatedA(48), repeatedA(48)),
},
}
// BadCase pairs a document with a patch; used for patches expected to fail
// (BadCases) and, separately, patches expected to mutate (MutationTestCases).
type BadCase struct {
	doc, patch string
}
// MutationTestCases hold patches that must change their document;
// TestAllCases asserts the output differs from the input, guarding against
// silent no-ops around null values.
var MutationTestCases = []BadCase{
	{
		`{ "foo": "bar", "qux": { "baz": 1, "bar": null } }`,
		`[ { "op": "remove", "path": "/qux/bar" } ]`,
	},
	{
		`{ "foo": "bar", "qux": { "baz": 1, "bar": null } }`,
		`[ { "op": "replace", "path": "/qux/baz", "value": null } ]`,
	},
}
var BadCases = []BadCase{
{
`{ "foo": "bar" }`,
`[ { "op": "add", "path": "/baz/bat", "value": "qux" } ]`,
},
{
`{ "a": { "b": { "d": 1 } } }`,
`[ { "op": "remove", "path": "/a/b/c" } ]`,
},
{
`{ "a": { "b": { "d": 1 } } }`,
`[ { "op": "move", "from": "/a/b/c", "path": "/a/b/e" } ]`,
},
{
`{ "a": { "b": [1] } }`,
`[ { "op": "remove", "path": "/a/b/1" } ]`,
},
{
`{ "a": { "b": [1] } }`,
`[ { "op": "move", "from": "/a/b/1", "path": "/a/b/2" } ]`,
},
{
`{ "foo": "bar" }`,
`[ { "op": "add", "pathz": "/baz", "value": "qux" } ]`,
},
{
`{ "foo": "bar" }`,
`[ { "op": "add", "path": "", "value": "qux" } ]`,
},
{
`{ "foo": ["bar","baz"]}`,
`[ { "op": "replace", "path": "/foo/2", "value": "bum"}]`,
},
{
`{ "foo": ["bar","baz"]}`,
`[ { "op": "add", "path": "/foo/-4", "value": "bum"}]`,
},
{
`{ "name":{ "foo": "bat", "qux": "bum"}}`,
`[ { "op": "replace", "path": "/foo/bar", "value":"baz"}]`,
},
{
`{ "foo": ["bar"]}`,
`[ {"op": "add", "path": "/foo/2", "value": "bum"}]`,
},
{
`{ "foo": []}`,
`[ {"op": "remove", "path": "/foo/-"}]`,
},
{
`{ "foo": []}`,
`[ {"op": "remove", "path": "/foo/-1"}]`,
},
{
`{ "foo": ["bar"]}`,
`[ {"op": "remove", "path": "/foo/-2"}]`,
},
{
`{}`,
`[ {"op":null,"path":""} ]`,
},
{
`{}`,
`[ {"op":"add","path":null} ]`,
},
{
`{}`,
`[ { "op": "copy", "from": null }]`,
},
{
`{ "foo": ["bar"]}`,
`[{"op": "copy", "path": "/foo/6666666666", "from": "/"}]`,
},
// Can't copy into an index greater than the size of the array
{
`{ "foo": ["bar"]}`,
`[{"op": "copy", "path": "/foo/2", "from": "/foo/0"}]`,
},
// Accumulated copy size cannot exceed AccumulatedCopySizeLimit.
{
fmt.Sprintf(`{ "foo": ["A", %q] }`, repeatedA(49)),
// The wrapping quotes around 'A's are included in the copy
// size, so each copy operation increases the size by 51 bytes.
`[ { "op": "copy", "path": "/foo/-", "from": "/foo/1" },
{ "op": "copy", "path": "/foo/-", "from": "/foo/1" }]`,
},
// Can't move into an index greater than or equal to the size of the array
{
`{ "foo": [ "all", "grass", "cows", "eat" ] }`,
`[ { "op": "move", "from": "/foo/1", "path": "/foo/4" } ]`,
},
}
// configureGlobals installs a new AccumulatedCopySizeLimit and returns a
// closure restoring the previous value; callers defer the result.
// This is not thread safe, so we cannot run patch tests in parallel.
func configureGlobals(accumulatedCopySizeLimit int64) func() {
	previous := AccumulatedCopySizeLimit
	AccumulatedCopySizeLimit = accumulatedCopySizeLimit
	return func() {
		AccumulatedCopySizeLimit = previous
	}
}
// TestAllCases drives three fixture tables: Cases (patch must apply with the
// given result), MutationTestCases (patch must change the document), and
// BadCases (patch must fail). A small copy-size limit is installed so the
// AccumulatedCopySizeLimit fixtures are exercised.
func TestAllCases(t *testing.T) {
	defer configureGlobals(int64(100))()
	for _, c := range Cases {
		out, err := applyPatch(c.doc, c.patch)

		if err != nil {
			t.Errorf("Unable to apply patch: %s", err)
		}

		if !compareJSON(out, c.result) {
			t.Errorf("Patch did not apply. Expected:\n%s\n\nActual:\n%s",
				reformatJSON(c.result), reformatJSON(out))
		}
	}

	for _, c := range MutationTestCases {
		out, err := applyPatch(c.doc, c.patch)

		if err != nil {
			t.Errorf("Unable to apply patch: %s", err)
		}

		// The patch must actually change the document.
		if compareJSON(out, c.doc) {
			t.Errorf("Patch did not apply. Original:\n%s\n\nPatched:\n%s",
				reformatJSON(c.doc), reformatJSON(out))
		}
	}

	for _, c := range BadCases {
		_, err := applyPatch(c.doc, c.patch)

		if err == nil {
			t.Errorf("Patch %q should have failed to apply but it did not", c.patch)
		}
	}
}
// TestCase drives "test"-operation checks: result is the expected pass/fail
// outcome and failedPath the path reported in the error message on failure.
type TestCase struct {
	doc, patch string
	result     bool
	failedPath string
}
var TestCases = []TestCase{
{
`{
"baz": "qux",
"foo": [ "a", 2, "c" ]
}`,
`[
{ "op": "test", "path": "/baz", "value": "qux" },
{ "op": "test", "path": "/foo/1", "value": 2 }
]`,
true,
"",
},
{
`{ "baz": "qux" }`,
`[ { "op": "test", "path": "/baz", "value": "bar" } ]`,
false,
"/baz",
},
{
`{
"baz": "qux",
"foo": ["a", 2, "c"]
}`,
`[
{ "op": "test", "path": "/baz", "value": "qux" },
{ "op": "test", "path": "/foo/1", "value": "c" }
]`,
false,
"/foo/1",
},
{
`{ "baz": "qux" }`,
`[ { "op": "test", "path": "/foo", "value": 42 } ]`,
false,
"/foo",
},
{
`{ "baz": "qux" }`,
`[ { "op": "test", "path": "/foo", "value": null } ]`,
true,
"",
},
{
`{ "foo": null }`,
`[ { "op": "test", "path": "/foo", "value": null } ]`,
true,
"",
},
{
`{ "foo": {} }`,
`[ { "op": "test", "path": "/foo", "value": null } ]`,
false,
"/foo",
},
{
`{ "foo": [] }`,
`[ { "op": "test", "path": "/foo", "value": null } ]`,
false,
"/foo",
},
{
`{ "baz/foo": "qux" }`,
`[ { "op": "test", "path": "/baz~1foo", "value": "qux"} ]`,
true,
"",
},
{
`{ "foo": [] }`,
`[ { "op": "test", "path": "/foo"} ]`,
false,
"/foo",
},
}
// TestAllTest runs every "test"-operation fixture, checking both the
// pass/fail outcome and the exact error message for failing cases.
func TestAllTest(t *testing.T) {
	for _, c := range TestCases {
		_, err := applyPatch(c.doc, c.patch)

		if c.result && err != nil {
			t.Errorf("Testing failed when it should have passed: %s", err)
		} else if !c.result && err == nil {
			// Fix: message previously misspelled "failed" as "faild".
			t.Errorf("Testing passed when it should have failed: %s", err)
		} else if !c.result {
			expected := fmt.Sprintf("Testing value %s failed", c.failedPath)
			if err.Error() != expected {
				t.Errorf("Testing failed as expected but invalid message: expected [%s], got [%s]", expected, err)
			}
		}
	}
}

View File

@ -1,7 +1,8 @@
language: go
go:
- 1.3
- 1.4
- "1.3"
- "1.4"
- "1.10"
script:
- go test
- go build

View File

@ -4,13 +4,13 @@
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
@ -44,6 +44,8 @@ import "github.com/ghodss/yaml"
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
@ -51,8 +53,8 @@ import (
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"name"`
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
@ -65,13 +67,13 @@ func main() {
}
fmt.Println(string(y))
/* Output:
name: John
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err := yaml.Unmarshal(y, &p2)
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
@ -86,11 +88,14 @@ func main() {
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)

View File

@ -45,7 +45,11 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
if v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
} else {
v = reflect.New(v.Type().Elem())
}
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(json.Unmarshaler); ok {

View File

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strconv"
@ -15,26 +16,30 @@ import (
func Marshal(o interface{}) ([]byte, error) {
j, err := json.Marshal(o)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: ", err)
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
}
y, err := JSONToYAML(j)
if err != nil {
return nil, fmt.Errorf("error converting JSON to YAML: ", err)
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
}
return y, nil
}
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
vo := reflect.ValueOf(o)
j, err := yamlToJSON(y, &vo)
j, err := yamlToJSON(y, &vo, yaml.Unmarshal)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %v", err)
}
err = json.Unmarshal(j, o)
err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %v", err)
}
@ -42,13 +47,28 @@ func Unmarshal(y []byte, o interface{}) error {
return nil
}
// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
d := json.NewDecoder(r)
for _, opt := range opts {
d = opt(d)
}
if err := d.Decode(&o); err != nil {
return fmt.Errorf("while decoding JSON: %v", err)
}
return nil
}
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshling to interface{}, it just picks float64
// etc.) when unmarshalling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
err := yaml.Unmarshal(j, &jsonObj)
@ -60,8 +80,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
return yaml.Marshal(jsonObj)
}
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
@ -70,14 +90,22 @@ func JSONToYAML(j []byte) ([]byte, error) {
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
return yamlToJSON(y, nil, yaml.Unmarshal)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := yaml.Unmarshal(y, &yamlObj)
err := yamlUnmarshal(y, &yamlObj)
if err != nil {
return nil, err
}

14
vendor/github.com/ghodss/yaml/yaml_go110.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
// This file contains changes that are only compatible with go 1.10 and onwards.
// +build go1.10
package yaml
import "encoding/json"
// DisallowUnknownFields configures the JSON decoder to error out if unknown
// fields come along, instead of dropping them by default.
func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
d.DisallowUnknownFields()
return d
}

46
vendor/github.com/ghodss/yaml/yaml_go110_test.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// +build go1.10
package yaml
import (
"fmt"
"testing"
)
// TestUnmarshalWithTags verifies DisallowUnknownFields behavior through
// Unmarshal: a YAML key matching a tagged field decodes cleanly, while a key
// matching no field produces an error.
func TestUnmarshalWithTags(t *testing.T) {
	type WithTaggedField struct {
		Field string `json:"field"`
	}

	t.Run("Known tagged field", func(t *testing.T) {
		var v WithTaggedField
		if err := Unmarshal([]byte(`field: "hello"`), &v, DisallowUnknownFields); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if v.Field != "hello" {
			t.Errorf("v.Field=%v, want 'hello'", v.Field)
		}
	})

	t.Run("With unknown tagged field", func(t *testing.T) {
		var v WithTaggedField
		if err := Unmarshal([]byte(`unknown: "hello"`), &v, DisallowUnknownFields); err == nil {
			t.Errorf("want error because of unknown field, got <nil>: v=%#v", v)
		}
	})
}
// ExampleUnknown demonstrates that Unmarshal with the DisallowUnknownFields
// option reports an error when the YAML contains a key that maps to no field
// of the target struct.
func ExampleUnknown() {
	type WithTaggedField struct {
		Field string `json:"field"`
	}
	y := []byte(`unknown: "hello"`)
	v := WithTaggedField{}
	fmt.Printf("%v\n", Unmarshal(y, &v, DisallowUnknownFields))
	// Fixed typo ("Ouptut" -> "Output") so `go test` actually checks this
	// example's output instead of silently skipping it.
	// NOTE(review): verify the expected line below against the real error —
	// the check was never active before, so it may need the wrapper's
	// "error " prefix.
	// Output:
	// unmarshaling JSON: while decoding JSON: json: unknown field "unknown"
}

View File

@ -88,10 +88,26 @@ func TestUnmarshal(t *testing.T) {
s4 := UnmarshalStringMap{}
e4 := UnmarshalStringMap{map[string]string{"b": "1"}}
unmarshal(t, y, &s4, &e4)
y = []byte(`
a:
name: TestA
b:
name: TestB
`)
type NamedThing struct {
Name string `json:"name"`
}
s5 := map[string]*NamedThing{}
e5 := map[string]*NamedThing{
"a": &NamedThing{Name: "TestA"},
"b": &NamedThing{Name: "TestB"},
}
unmarshal(t, y, &s5, &e5)
}
func unmarshal(t *testing.T, y []byte, s, e interface{}) {
err := Unmarshal(y, s)
func unmarshal(t *testing.T, y []byte, s, e interface{}, opts ...JSONOpt) {
err := Unmarshal(y, s, opts...)
if err != nil {
t.Errorf("error unmarshaling YAML: %v", err)
}
@ -269,3 +285,16 @@ func runCases(t *testing.T, runType RunType, cases []Case) {
func strPtr(s string) *string {
return &s
}
func TestYAMLToJSONStrict(t *testing.T) {
const data = `
foo: bar
foo: baz
`
if _, err := YAMLToJSON([]byte(data)); err != nil {
t.Error("expected YAMLtoJSON to pass on duplicate field names")
}
if _, err := YAMLToJSONStrict([]byte(data)); err == nil {
t.Error("expected YAMLtoJSONStrict to fail on duplicate field names")
}
}

View File

@ -1 +0,0 @@
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.pDqezepze0YqRx4u6M8GFaWtnVR-utTWZic-GX-RvMATAoYpG4H2sc9tlnGNCxa44dbRY0vY10qfBU7Sno8vkp21fsK42ofGLfen_suum_0ilm0sFS0X-kAwk7TIq5L5lPPKiChPMUiGp5oJW-g5MqMFX1jNiI-4fP-vSM3B3-eyZtJD_O517TgfIRLnblCzqwIkyRmAfPNopi-Fe8Y31TmO2Vd0nFc1Aqro_VaJSACzEVxOHTNpjETcMjlYzwgMXLeiAfLV-5hM0f6DXgHMlLSuMkB_Ndnw25dkB7hreGk4x0tHQ3X9mUfTgLq1hIDoyeeKDIM83Tqw4LBRph20BQ.qd_pNuyi23B0PlWz.JtpO7kqOm0SWOGzWDalkWheHuNd-eDpVbqI9WPAEFDOIBvz7TbsYMBlIYVWEGWbat4mkx_ejxnMn1L1l996NJnyP7eY-QE82cfPJbjx94d0Ob70KZ4DCm_UxcY2t-OKFiPJqxW7MA5jKyDuGD16bdxpjLEoe_cMSEr8FNu-MVG6wcchPcyYyRkqTQSl4mb09KikkAzHjwjo-DcO0f8ps4Uzsoc0aqAAWdE-ocG0YqierLoemjusYMiLH-eLF6MvaLRvHSte-cLzPuYCeZURnBDgxu3i3UApgddnX7g1c7tdGGBGvgCl-tEEDW58Vxgdjksim2S7y3lfoJ8FFzSWeRH2y7Kq04hgew3b2J_RiDB9ejzIopzG8ZGjJa3EO1-i9ORTl12nXK1RdlLGqu604ENaeVOPCIHL-0C8e6_wHdUGHydLZImSxKYSrNvy8resP1D_9t4B-3q2mkS9mhnMONrXbPDVw5QY5mvXlWs0Db99ARwzsl-Qlu0A_tsZwMjWT2I1QMvWPyTRScmMm0FJSv9zStjzxWa_q2GL7Naz1fI4Dd6ZgNJWYYq-mHN5chEeBdIcwb_zMPHczMQXXNL5nmfRGM1aPffkToFWCDpIlI8IXec83ZC6_POxZegS6n9Drrvc.6Nz8EXxs1lWX3ASaCeNElA

View File

@ -1,32 +0,0 @@
clone:
path: github.com/go-openapi/jsonpointer
matrix:
GO_VERSION:
- "1.6"
build:
integration:
image: golang:$$GO_VERSION
pull: true
commands:
- go get -u github.com/stretchr/testify/assert
- go get -u github.com/go-openapi/swag
- go test -race
- go test -v -cover -coverprofile=coverage.out -covermode=count ./...
notify:
slack:
channel: bots
webhook_url: $$SLACK_URL
username: drone
publish:
coverage:
server: https://coverage.vmware.run
token: $$GITHUB_TOKEN
# threshold: 70
# must_increase: true
when:
matrix:
GO_VERSION: "1.6"

26
vendor/github.com/go-openapi/jsonpointer/.editorconfig generated vendored Normal file
View File

@ -0,0 +1,26 @@
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
# Set default charset
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
charset = utf-8
# Tab indentation (no size specified)
[*.go]
indent_style = tab
[*.md]
trim_trailing_whitespace = false
# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2

View File

@ -1,13 +0,0 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
members:
- casualjim
- chancez
- frapposelli
- vburenin
- pytlesk4
name: pullapprove
required: 1

15
vendor/github.com/go-openapi/jsonpointer/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,15 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- '1.9'
- 1.10.x
- 1.11.x
install:
- go get -u github.com/stretchr/testify/assert
- go get -u github.com/go-openapi/swag
language: go
notifications:
slack:
secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
script:
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...

View File

@ -1,4 +1,4 @@
# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language

10
vendor/github.com/go-openapi/jsonpointer/go.mod generated vendored Normal file
View File

@ -0,0 +1,10 @@
module github.com/go-openapi/jsonpointer
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-openapi/swag v0.17.0
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
gopkg.in/yaml.v2 v2.2.1 // indirect
)

11
vendor/github.com/go-openapi/jsonpointer/go.sum generated vendored Normal file
View File

@ -0,0 +1,11 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU=
github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -43,6 +43,7 @@ const (
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
@ -50,16 +51,10 @@ type JSONPointable interface {
JSONLookup(string) (interface{}, error)
}
type implStruct struct {
mode string // "SET" or "GET"
inDocument interface{}
setInValue interface{}
getOutNode interface{}
getOutKind reflect.Kind
outError error
// JSONSetable is an interface for structs to implement when they need to
// customize the json pointer assignment process. JSONSet receives the decoded
// reference token and the value to store under it.
type JSONSetable interface {
	JSONSet(string, interface{}) error
}
// New creates a new json pointer for the given string
@ -100,15 +95,25 @@ func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value in a JSON document, resolving struct
// field names with the default JSON name provider. It returns the document
// and any error from the underlying assignment.
func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
	return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a single (already decoded) json pointer
// token, one level deep into document, returning the value and its
// reflect.Kind.
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
	return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken sets a value for a single (already decoded) json pointer
// token, one level deep into document, and returns the document.
// (The original comment said "gets"; this function assigns.)
func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
	return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
kind := reflect.Invalid
rValue := reflect.Indirect(reflect.ValueOf(node))
kind = rValue.Kind()
kind := rValue.Kind()
switch kind {
case reflect.Struct:
@ -129,6 +134,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
case reflect.Map:
kv := reflect.ValueOf(decodedToken)
mv := rValue.MapIndex(kv)
if mv.IsValid() && !swag.IsZero(mv) {
return mv.Interface(), kind, nil
}
@ -141,7 +147,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
sLength := rValue.Len()
if tokenIndex < 0 || tokenIndex >= sLength {
return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
}
elem := rValue.Index(tokenIndex)
@ -153,6 +159,57 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
// setSingleImpl assigns data to the member of node addressed by a single
// (already decoded) reference token. Structs may customize assignment by
// implementing JSONSetable; otherwise the token is mapped to a Go field name
// via nameProvider. Map keys are set directly; slice tokens must parse as
// in-range integer indices.
func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
	rValue := reflect.Indirect(reflect.ValueOf(node))
	switch rValue.Kind() {
	case reflect.Struct:
		if ns, ok := node.(JSONSetable); ok { // pointer impl
			return ns.JSONSet(decodedToken, data)
		}
		if rValue.Type().Implements(jsonSetableType) {
			return node.(JSONSetable).JSONSet(decodedToken, data)
		}
		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
		if !ok {
			return fmt.Errorf("object has no field %q", decodedToken)
		}
		fld := rValue.FieldByName(nm)
		if fld.IsValid() {
			fld.Set(reflect.ValueOf(data))
		}
		return nil

	case reflect.Map:
		kv := reflect.ValueOf(decodedToken)
		rValue.SetMapIndex(kv, reflect.ValueOf(data))
		return nil

	case reflect.Slice:
		tokenIndex, err := strconv.Atoi(decodedToken)
		if err != nil {
			return err
		}
		sLength := rValue.Len()
		if tokenIndex < 0 || tokenIndex >= sLength {
			// Report the inclusive upper bound (sLength-1), matching the
			// corrected message in getSingleImpl.
			return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
		}
		elem := rValue.Index(tokenIndex)
		if !elem.CanSet() {
			return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
		}
		elem.Set(reflect.ValueOf(data))
		return nil

	default:
		return fmt.Errorf("invalid token reference %q", decodedToken)
	}
}
func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
if nameProvider == nil {
@ -184,6 +241,101 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
// set walks the pointer's reference tokens into node and assigns data at the
// location addressed by the final token (delegating the actual write to
// setSingleImpl). During traversal, where an intermediate value is
// addressable and not pointer-like, its address is taken so the final write
// mutates the original document rather than a copy.
func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
	knd := reflect.ValueOf(node).Kind()

	// NOTE(review): reflect.Array passes this guard but is not mentioned in
	// the message, and setSingleImpl only handles Slice — confirm intent.
	if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
		return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
	}

	if nameProvider == nil {
		nameProvider = swag.DefaultJSONNameProvider
	}

	// Full document when empty
	if len(p.referenceTokens) == 0 {
		return nil
	}

	lastI := len(p.referenceTokens) - 1
	for i, token := range p.referenceTokens {
		isLastToken := i == lastI
		decodedToken := Unescape(token)

		// The final token is the assignment target; everything before it is
		// pure traversal.
		if isLastToken {
			return setSingleImpl(node, data, decodedToken, nameProvider)
		}

		rValue := reflect.Indirect(reflect.ValueOf(node))
		kind := rValue.Kind()

		switch kind {
		case reflect.Struct:
			if rValue.Type().Implements(jsonPointableType) {
				// Custom lookup: let the struct resolve the token itself.
				r, err := node.(JSONPointable).JSONLookup(decodedToken)
				if err != nil {
					return err
				}
				fld := reflect.ValueOf(r)
				// Keep an addressable reference for non-pointer-like kinds
				// so the eventual write lands in the original value.
				if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
					node = fld.Addr().Interface()
					continue
				}
				node = r
				continue
			}
			nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
			if !ok {
				return fmt.Errorf("object has no field %q", decodedToken)
			}
			fld := rValue.FieldByName(nm)
			if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
				node = fld.Addr().Interface()
				continue
			}
			node = fld.Interface()

		case reflect.Map:
			kv := reflect.ValueOf(decodedToken)
			mv := rValue.MapIndex(kv)

			if !mv.IsValid() {
				return fmt.Errorf("object has no key %q", decodedToken)
			}
			if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
				node = mv.Addr().Interface()
				continue
			}
			node = mv.Interface()

		case reflect.Slice:
			tokenIndex, err := strconv.Atoi(decodedToken)
			if err != nil {
				return err
			}
			sLength := rValue.Len()
			if tokenIndex < 0 || tokenIndex >= sLength {
				return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
			}
			elem := rValue.Index(tokenIndex)
			if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr {
				node = elem.Addr().Interface()
				continue
			}
			node = elem.Interface()

		default:
			return fmt.Errorf("invalid token reference %q", decodedToken)
		}
	}

	return nil
}
// DecodedTokens returns the decoded tokens
func (p *Pointer) DecodedTokens() []string {
result := make([]string, 0, len(p.referenceTokens))

View File

@ -28,6 +28,7 @@ package jsonpointer
import (
"encoding/json"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
@ -83,19 +84,12 @@ func TestEscaping(t *testing.T) {
outs := []float64{0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8}
for i := range ins {
p, err := New(ins[i])
if err != nil {
t.Errorf("New(%v) error %v", ins[i], err.Error())
}
result, _, err := p.Get(testDocumentJSON)
if err != nil {
t.Errorf("Get(%v) error %v", ins[i], err.Error())
}
if result != outs[i] {
t.Errorf("Get(%v) = %v, expect %v", ins[i], result, outs[i])
if assert.NoError(t, err, "input: %v", ins[i]) {
result, _, err := p.Get(testDocumentJSON)
if assert.NoError(t, err, "input: %v", ins[i]) {
assert.Equal(t, outs[i], result, "input: %v", ins[i])
}
}
}
@ -311,3 +305,269 @@ func TestObject(t *testing.T) {
assert.EqualValues(t, outs[i], result)
}
}
// setJsonDocEle matches the element shape of setJsonDoc.A (b/c int fields).
type setJsonDocEle struct {
	B int `json:"b"`
	C int `json:"c"`
}

// setJsonDoc is a plain struct target used by TestSetNode to exercise
// pointer-based assignment into struct fields and slices.
type setJsonDoc struct {
	A []struct {
		B int `json:"b"`
		C int `json:"c"`
	} `json:"a"`
	D int `json:"d"`
}

// settableDoc is a document type that customizes both JSON marshaling and
// JSON pointer lookup/assignment via its JSONLookup/JSONSet methods.
type settableDoc struct {
	Coll settableColl
	Int  settableInt
}
// MarshalJSON serializes the document through an anonymous wrapper so the
// exported JSON keys ("a", "d") stay decoupled from the Go field names.
func (s settableDoc) MarshalJSON() ([]byte, error) {
	wrapper := struct {
		A settableColl `json:"a"`
		D settableInt  `json:"d"`
	}{
		A: s.Coll,
		D: s.Int,
	}
	return json.Marshal(wrapper)
}
// UnmarshalJSON decodes through the same anonymous wrapper used by
// MarshalJSON, mapping key "a" to Coll and key "d" to Int.
func (s *settableDoc) UnmarshalJSON(data []byte) error {
	var wrapper struct {
		A settableColl `json:"a"`
		D settableInt  `json:"d"`
	}
	if err := json.Unmarshal(data, &wrapper); err != nil {
		return err
	}
	s.Coll = wrapper.A
	s.Int = wrapper.D
	return nil
}
// JSONLookup implements an interface to customize json pointer lookup,
// exposing the document's two members under their JSON names and returning
// pointers to its fields.
func (s settableDoc) JSONLookup(token string) (interface{}, error) {
	if token == "a" {
		return &s.Coll, nil
	}
	if token == "d" {
		return &s.Int, nil
	}
	return nil, fmt.Errorf("%s is not a known field", token)
}
// JSONSet implements an interface to customize json pointer assignment.
// (The original comment said "JSONLookup"; corrected to this method's name.)
// Token "a" accepts a settableColl by value or pointer (nil resets to empty)
// or a raw []settableCollItem; token "d" accepts a settableInt or any plain
// signed integer type. Anything else is rejected with an error.
func (s *settableDoc) JSONSet(token string, data interface{}) error {
	switch token {
	case "a":
		switch dt := data.(type) {
		case settableColl:
			s.Coll = dt
			return nil
		case *settableColl:
			// A nil pointer clears the collection rather than dereferencing.
			if dt != nil {
				s.Coll = *dt
			} else {
				s.Coll = settableColl{}
			}
			return nil
		case []settableCollItem:
			s.Coll.Items = dt
			return nil
		}
	case "d":
		switch dt := data.(type) {
		case settableInt:
			s.Int = dt
			return nil
		case int:
			s.Int.Value = dt
			return nil
		case int8:
			s.Int.Value = int(dt)
			return nil
		case int16:
			s.Int.Value = int(dt)
			return nil
		case int32:
			s.Int.Value = int(dt)
			return nil
		case int64:
			s.Int.Value = int(dt)
			return nil
		default:
			return fmt.Errorf("invalid type %T for %s", data, token)
		}
	}
	// Unknown token, or an "a" payload of an unsupported type.
	return fmt.Errorf("%s is not a known field", token)
}
// settableColl is a collection wrapper whose JSON form is the bare items
// array rather than an object.
type settableColl struct {
	Items []settableCollItem
}

// MarshalJSON encodes the collection as its items slice.
func (s settableColl) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.Items)
}

// UnmarshalJSON decodes a JSON array directly into Items.
func (s *settableColl) UnmarshalJSON(data []byte) error {
	return json.Unmarshal(data, &s.Items)
}
// JSONLookup implements an interface to customize json pointer lookup,
// treating the token as a numeric index into Items.
// NOTE(review): no bounds check — an out-of-range numeric token panics on
// the Items index; acceptable for this test helper.
func (s settableColl) JSONLookup(token string) (interface{}, error) {
	if tok, err := strconv.Atoi(token); err == nil {
		return &s.Items[tok], nil
	}
	return nil, fmt.Errorf("%s is not a valid index", token)
}
// JSONSet implements an interface to customize json pointer assignment,
// delegating numeric tokens to SetForToken on the Items slice.
// (The original comment said "JSONLookup"; corrected to this method's name.)
func (s *settableColl) JSONSet(token string, data interface{}) error {
	if _, err := strconv.Atoi(token); err == nil {
		_, err := SetForToken(s.Items, token, data)
		return err
	}
	return fmt.Errorf("%s is not a valid index", token)
}
// settableCollItem is one element of settableColl.Items.
type settableCollItem struct {
	B int `json:"b"`
	C int `json:"c"`
}

// settableInt wraps an int whose JSON form is the bare number.
type settableInt struct {
	Value int
}

// MarshalJSON encodes the wrapped value directly.
func (s settableInt) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.Value)
}

// UnmarshalJSON decodes a JSON number directly into Value.
func (s *settableInt) UnmarshalJSON(data []byte) error {
	return json.Unmarshal(data, &s.Value)
}
// TestSetNode exercises Pointer.Set against three kinds of documents parsed
// from the same JSON: a generic interface{} tree, a plain struct
// (setJsonDoc), and a type with custom JSONLookup/JSONSet hooks
// (settableDoc).
func TestSetNode(t *testing.T) {

	jsonText := `{"a":[{"b": 1, "c": 2}], "d": 3}`

	// 1) Generic interface{} document: set a leaf, then replace a whole
	// slice element.
	var jsonDocument interface{}
	if assert.NoError(t, json.Unmarshal([]byte(jsonText), &jsonDocument)) {
		in := "/a/0/c"
		p, err := New(in)
		if assert.NoError(t, err) {

			_, err = p.Set(jsonDocument, 999)
			assert.NoError(t, err)

			firstNode := jsonDocument.(map[string]interface{})
			assert.Len(t, firstNode, 2)

			sliceNode := firstNode["a"].([]interface{})
			assert.Len(t, sliceNode, 1)

			changedNode := sliceNode[0].(map[string]interface{})
			chNodeVI := changedNode["c"]
			if assert.IsType(t, 0, chNodeVI) {
				changedNodeValue := chNodeVI.(int)
				if assert.Equal(t, 999, changedNodeValue) {
					assert.Len(t, sliceNode, 1)
				}
			}
		}

		v, err := New("/a/0")
		if assert.NoError(t, err) {
			_, err = v.Set(jsonDocument, map[string]interface{}{"b": 3, "c": 8})
			if assert.NoError(t, err) {
				firstNode := jsonDocument.(map[string]interface{})
				assert.Len(t, firstNode, 2)
				sliceNode := firstNode["a"].([]interface{})
				assert.Len(t, sliceNode, 1)
				changedNode := sliceNode[0].(map[string]interface{})
				assert.Equal(t, 3, changedNode["b"])
				assert.Equal(t, 8, changedNode["c"])
			}
		}
	}

	// 2) Plain struct document: replace a slice field, a slice element, and
	// a nested scalar. Note the element replacement passes structDoc by
	// value while the others pass a pointer.
	var structDoc setJsonDoc
	if assert.NoError(t, json.Unmarshal([]byte(jsonText), &structDoc)) {
		g, err := New("/a")
		if assert.NoError(t, err) {
			_, err = g.Set(&structDoc, []struct {
				B int `json:"b"`
				C int `json:"c"`
			}{{B: 4, C: 7}})
			if assert.NoError(t, err) {
				assert.Len(t, structDoc.A, 1)
				changedNode := structDoc.A[0]
				assert.Equal(t, 4, changedNode.B)
				assert.Equal(t, 7, changedNode.C)
			}
		}
		v, err := New("/a/0")
		if assert.NoError(t, err) {
			_, err = v.Set(structDoc, struct {
				B int `json:"b"`
				C int `json:"c"`
			}{B: 3, C: 8})
			if assert.NoError(t, err) {
				assert.Len(t, structDoc.A, 1)
				changedNode := structDoc.A[0]
				assert.Equal(t, 3, changedNode.B)
				assert.Equal(t, 8, changedNode.C)
			}
		}
		p, err := New("/a/0/c")
		if assert.NoError(t, err) {
			_, err = p.Set(&structDoc, 999)
			assert.NoError(t, err)
			if assert.Len(t, structDoc.A, 1) {
				assert.Equal(t, 999, structDoc.A[0].C)
			}
		}
	}

	// 3) Document with custom JSONLookup/JSONSet hooks: the same three
	// operations must route through settableDoc/settableColl's JSONSet.
	var setDoc settableDoc
	if assert.NoError(t, json.Unmarshal([]byte(jsonText), &setDoc)) {
		g, err := New("/a")
		if assert.NoError(t, err) {
			_, err = g.Set(&setDoc, []settableCollItem{{B: 4, C: 7}})
			if assert.NoError(t, err) {
				assert.Len(t, setDoc.Coll.Items, 1)
				changedNode := setDoc.Coll.Items[0]
				assert.Equal(t, 4, changedNode.B)
				assert.Equal(t, 7, changedNode.C)
			}
		}
		v, err := New("/a/0")
		if assert.NoError(t, err) {
			_, err = v.Set(setDoc, settableCollItem{B: 3, C: 8})
			if assert.NoError(t, err) {
				assert.Len(t, setDoc.Coll.Items, 1)
				changedNode := setDoc.Coll.Items[0]
				assert.Equal(t, 3, changedNode.B)
				assert.Equal(t, 8, changedNode.C)
			}
		}
		p, err := New("/a/0/c")
		if assert.NoError(t, err) {
			_, err = p.Set(setDoc, 999)
			assert.NoError(t, err)
			if assert.Len(t, setDoc.Coll.Items, 1) {
				assert.Equal(t, 999, setDoc.Coll.Items[0].C)
			}
		}
	}
}

View File

@ -1 +0,0 @@
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Xe40Wx6g5Y-iN0JVMhKyFfubtOId3zAVE564szw_yYGzFNhc_cGZO9F3BtAcJ55CfHG9C_ozn9dpnUDl_zYZoy_6cPCq13Ekb95z8NAC3ekDtbAATsc9HZwRNwI7UfkhstdwxljEouGB01qoLcUn6lFutrou-Ho21COHeDb2caemnPSA-rEAnXkOiBFu0RQ1MIwMygzvHXIHHYNpNwAtXqmiggM10miSjqBM3JmRPxCi7VK6_Rxij5p6LlhmK1BDi8Y6oBh-9BX3--5GAJeWZ6Vof5TnP-Enioia18j8c8KFtfY4q0y6Ednjb-AarLZ12gj695ppkBNJUdTJQmwGwA.fVcz_RiLrUB5fgMS.rjWllDYC6m_NB-ket_LizNEy9mlJ27odBTZQcMKaUqqXZBtWUCmPrOoMXGq-_cc-c7chg7D-WMh9SPQ23pV0P-DY-jsDpbOqHG2STOMEfW9ZREoaOLJXQaWcuBldLjRyWFcq0HGj97LgE6szD1Zlou3bmdHS_Q-U9Up9YQ_8_YnDcESD_cj1w5FZom7HjchKJFeGjQjfDQpoCKCQNMJaavUqy9jHQEeQ_uVocSrETg3GpewDcUF2tuv8uGq7ZZWu7Vl8zmnY1MFTynaGBWzTCSRmCkAXjcsaUheDP_NT5D7k-xUS6LwtqEUiXAXV07SNFraorFj5lnBQZRDlZMYcA3NWR6zHiOxekR9LBYPofst6w1rIqUchj_5m1tDpVTBMPir1eAaFcnJtPgo4ch17OF-kmcmQGLhJI3U7n8wv4sTrmP1dewtRRKrvlJe5r3_6eDiK4xZ8K0rnK1D4g6zuQqU1gA8KaU7pmZkKpFx3Bew4v-6DH32YwQBvAI7Lbb8afou9WsCNB_iswz5XGimP4bifiJRwpWBEz9VGhZFdiw-hZpYWgbxzVb5gtqfTDLIvpbLDmFz1vge16uUQHHVFpo1pSozyr7A60X8qsh9pmmO3RcJ-ZGZBWqiRC-Kl5ejz7WQ.LFoK4Ibi11B2lWQ5WcPSag

View File

@ -1,33 +0,0 @@
clone:
path: github.com/go-openapi/jsonreference
matrix:
GO_VERSION:
- "1.6"
build:
integration:
image: golang:$$GO_VERSION
pull: true
commands:
- go get -u github.com/stretchr/testify/assert
- go get -u github.com/PuerkitoBio/purell
- go get -u github.com/go-openapi/jsonpointer
- go test -race
- go test -v -cover -coverprofile=coverage.out -covermode=count ./...
notify:
slack:
channel: bots
webhook_url: $$SLACK_URL
username: drone
publish:
coverage:
server: https://coverage.vmware.run
token: $$GITHUB_TOKEN
# threshold: 70
# must_increase: true
when:
matrix:
GO_VERSION: "1.6"

View File

@ -1,13 +0,0 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
members:
- casualjim
- chancez
- frapposelli
- vburenin
- pytlesk4
name: pullapprove
required: 1

16
vendor/github.com/go-openapi/jsonreference/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- '1.9'
- 1.10.x
- 1.11.x
install:
- go get -u github.com/stretchr/testify/assert
- go get -u github.com/PuerkitoBio/purell
- go get -u github.com/go-openapi/jsonpointer
language: go
notifications:
slack:
secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
script:
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...

View File

@ -1,4 +1,4 @@
# gojsonreference [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonreference/status.svg)](https://ci.vmware.run/go-openapi/jsonreference) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonreference/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference)
An implementation of JSON Reference - Go language
@ -7,7 +7,7 @@ An implementation of JSON Reference - Go language
Work in progress ( 90% done )
## Dependencies
https://github.com/xeipuuv/gojsonpointer
https://github.com/go-openapi/jsonpointer
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

15
vendor/github.com/go-openapi/jsonreference/go.mod generated vendored Normal file
View File

@ -0,0 +1,15 @@
module github.com/go-openapi/jsonreference
require (
github.com/PuerkitoBio/purell v1.1.0
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-openapi/jsonpointer v0.17.0
github.com/go-openapi/swag v0.17.0 // indirect
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect
golang.org/x/text v0.3.0 // indirect
gopkg.in/yaml.v2 v2.2.1 // indirect
)

20
vendor/github.com/go-openapi/jsonreference/go.sum generated vendored Normal file
View File

@ -0,0 +1,20 @@
github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU=
github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -308,7 +308,7 @@ func TestFileScheme(t *testing.T) {
}
if r1.IsCanonical() != true {
t.Errorf("New(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical, true)
t.Errorf("New(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical(), true)
}
result, err := r1.Inherits(r2)

21
vendor/github.com/go-openapi/spec/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,21 @@
linters-settings:
govet:
check-shadowing: true
golint:
min-confidence: 0
gocyclo:
min-complexity: 25
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
linters:
enable-all: true
disable:
- maligned
- unparam
- lll

View File

@ -1,16 +1,18 @@
language: go
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.7
- '1.9'
- 1.10.x
- 1.11.x
install:
- go get -u github.com/stretchr/testify
- go get -u github.com/go-openapi/swag
- go get -u gopkg.in/yaml.v2
- go get -u github.com/go-openapi/jsonpointer
- go get -u github.com/go-openapi/jsonreference
script:
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)
language: go
notifications:
slack:
secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
script:
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...

View File

@ -1,5 +1,10 @@
# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec)
The object model for OpenAPI specification documents
The object model for OpenAPI specification documents.
Currently supports Swagger 2.0.

File diff suppressed because one or more lines are too long

47
vendor/github.com/go-openapi/spec/debug.go generated vendored Normal file
View File

@ -0,0 +1,47 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
)
var (
// Debug is true when the SWAGGER_DEBUG env var is not empty.
// It enables a more verbose logging of validators.
Debug = os.Getenv("SWAGGER_DEBUG") != ""
// validateLogger is a debug logger for this package
specLogger *log.Logger
)
func init() {
debugOptions()
}
func debugOptions() {
specLogger = log.New(os.Stdout, "spec:", log.LstdFlags)
}
func debugLog(msg string, args ...interface{}) {
// A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
if Debug {
_, file1, pos1, _ := runtime.Caller(1)
specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
}
}

59
vendor/github.com/go-openapi/spec/debug_test.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"io/ioutil"
"os"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
var (
logMutex = &sync.Mutex{}
)
func TestDebug(t *testing.T) {
tmpFile, _ := ioutil.TempFile("", "debug-test")
tmpName := tmpFile.Name()
defer func() {
Debug = false
// mutex for -race
logMutex.Unlock()
_ = os.Remove(tmpName)
}()
// mutex for -race
logMutex.Lock()
Debug = true
debugOptions()
defer func() {
specLogger.SetOutput(os.Stdout)
}()
specLogger.SetOutput(tmpFile)
debugLog("A debug")
Debug = false
_ = tmpFile.Close()
flushed, _ := os.Open(tmpName)
buf := make([]byte, 500)
_, _ = flushed.Read(buf)
specLogger.SetOutput(os.Stdout)
assert.Contains(t, string(buf), "A debug")
}

View File

@ -30,16 +30,12 @@ import (
"github.com/go-openapi/swag"
)
var (
// Debug enables logging when SWAGGER_DEBUG env var is not empty
Debug = os.Getenv("SWAGGER_DEBUG") != ""
)
// ExpandOptions provides options for expand.
type ExpandOptions struct {
RelativeBase string
SkipSchemas bool
ContinueOnError bool
RelativeBase string
SkipSchemas bool
ContinueOnError bool
AbsoluteCircularRef bool
}
// ResolutionCache a cache for resolving urls
@ -49,7 +45,7 @@ type ResolutionCache interface {
}
type simpleCache struct {
lock sync.Mutex
lock sync.RWMutex
store map[string]interface{}
}
@ -59,6 +55,7 @@ func init() {
resCache = initResolutionCache()
}
// initResolutionCache initializes the URI resolution cache
func initResolutionCache() ResolutionCache {
return &simpleCache{store: map[string]interface{}{
"http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(),
@ -66,16 +63,37 @@ func initResolutionCache() ResolutionCache {
}}
}
// resolverContext allows to share a context during spec processing.
// At the moment, it just holds the index of circular references found.
type resolverContext struct {
// circulars holds all visited circular references, which allows shortcuts.
// NOTE: this is not just a performance improvement: it is required to figure out
// circular references which participate several cycles.
// This structure is privately instantiated and needs not be locked against
// concurrent access, unless we chose to implement a parallel spec walking.
circulars map[string]bool
basePath string
}
func newResolverContext(originalBasePath string) *resolverContext {
return &resolverContext{
circulars: make(map[string]bool),
basePath: originalBasePath, // keep the root base path in context
}
}
// Get retrieves a cached URI
func (s *simpleCache) Get(uri string) (interface{}, bool) {
debugLog("getting %q from resolution cache", uri)
s.lock.Lock()
s.lock.RLock()
v, ok := s.store[uri]
debugLog("got %q from resolution cache: %t", uri, ok)
s.lock.Unlock()
s.lock.RUnlock()
return v, ok
}
// Set caches a URI
func (s *simpleCache) Set(uri string, data interface{}) {
s.lock.Lock()
s.store[uri] = data
@ -84,7 +102,7 @@ func (s *simpleCache) Set(uri string, data interface{}) {
// ResolveRefWithBase resolves a reference against a context root with preservation of base path
func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
resolver, err := defaultSchemaLoader(root, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return nil, err
}
@ -116,21 +134,21 @@ func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
case map[string]interface{}:
b, _ := json.Marshal(sch)
newSch := new(Schema)
json.Unmarshal(b, newSch)
_ = json.Unmarshal(b, newSch)
return newSch, nil
default:
return nil, fmt.Errorf("unknown type for the resolved reference")
}
}
// ResolveParameter resolves a paramter reference against a context root
// ResolveParameter resolves a parameter reference against a context root
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
return ResolveParameterWithBase(root, ref, nil)
}
// ResolveParameterWithBase resolves a paramter reference against a context root and base path
// ResolveParameterWithBase resolves a parameter reference against a context root and base path
func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
resolver, err := defaultSchemaLoader(root, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return nil, err
}
@ -149,7 +167,7 @@ func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
// ResolveResponseWithBase resolves response a reference against a context root and base path
func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
resolver, err := defaultSchemaLoader(root, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return nil, err
}
@ -163,7 +181,7 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R
// ResolveItems resolves header and parameter items reference against a context root and base path
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
resolver, err := defaultSchemaLoader(root, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return nil, err
}
@ -180,7 +198,7 @@ func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error
// ResolvePathItem resolves response a path item against a context root and base path
func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
resolver, err := defaultSchemaLoader(root, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return nil, err
}
@ -199,6 +217,7 @@ type schemaLoader struct {
root interface{}
options *ExpandOptions
cache ResolutionCache
context *resolverContext
loadDoc func(string) (json.RawMessage, error)
}
@ -221,7 +240,8 @@ func init() {
func defaultSchemaLoader(
root interface{},
expandOptions *ExpandOptions,
cache ResolutionCache) (*schemaLoader, error) {
cache ResolutionCache,
context *resolverContext) (*schemaLoader, error) {
if cache == nil {
cache = resCache
@ -229,11 +249,15 @@ func defaultSchemaLoader(
if expandOptions == nil {
expandOptions = &ExpandOptions{}
}
absBase, _ := absPath(expandOptions.RelativeBase)
if context == nil {
context = newResolverContext(absBase)
}
return &schemaLoader{
root: root,
options: expandOptions,
cache: cache,
context: context,
loadDoc: func(path string) (json.RawMessage, error) {
debugLog("fetching document at %q", path)
return PathLoader(path)
@ -312,12 +336,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
return ret
}
func debugLog(msg string, args ...interface{}) {
if Debug {
log.Printf(msg, args...)
}
}
// normalize absolute path for cache.
// on Windows, drive letters should be converted to lower as scheme in net/url.URL
func normalizeAbsPath(path string) string {
@ -336,12 +354,17 @@ func normalizeAbsPath(path string) string {
// base could be a directory or a full file path
func normalizePaths(refPath, base string) string {
refURL, _ := url.Parse(refPath)
if path.IsAbs(refURL.Path) {
if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) {
// refPath is actually absolute
if refURL.Host != "" {
return refPath
}
return filepath.FromSlash(refPath)
parts := strings.Split(refPath, "#")
result := filepath.FromSlash(parts[0])
if len(parts) == 2 {
result += "#" + parts[1]
}
return result
}
// relative refPath
@ -361,6 +384,59 @@ func normalizePaths(refPath, base string) string {
return baseURL.String()
}
// denormalizePaths returns to simplest notation on file $ref,
// i.e. strips the absolute path and sets a path relative to the base path.
//
// This is currently used when we rewrite ref after a circular ref has been detected
func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref {
debugLog("denormalizeFileRef for: %s", ref.String())
if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
return ref
}
// strip relativeBase from URI
relativeBaseURL, _ := url.Parse(relativeBase)
relativeBaseURL.Fragment = ""
if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) {
// this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix
r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase))
return &r
}
if relativeBaseURL.IsAbs() {
// other absolute URL get unchanged (i.e. with a non-empty scheme)
return ref
}
// for relative file URIs:
originalRelativeBaseURL, _ := url.Parse(originalRelativeBase)
originalRelativeBaseURL.Fragment = ""
if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) {
// the resulting ref is in the expanded spec: return a local ref
r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String()))
return &r
}
// check if we may set a relative path, considering the original base path for this spec.
// Example:
// spec is located at /mypath/spec.json
// my normalized ref points to: /mypath/item.json#/target
// expected result: item.json#/target
parts := strings.Split(ref.String(), "#")
relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0])
if err != nil {
// there is no common ancestor (e.g. different drives on windows)
// leaves the ref unchanged
return ref
}
if len(parts) == 2 {
relativePath += "#" + parts[1]
}
r, _ := NewRef(relativePath)
return &r
}
// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL
func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
// This is important for when the reference is pointing to the root schema
@ -369,8 +445,7 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
return &r
}
refURL := ref.GetURL()
debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String())
debugLog("normalizing %s against %s", ref.String(), relativeBase)
s := normalizePaths(ref.String(), relativeBase)
r, _ := NewRef(s)
@ -395,7 +470,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string)
// it is pointing somewhere in the root.
root := r.root
if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
if baseRef, err := NewRef(basePath); err == nil {
if baseRef, erb := NewRef(basePath); erb == nil {
root, _, _, _ = r.load(baseRef.GetURL())
}
}
@ -430,9 +505,11 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
toFetch := *refURL
toFetch.Fragment = ""
data, fromCache := r.cache.Get(toFetch.String())
normalized := normalizeAbsPath(toFetch.String())
data, fromCache := r.cache.Get(normalized)
if !fromCache {
b, err := r.loadDoc(toFetch.String())
b, err := r.loadDoc(normalized)
if err != nil {
return nil, url.URL{}, false, err
}
@ -440,7 +517,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
if err := json.Unmarshal(b, &data); err != nil {
return nil, url.URL{}, false, err
}
r.cache.Set(toFetch.String(), data)
r.cache.Set(normalized, data)
}
return data, toFetch, fromCache, nil
@ -468,7 +545,7 @@ func absPath(fname string) (string, error) {
// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
resolver, err := defaultSchemaLoader(spec, options, nil)
resolver, err := defaultSchemaLoader(spec, options, nil, nil)
// Just in case this ever returns an error.
if shouldStopOnError(err, resolver.options) {
return err
@ -484,7 +561,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
for key, definition := range spec.Definitions {
var def *Schema
var err error
if def, err = expandSchema(definition, []string{fmt.Sprintf("#/defintions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) {
return err
}
if def != nil {
@ -531,25 +608,35 @@ func shouldStopOnError(err error, opts *ExpandOptions) bool {
return false
}
// ExpandSchema expands the refs in the schema object with reference to the root object
// go-openapi/validate uses this function
// notice that it is impossible to reference a json scema in a different file other than root
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
// Only save the root to a tmp file if it isn't nil.
var base string
// baseForRoot loads in the cache the root document and produces a fake "root" base path entry
// for further $ref resolution
func baseForRoot(root interface{}, cache ResolutionCache) string {
// cache the root document to resolve $ref's
const rootBase = "root"
if root != nil {
base, _ = absPath("root")
base, _ := absPath(rootBase)
normalizedBase := normalizeAbsPath(base)
debugLog("setting root doc in cache at: %s", normalizedBase)
if cache == nil {
cache = resCache
}
cache.Set(normalizeAbsPath(base), root)
base = "root"
cache.Set(normalizedBase, root)
return rootBase
}
return ""
}
// ExpandSchema expands the refs in the schema object with reference to the root object
// go-openapi/validate uses this function
// notice that it is impossible to reference a json schema in a different file other than root
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
opts := &ExpandOptions{
RelativeBase: base,
// when a root is specified, cache the root as an in-memory document for $ref retrieval
RelativeBase: baseForRoot(root, cache),
SkipSchemas: false,
ContinueOnError: false,
// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
AbsoluteCircularRef: true,
}
return ExpandSchemaWithBasePath(schema, cache, opts)
}
@ -565,7 +652,7 @@ func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *Expan
basePath, _ = absPath(opts.RelativeBase)
}
resolver, err := defaultSchemaLoader(nil, opts, cache)
resolver, err := defaultSchemaLoader(nil, opts, cache, nil)
if err != nil {
return err
}
@ -617,8 +704,32 @@ func basePathFromSchemaID(oldBasePath, id string) string {
return u.String()
}
func isCircular(ref *Ref, basePath string, parentRefs ...string) bool {
return basePath != "" && swag.ContainsStringsCI(parentRefs, ref.String())
// isCircular detects cycles in sequences of $ref.
// It relies on a private context (which needs not be locked).
func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
normalizedRef := normalizePaths(ref.String(), basePath)
if _, ok := r.context.circulars[normalizedRef]; ok {
// circular $ref has been already detected in another explored cycle
foundCycle = true
return
}
foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef)
if foundCycle {
r.context.circulars[normalizedRef] = true
}
return
}
func updateBasePath(transitive *schemaLoader, resolver *schemaLoader, basePath string) string {
if transitive != resolver {
debugLog("got a new resolver")
if transitive.options != nil && transitive.options.RelativeBase != "" {
basePath, _ = absPath(transitive.options.RelativeBase)
debugLog("new basePath = %s", basePath)
}
}
return basePath
}
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
@ -634,6 +745,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
otherwise the basePath should inherit the parent's */
// important: ID can be relative path
if target.ID != "" {
debugLog("schema has ID: %s", target.ID)
// handling the case when id is a folder
// remember that basePath has to be a file
refPath := target.ID
@ -645,7 +757,6 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
}
/* Explain here what this function does */
var t *Schema
/* if Ref is found, everything else doesn't matter */
/* Ref also changes the resolution scope of children expandSchema */
@ -654,14 +765,21 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
normalizedRef := normalizeFileRef(&target.Ref, basePath)
normalizedBasePath := normalizedRef.RemoteURI()
/* this means there is a circle in the recursion tree */
/* return the Ref */
if isCircular(normalizedRef, basePath, parentRefs...) {
target.Ref = *normalizedRef
if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
// this means there is a cycle in the recursion tree: return the Ref
// - circular refs cannot be expanded. We leave them as ref.
// - denormalization means that a new local file ref is set relative to the original basePath
debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
basePath, normalizedBasePath, normalizedRef.String())
if !resolver.options.AbsoluteCircularRef {
target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath)
} else {
target.Ref = *normalizedRef
}
return &target, nil
}
debugLog("\nbasePath: %s", basePath)
debugLog("basePath: %s", basePath)
if Debug {
b, _ := json.Marshal(target)
debugLog("calling Resolve with target: %s", string(b))
@ -672,7 +790,15 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
if t != nil {
parentRefs = append(parentRefs, normalizedRef.String())
return expandSchema(*t, parentRefs, resolver, normalizedBasePath)
var err error
transitiveResolver, err := transitiveResolver(basePath, target.Ref, resolver)
if shouldStopOnError(err, resolver.options) {
return nil, err
}
basePath = updateBasePath(transitiveResolver, resolver, normalizedBasePath)
return expandSchema(*t, parentRefs, transitiveResolver, basePath)
}
}
@ -781,7 +907,7 @@ func derefPathItem(pathItem *PathItem, parentRefs []string, resolver *schemaLoad
normalizedRef := normalizeFileRef(&pathItem.Ref, basePath)
normalizedBasePath := normalizedRef.RemoteURI()
if isCircular(normalizedRef, basePath, parentRefs...) {
if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
return nil
}
@ -807,9 +933,17 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string)
if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
return err
}
if pathItem.Ref.String() != "" {
var err error
resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver)
if shouldStopOnError(err, resolver.options) {
return err
}
}
pathItem.Ref = Ref{}
parentRefs = parentRefs[0:]
// Currently unused:
//parentRefs = parentRefs[0:]
for idx := range pathItem.Parameters {
if err := expandParameter(&(pathItem.Parameters[idx]), resolver, basePath); shouldStopOnError(err, resolver.options) {
@ -867,19 +1001,68 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err
return nil
}
// ExpandResponse expands a response based on a basepath
// This is the exported version of expandResponse
// all refs inside response will be resolved relative to basePath
func ExpandResponse(response *Response, basePath string) error {
opts := &ExpandOptions{
RelativeBase: basePath,
func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) {
if ref.IsRoot() || ref.HasFragmentOnly {
return resolver, nil
}
resolver, err := defaultSchemaLoader(nil, opts, nil)
baseRef, _ := NewRef(basePath)
currentRef := normalizeFileRef(&ref, basePath)
// Set a new root to resolve against
if !strings.HasPrefix(currentRef.String(), baseRef.String()) {
rootURL := currentRef.GetURL()
rootURL.Fragment = ""
root, _ := resolver.cache.Get(rootURL.String())
var err error
// shallow copy of resolver options to set a new RelativeBase when
// traversing multiple documents
newOptions := resolver.options
newOptions.RelativeBase = rootURL.String()
debugLog("setting new root: %s", newOptions.RelativeBase)
resolver, err = defaultSchemaLoader(root, newOptions, resolver.cache, resolver.context)
if err != nil {
return nil, err
}
}
return resolver, nil
}
// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error {
opts := &ExpandOptions{
RelativeBase: baseForRoot(root, cache),
SkipSchemas: false,
ContinueOnError: false,
// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
AbsoluteCircularRef: true,
}
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return err
}
return expandResponse(response, resolver, basePath)
return expandResponse(response, resolver, opts.RelativeBase)
}
// ExpandResponse expands a response based on a basepath
// This is the exported version of expandResponse
// all refs inside response will be resolved relative to basePath
func ExpandResponse(response *Response, basePath string) error {
var specBasePath string
if basePath != "" {
specBasePath, _ = absPath(basePath)
}
opts := &ExpandOptions{
RelativeBase: specBasePath,
}
resolver, err := defaultSchemaLoader(nil, opts, nil, nil)
if err != nil {
return err
}
return expandResponse(response, resolver, opts.RelativeBase)
}
func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error {
@ -889,7 +1072,7 @@ func derefResponse(response *Response, parentRefs []string, resolver *schemaLoad
normalizedRef := normalizeFileRef(&response.Ref, basePath)
normalizedBasePath := normalizedRef.RemoteURI()
if isCircular(normalizedRef, basePath, parentRefs...) {
if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
return nil
}
@ -910,16 +1093,31 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string)
if response == nil {
return nil
}
parentRefs := []string{}
if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
return err
}
if response.Ref.String() != "" {
transitiveResolver, err := transitiveResolver(basePath, response.Ref, resolver)
if shouldStopOnError(err, transitiveResolver.options) {
return err
}
basePath = updateBasePath(transitiveResolver, resolver, basePath)
resolver = transitiveResolver
}
if response.Schema != nil && response.Schema.Ref.String() != "" {
// schema expanded to a $ref in another root
var ern error
response.Schema.Ref, ern = NewRef(normalizePaths(response.Schema.Ref.String(), response.Ref.RemoteURI()))
if ern != nil {
return ern
}
}
response.Ref = Ref{}
parentRefs = parentRefs[0:]
if !resolver.options.SkipSchemas && response.Schema != nil {
parentRefs = append(parentRefs, response.Schema.Ref.String())
// parentRefs = append(parentRefs, response.Schema.Ref.String())
s, err := expandSchema(*response.Schema, parentRefs, resolver, basePath)
if shouldStopOnError(err, resolver.options) {
return err
@ -930,19 +1128,40 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string)
return nil
}
// ExpandParameter expands a parameter based on a basepath
// This is the exported version of expandParameter
// all refs inside parameter will be resolved relative to basePath
func ExpandParameter(parameter *Parameter, basePath string) error {
// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document
func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error {
opts := &ExpandOptions{
RelativeBase: basePath,
RelativeBase: baseForRoot(root, cache),
SkipSchemas: false,
ContinueOnError: false,
// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
AbsoluteCircularRef: true,
}
resolver, err := defaultSchemaLoader(nil, opts, nil)
resolver, err := defaultSchemaLoader(root, opts, nil, nil)
if err != nil {
return err
}
return expandParameter(parameter, resolver, basePath)
return expandParameter(parameter, resolver, opts.RelativeBase)
}
// ExpandParameter expands a parameter based on a basepath
// This is the exported version of expandParameter
// all refs inside parameter will be resolved relative to basePath
func ExpandParameter(parameter *Parameter, basePath string) error {
var specBasePath string
if basePath != "" {
specBasePath, _ = absPath(basePath)
}
opts := &ExpandOptions{
RelativeBase: specBasePath,
}
resolver, err := defaultSchemaLoader(nil, opts, nil, nil)
if err != nil {
return err
}
return expandParameter(parameter, resolver, opts.RelativeBase)
}
func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error {
@ -951,7 +1170,7 @@ func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaL
normalizedRef := normalizeFileRef(&parameter.Ref, basePath)
normalizedBasePath := normalizedRef.RemoteURI()
if isCircular(normalizedRef, basePath, parentRefs...) {
if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
return nil
}
@ -977,11 +1196,27 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath stri
if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) {
return err
}
if parameter.Ref.String() != "" {
transitiveResolver, err := transitiveResolver(basePath, parameter.Ref, resolver)
if shouldStopOnError(err, transitiveResolver.options) {
return err
}
basePath = updateBasePath(transitiveResolver, resolver, basePath)
resolver = transitiveResolver
}
if parameter.Schema != nil && parameter.Schema.Ref.String() != "" {
// schema expanded to a $ref in another root
var ern error
parameter.Schema.Ref, ern = NewRef(normalizePaths(parameter.Schema.Ref.String(), parameter.Ref.RemoteURI()))
if ern != nil {
return ern
}
}
parameter.Ref = Ref{}
parentRefs = parentRefs[0:]
if !resolver.options.SkipSchemas && parameter.Schema != nil {
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
s, err := expandSchema(*parameter.Schema, parentRefs, resolver, basePath)
if shouldStopOnError(err, resolver.options) {
return err

View File

@ -20,6 +20,11 @@ import (
"log"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"github.com/go-openapi/jsonpointer"
@ -27,6 +32,10 @@ import (
"github.com/stretchr/testify/assert"
)
var (
rex = regexp.MustCompile(`"\$ref":\s*"(.+)"`)
)
func jsonDoc(path string) (json.RawMessage, error) {
data, err := swag.LoadFromFileOrHTTP(path)
if err != nil {
@ -37,36 +46,72 @@ func jsonDoc(path string) (json.RawMessage, error) {
// tests that paths are normalized correctly
func TestNormalizePaths(t *testing.T) {
testCases := []struct {
type testNormalizePathsTestCases []struct {
refPath string
base string
expOutput string
}{
{
// file basePath, absolute refPath
refPath: "/another/base/path.json#/definitions/Pet",
base: "/base/path.json",
expOutput: "/another/base/path.json#/definitions/Pet",
},
{
// file basePath, relative refPath
refPath: "another/base/path.json#/definitions/Pet",
base: "/base/path.json",
expOutput: "/base/another/base/path.json#/definitions/Pet",
},
{
// http basePath, absolute refPath
refPath: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet",
base: "http://www.example.com/base/path/swagger.json",
expOutput: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet",
},
{
// http basePath, relative refPath
refPath: "another/base/path/swagger.json#/definitions/Pet",
base: "http://www.example.com/base/path/swagger.json",
expOutput: "http://www.example.com/base/path/another/base/path/swagger.json#/definitions/Pet",
},
}
testCases := func() testNormalizePathsTestCases {
testCases := testNormalizePathsTestCases{
{
// http basePath, absolute refPath
refPath: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet",
base: "http://www.example.com/base/path/swagger.json",
expOutput: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet",
},
{
// http basePath, relative refPath
refPath: "another/base/path/swagger.json#/definitions/Pet",
base: "http://www.example.com/base/path/swagger.json",
expOutput: "http://www.example.com/base/path/another/base/path/swagger.json#/definitions/Pet",
},
}
if runtime.GOOS == "windows" {
testCases = append(testCases, testNormalizePathsTestCases{
{
// file basePath, absolute refPath, no fragment
refPath: `C:\another\base\path.json`,
base: `C:\base\path.json`,
expOutput: `C:\another\base\path.json`,
},
{
// file basePath, absolute refPath
refPath: `C:\another\base\path.json#/definitions/Pet`,
base: `C:\base\path.json`,
expOutput: `C:\another\base\path.json#/definitions/Pet`,
},
{
// file basePath, relative refPath
refPath: `another\base\path.json#/definitions/Pet`,
base: `C:\base\path.json`,
expOutput: `C:\base\another\base\path.json#/definitions/Pet`,
},
}...)
return testCases
}
// linux case
testCases = append(testCases, testNormalizePathsTestCases{
{
// file basePath, absolute refPath, no fragment
refPath: "/another/base/path.json",
base: "/base/path.json",
expOutput: "/another/base/path.json",
},
{
// file basePath, absolute refPath
refPath: "/another/base/path.json#/definitions/Pet",
base: "/base/path.json",
expOutput: "/another/base/path.json#/definitions/Pet",
},
{
// file basePath, relative refPath
refPath: "another/base/path.json#/definitions/Pet",
base: "/base/path.json",
expOutput: "/base/another/base/path.json#/definitions/Pet",
},
}...)
return testCases
}()
for _, tcase := range testCases {
out := normalizePaths(tcase.refPath, tcase.base)
@ -102,7 +147,7 @@ func TestExpandResponseSchema(t *testing.T) {
func TestSpecExpansion(t *testing.T) {
spec := new(Swagger)
// resolver, err := defaultSchemaLoader(spec, nil, nil)
// resolver, err := defaultSchemaLoader(spec, nil, nil,nil)
// assert.NoError(t, err)
err := ExpandSpec(spec, nil)
@ -174,20 +219,19 @@ func TestResponseExpansion(t *testing.T) {
err = json.Unmarshal(specDoc, spec)
assert.NoError(t, err)
resolver, err := defaultSchemaLoader(spec, nil, nil)
resolver, err := defaultSchemaLoader(spec, nil, nil, nil)
assert.NoError(t, err)
resp := spec.Responses["anotherPet"]
r := spec.Responses["petResponse"]
err = expandResponse(&r, resolver, basePath)
expected := spec.Responses["petResponse"]
err = expandResponse(&expected, resolver, basePath)
assert.NoError(t, err)
expected := r
err = expandResponse(&resp, resolver, basePath)
b, _ := resp.MarshalJSON()
log.Printf(string(b))
b, _ = expected.MarshalJSON()
log.Printf(string(b))
// b, _ := resp.MarshalJSON()
// log.Printf(string(b))
// b, _ = expected.MarshalJSON()
// log.Printf(string(b))
assert.NoError(t, err)
assert.Equal(t, expected, resp)
@ -219,16 +263,15 @@ func TestExportedResponseExpansion(t *testing.T) {
assert.NoError(t, err)
resp := spec.Responses["anotherPet"]
r := spec.Responses["petResponse"]
err = ExpandResponse(&r, basePath)
expected := spec.Responses["petResponse"]
err = ExpandResponse(&expected, basePath)
assert.NoError(t, err)
expected := r
err = ExpandResponse(&resp, basePath)
b, _ := resp.MarshalJSON()
log.Printf(string(b))
b, _ = expected.MarshalJSON()
log.Printf(string(b))
// b, _ := resp.MarshalJSON()
// log.Printf(string(b))
// b, _ = expected.MarshalJSON()
// log.Printf(string(b))
assert.NoError(t, err)
assert.Equal(t, expected, resp)
@ -247,6 +290,39 @@ func TestExportedResponseExpansion(t *testing.T) {
// assert.Equal(t, expected, resp)
}
// TestExpandResponseAndParamWithRoot expands responses and a body parameter
// of the gitea fixture against an in-memory root document and verifies that
// no unresolved $ref remains in the result.
func TestExpandResponseAndParamWithRoot(t *testing.T) {
	specDoc, err := jsonDoc("fixtures/bugs/1614/gitea.json")
	if !assert.NoError(t, err) {
		t.FailNow()
		return
	}
	var spec Swagger
	_ = json.Unmarshal(specDoc, &spec)

	// check responses with $ref
	for _, code := range []int{201, 403} {
		resp := spec.Paths.Paths["/admin/users"].Post.Responses.StatusCodeResponses[code]
		err = ExpandResponseWithRoot(&resp, spec, nil)
		assert.NoError(t, err)

		jazon, _ := json.MarshalIndent(resp, "", " ")
		m := rex.FindAllStringSubmatch(string(jazon), -1)
		assert.Nil(t, m)
	}

	// check param with $ref
	param := spec.Paths.Paths["/admin/users"].Post.Parameters[0]
	err = ExpandParameterWithRoot(&param, spec, nil)
	assert.NoError(t, err)

	jazon, _ := json.MarshalIndent(param, "", " ")
	m := rex.FindAllStringSubmatch(string(jazon), -1)
	assert.Nil(t, m)
}
func TestIssue3(t *testing.T) {
spec := new(Swagger)
specDoc, err := jsonDoc("fixtures/expansion/overflow.json")
@ -277,7 +353,7 @@ func TestParameterExpansion(t *testing.T) {
basePath, err := absPath("fixtures/expansion/params.json")
assert.NoError(t, err)
resolver, err := defaultSchemaLoader(spec, nil, nil)
resolver, err := defaultSchemaLoader(spec, nil, nil, nil)
assert.NoError(t, err)
param := spec.Parameters["query"]
@ -331,17 +407,230 @@ func TestCircularRefsExpansion(t *testing.T) {
err = json.Unmarshal(carsDoc, spec)
assert.NoError(t, err)
resolver, err := defaultSchemaLoader(spec, nil, nil)
resolver, err := defaultSchemaLoader(spec, &ExpandOptions{RelativeBase: basePath}, nil, nil)
assert.NoError(t, err)
schema := spec.Definitions["car"]
assert.NotPanics(t, func() {
_, err = expandSchema(schema, []string{"#/definitions/car"}, resolver, basePath)
_, err := expandSchema(schema, []string{"#/definitions/car"}, resolver, basePath)
assert.NoError(t, err)
}, "Calling expand schema with circular refs, should not panic!")
}
// TestCircularSpec2Expansion expands two specs with circular $ref and checks
// that the self-referencing $ref back to the fixture file has been stripped.
func TestCircularSpec2Expansion(t *testing.T) {
	// TODO: assert repeatable results (see commented section below)

	for _, tc := range []struct {
		fixturePath string
		forbidden   string
	}{
		{filepath.Join("fixtures", "expansion", "circular-minimal.json"), "circular-minimal.json#/"},
		{"fixtures/expansion/circularSpec2.json", "circularSpec.json#/"},
	} {
		jazon := expandThisOrDieTrying(t, tc.fixturePath)
		assert.NotEmpty(t, jazon)

		// assert stripped $ref in result
		assert.NotContainsf(t, jazon, tc.forbidden,
			"expected %s to be expanded with stripped circular $ref", tc.fixturePath)
	}

	/*

		At the moment, the result of expanding circular references is not stable,
		when several cycles have intersections:
		the spec structure is randomly walked through and mutating as expansion is carried out.
		detected cycles in $ref are not necessarily the shortest matches.

		This may result in different, functionally correct expanded spec (e.g. with same validations)

			for i := 0; i < 1; i++ {
				bbb := expandThisOrDieTrying(t, fixturePath)
				t.Log(bbb)
				if !assert.JSONEqf(t, jazon, bbb, "on iteration %d, we should have stable expanded spec", i) {
					t.FailNow()
					return
				}
			}
	*/
}
// Test_MoreCircular exercises circular $ref resolution across files.
//
// Additional testcase for circular $ref (from go-openapi/validate):
// - $ref with file = current file
// - circular is located in remote file
//
// There are 4 variants to run:
// - with/without $ref with local file (so its not really remote)
// - with circular in a schema in #/responses
// - with circular in a schema in #/parameters
func Test_MoreCircular(t *testing.T) {
	// The four fixtures only differ in the remote item file their remaining
	// $ref must point to, so run them through one table-driven loop instead
	// of four copy-pasted stanzas.
	for _, tc := range []struct {
		fixturePath string // spec to expand
		wantPrefix  string // every surviving $ref must start with this
	}{
		{"fixtures/more_circulars/spec.json", "item.json#/item"},
		{"fixtures/more_circulars/spec2.json", "item2.json#/item"},
		{"fixtures/more_circulars/spec3.json", "item.json#/item"},
		{"fixtures/more_circulars/spec4.json", "item4.json#/item"},
	} {
		jazon := expandThisOrDieTrying(t, tc.fixturePath)
		m := rex.FindAllStringSubmatch(jazon, -1)
		if assert.NotNil(t, m) {
			for _, matched := range m {
				subMatch := matched[1]
				assert.True(t, strings.HasPrefix(subMatch, tc.wantPrefix),
					"expected $ref to be relative, got: %s", matched[0])
			}
		}
	}
}
// Test_Issue957 verifies that expanding the issue-957 fixture strips circular
// $ref back to the fixture file and inlines all remaining refs as local
// #/definitions pointers.
func Test_Issue957(t *testing.T) {
	fixturePath := "fixtures/bugs/957/fixture-957.json"
	jazon := expandThisOrDieTrying(t, fixturePath)
	if !assert.NotEmpty(t, jazon) {
		return
	}

	assert.NotContainsf(t, jazon, "fixture-957.json#/",
		"expected %s to be expanded with stripped circular $ref", fixturePath)

	m := rex.FindAllStringSubmatch(jazon, -1)
	if !assert.NotNil(t, m) {
		return
	}
	for _, matched := range m {
		assert.True(t, strings.HasPrefix(matched[1], "#/definitions/"),
			"expected $ref to be inlined, got: %s", matched[0])
	}
	//t.Log(jazon)
}
// Test_Bitbucket expands the bitbucket API fixture (another source of
// circular $ref) and checks every remaining $ref is a local definition.
func Test_Bitbucket(t *testing.T) {
	// Additional testcase for circular $ref (from bitbucket api)
	jazon := expandThisOrDieTrying(t, "fixtures/more_circulars/bitbucket.json")

	m := rex.FindAllStringSubmatch(jazon, -1)
	if !assert.NotNil(t, m) {
		return
	}
	for _, matched := range m {
		assert.True(t, strings.HasPrefix(matched[1], "#/definitions/"),
			"expected $ref to be inlined, got: %s", matched[0])
	}
}
// Test_ExpandJSONSchemaDraft4 expands the JSON Schema draft-04 meta-schema
// and verifies that every surviving $ref is a remote pointer into
// json-schema.org draft-04 (i.e. all local refs were inlined).
func Test_ExpandJSONSchemaDraft4(t *testing.T) {
	fixturePath := filepath.Join("schemas", "jsonschema-draft-04.json")
	jazon := expandThisSchemaOrDieTrying(t, fixturePath)
	// assert all $ref matches "$ref": "http://json-schema.org/draft-04/something"
	m := rex.FindAllStringSubmatch(jazon, -1)
	if assert.NotNil(t, m) {
		for _, matched := range m {
			subMatch := matched[1]
			assert.True(t, strings.HasPrefix(subMatch, "http://json-schema.org/draft-04/"),
				"expected $ref to be remote, got: %s", matched[0])
		}
	}
}
// Test_ExpandSwaggerSchema expands the Swagger 2.0 meta-schema and verifies
// that every surviving $ref is a local #/definitions pointer.
func Test_ExpandSwaggerSchema(t *testing.T) {
	fixturePath := filepath.Join("schemas", "v2", "schema.json")
	jazon := expandThisSchemaOrDieTrying(t, fixturePath)
	// assert all $ref matches "$ref": "#/definitions/something"
	m := rex.FindAllStringSubmatch(jazon, -1)
	if assert.NotNil(t, m) {
		for _, matched := range m {
			subMatch := matched[1]
			assert.True(t, strings.HasPrefix(subMatch, "#/definitions/"),
				"expected $ref to be inlined, got: %s", matched[0])
		}
	}
}
// expandThisSchemaOrDieTrying loads the schema at fixturePath, expands it in
// place relative to its own location, and returns the expanded schema as
// indented JSON. Any failure aborts the calling test via t.FailNow.
func expandThisSchemaOrDieTrying(t *testing.T, fixturePath string) string {
	doc, err := jsonDoc(fixturePath)
	if !assert.NoError(t, err) {
		t.FailNow()
		return ""
	}

	specPath, _ := absPath(fixturePath)
	opts := &ExpandOptions{RelativeBase: specPath}

	sch := new(Schema)
	if err = json.Unmarshal(doc, sch); !assert.NoError(t, err) {
		t.FailNow()
		return ""
	}

	assert.NotPanics(t, func() {
		assert.NoError(t, ExpandSchemaWithBasePath(sch, nil, opts))
	}, "Calling expand schema circular refs, should not panic!")

	bbb, _ := json.MarshalIndent(sch, "", " ")
	return string(bbb)
}
// expandThisOrDieTrying loads the spec at fixturePath, expands it in place
// relative to its own location, and returns the expanded spec as indented
// JSON. Any failure aborts the calling test via t.FailNow.
func expandThisOrDieTrying(t *testing.T, fixturePath string) string {
	doc, err := jsonDoc(fixturePath)
	if !assert.NoError(t, err) {
		t.FailNow()
		return ""
	}

	specPath, _ := absPath(fixturePath)
	opts := &ExpandOptions{RelativeBase: specPath}

	spec := new(Swagger)
	if err = json.Unmarshal(doc, spec); !assert.NoError(t, err) {
		t.FailNow()
		return ""
	}

	assert.NotPanics(t, func() {
		assert.NoError(t, ExpandSpec(spec, opts))
	}, "Calling expand spec with circular refs, should not panic!")

	bbb, _ := json.MarshalIndent(spec, "", " ")
	return string(bbb)
}
func TestContinueOnErrorExpansion(t *testing.T) {
defer log.SetOutput(os.Stdout)
log.SetOutput(ioutil.Discard)
missingRefDoc, err := jsonDoc("fixtures/expansion/missingRef.json")
assert.NoError(t, err)
@ -360,8 +649,8 @@ func TestContinueOnErrorExpansion(t *testing.T) {
}
err = ExpandSpec(testCase.Input, opts)
assert.NoError(t, err)
b, _ := testCase.Input.MarshalJSON()
log.Printf(string(b))
// b, _ := testCase.Input.MarshalJSON()
// log.Printf(string(b))
assert.Equal(t, testCase.Input, testCase.Expected, "Should continue expanding spec when a definition can't be found.")
doc, err := jsonDoc("fixtures/expansion/missingItemRef.json")
@ -425,7 +714,7 @@ func TestItemsExpansion(t *testing.T) {
err = json.Unmarshal(carsDoc, spec)
assert.NoError(t, err)
resolver, err := defaultSchemaLoader(spec, nil, nil)
resolver, err := defaultSchemaLoader(spec, nil, nil, nil)
assert.NoError(t, err)
schema := spec.Definitions["car"]
@ -552,7 +841,7 @@ func TestSchemaExpansion(t *testing.T) {
err = json.Unmarshal(carsDoc, spec)
assert.NoError(t, err)
resolver, err := defaultSchemaLoader(spec, nil, nil)
resolver, err := defaultSchemaLoader(spec, nil, nil, nil)
assert.NoError(t, err)
schema := spec.Definitions["car"]
@ -698,7 +987,7 @@ func TestRelativeBaseURI(t *testing.T) {
defer server.Close()
spec := new(Swagger)
// resolver, err := defaultSchemaLoader(spec, nil, nil)
// resolver, err := defaultSchemaLoader(spec, nil, nil,nil)
// assert.NoError(t, err)
err := ExpandSpec(spec, nil)
@ -838,20 +1127,20 @@ func TestResolveRemoteRef_RootSame(t *testing.T) {
// the filename doesn't matter because ref will eventually point to refed.json
specBase, _ := absPath("fixtures/specs/anyotherfile.json")
if assert.NoError(t, err) && assert.NoError(t, json.Unmarshal(b, rootDoc)) {
var result_0 Swagger
ref_0, _ := NewRef(server.URL + "/refed.json#")
resolver_0, _ := defaultSchemaLoader(rootDoc, nil, nil)
if assert.NoError(t, resolver_0.Resolve(&ref_0, &result_0, "")) {
assertSpecs(t, result_0, *rootDoc)
var result0 Swagger
ref0, _ := NewRef(server.URL + "/refed.json#")
resolver0, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver0.Resolve(&ref0, &result0, "")) {
assertSpecs(t, result0, *rootDoc)
}
var result_1 Swagger
ref_1, _ := NewRef("./refed.json")
resolver_1, _ := defaultSchemaLoader(rootDoc, &ExpandOptions{
var result1 Swagger
ref1, _ := NewRef("./refed.json")
resolver1, _ := defaultSchemaLoader(rootDoc, &ExpandOptions{
RelativeBase: specBase,
}, nil)
if assert.NoError(t, resolver_1.Resolve(&ref_1, &result_1, specBase)) {
assertSpecs(t, result_1, *rootDoc)
}, nil, nil)
if assert.NoError(t, resolver1.Resolve(&ref1, &result1, specBase)) {
assertSpecs(t, result1, *rootDoc)
}
}
}
@ -889,7 +1178,7 @@ func TestResolveRemoteRef_FromInvalidFragment(t *testing.T) {
var tgt Schema
ref, err := NewRef(server.URL + "/refed.json#/definitions/NotThere")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
assert.Error(t, resolver.Resolve(&ref, &tgt, ""))
}
}
@ -963,7 +1252,7 @@ func TestResolveRemoteRef_WithNestedResolutionContextWithFragment(t *testing.T)
// var tgt Schema
// ref, err := NewRef(server.URL + "/resolution2.json#/items/items")
// if assert.NoError(t, err) {
// resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
// resolver, _ := defaultSchemaLoader(rootDoc, nil, nil,nil)
// if assert.NoError(t, resolver.Resolve(&ref, &tgt, "")) {
// assert.Equal(t, StringOrArray([]string{"file"}), tgt.Type)
// }
@ -984,7 +1273,7 @@ func TestResolveRemoteRef_ToParameter(t *testing.T) {
ref, err := NewRef(server.URL + "/refed.json#/parameters/idParam")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, "")) {
assert.Equal(t, "id", tgt.Name)
assert.Equal(t, "path", tgt.In)
@ -1010,7 +1299,7 @@ func TestResolveRemoteRef_ToPathItem(t *testing.T) {
ref, err := NewRef(server.URL + "/refed.json#/paths/" + jsonpointer.Escape("/pets/{id}"))
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, "")) {
assert.Equal(t, rootDoc.Paths.Paths["/pets/{id}"].Get, tgt.Get)
}
@ -1031,7 +1320,7 @@ func TestResolveRemoteRef_ToResponse(t *testing.T) {
ref, err := NewRef(server.URL + "/refed.json#/responses/petResponse")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, "")) {
assert.Equal(t, rootDoc.Responses["petResponse"], tgt)
}
@ -1045,7 +1334,7 @@ func TestResolveLocalRef_SameRoot(t *testing.T) {
result := new(Swagger)
ref, _ := NewRef("#")
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
err := resolver.Resolve(&ref, result, "")
if assert.NoError(t, err) {
assert.Equal(t, rootDoc, result)
@ -1059,7 +1348,7 @@ func TestResolveLocalRef_FromFragment(t *testing.T) {
var tgt Schema
ref, err := NewRef("#/definitions/Category")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
err := resolver.Resolve(&ref, &tgt, "")
if assert.NoError(t, err) {
assert.Equal(t, "Category", tgt.ID)
@ -1074,7 +1363,7 @@ func TestResolveLocalRef_FromInvalidFragment(t *testing.T) {
var tgt Schema
ref, err := NewRef("#/definitions/NotThere")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
err := resolver.Resolve(&ref, &tgt, "")
assert.Error(t, err)
}
@ -1088,7 +1377,7 @@ func TestResolveLocalRef_Parameter(t *testing.T) {
var tgt Parameter
ref, err := NewRef("#/parameters/idParam")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, basePath)) {
assert.Equal(t, "id", tgt.Name)
assert.Equal(t, "path", tgt.In)
@ -1109,7 +1398,7 @@ func TestResolveLocalRef_PathItem(t *testing.T) {
var tgt PathItem
ref, err := NewRef("#/paths/" + jsonpointer.Escape("/pets/{id}"))
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, basePath)) {
assert.Equal(t, rootDoc.Paths.Paths["/pets/{id}"].Get, tgt.Get)
}
@ -1125,7 +1414,7 @@ func TestResolveLocalRef_Response(t *testing.T) {
var tgt Response
ref, err := NewRef("#/responses/petResponse")
if assert.NoError(t, err) {
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil)
resolver, _ := defaultSchemaLoader(rootDoc, nil, nil, nil)
if assert.NoError(t, resolver.Resolve(&ref, &tgt, basePath)) {
assert.Equal(t, rootDoc.Responses["petResponse"], tgt)
}
@ -1133,6 +1422,96 @@ func TestResolveLocalRef_Response(t *testing.T) {
}
}
func TestResolveForTransitiveRefs(t *testing.T) {
var spec *Swagger
rawSpec, err := ioutil.ReadFile("fixtures/specs/todos.json")
assert.NoError(t, err)
basePath, err := absPath("fixtures/specs/todos.json")
assert.NoError(t, err)
opts := &ExpandOptions{
RelativeBase: basePath,
}
err = json.Unmarshal(rawSpec, &spec)
assert.NoError(t, err)
err = ExpandSpec(spec, opts)
assert.NoError(t, err)
}
// Testcase identifiers used by TestExpandSchemaWithRoot / expandRootWithID to
// distinguish the variant where the Pet definition's ID was removed from the
// variant where the schema keeps its ID.
const (
	withoutSchemaID = "removed"
	withSchemaID    = "schema"
)
// TestExpandSchemaWithRoot runs expandRootWithID twice against the petstore
// root document: once with the ID stripped from the Pet definition, once
// with the original ID restored.
func TestExpandSchemaWithRoot(t *testing.T) {
	root := new(Swagger)
	_ = json.Unmarshal(PetStoreJSONMessage, root)

	// 1. remove ID from root definition
	origPet := root.Definitions["Pet"]
	strippedPet := origPet
	strippedPet.ID = ""
	root.Definitions["Pet"] = strippedPet
	expandRootWithID(t, root, withoutSchemaID)

	// 2. put back ID in Pet definition
	// nested $ref should fail
	//Debug = true
	root.Definitions["Pet"] = origPet
	expandRootWithID(t, root, withSchemaID)
}
// expandRootWithID expands three single-$ref schemas against root and checks
// the expected outcome for each. When testcase is withSchemaID, refs whose
// target (or nested target) carries a schema ID are expected to fail;
// otherwise they are expected to expand cleanly. The #/definitions/Category
// case has no nested $ref and must always succeed.
func expandRootWithID(t *testing.T, root *Swagger, testcase string) {
	// The original body repeated the same build-ref / expand / assert /
	// debug-dump stanza three times; fold it into one closure.
	check := func(ref string, expectErr bool) {
		sch := &Schema{
			SchemaProps: SchemaProps{
				Ref: MustCreateRef(ref),
			},
		}
		err := ExpandSchema(sch, root, nil)
		if expectErr {
			assert.Errorf(t, err, "expected %s NOT to expand properly because of the ID in the parent schema", sch.Ref.String())
		} else {
			assert.NoErrorf(t, err, "expected %s to expand properly", sch.Ref.String())
		}
		if Debug {
			bbb, _ := json.MarshalIndent(sch, "", " ")
			t.Log(string(bbb))
		}
	}

	t.Logf("case: expanding $ref to schema without ID, with nested $ref with %s ID", testcase)
	check("#/definitions/newPet", testcase == withSchemaID)

	t.Log("case: expanding $ref to schema without nested $ref")
	check("#/definitions/Category", false)

	t.Logf("case: expanding $ref to schema with %s ID and nested $ref", testcase)
	check("#/definitions/Pet", testcase == withSchemaID)
}
// PetStoreJSONMessage is the petstore 2.0 fixture spec (PetStore20) wrapped
// as a raw JSON message, ready to unmarshal into a Swagger document in tests.
var PetStoreJSONMessage = json.RawMessage([]byte(PetStore20))

View File

@ -0,0 +1,5 @@
farFarAway:
type: object
properties:
farFarAwayProp:
type: integer

View File

@ -0,0 +1,11 @@
aRemotePlace:
type: object
properties:
remoteProp:
type: integer
fartherProp:
$ref: './farther/farther.yaml#/farFarAway'
moreRemoteThanYouCanThink:
#$ref: './remote/remote.yaml#/farFarAway'
type: integer

View File

@ -0,0 +1,5 @@
farFarAway:
type: object
properties:
farFarAwayProp:
type: integer

View File

@ -0,0 +1,104 @@
swagger: '2.0'
info:
title: Responses
version: 0.1.0
definitions:
Error:
type: object
description: |
Contains all the properties any error response from the API will contain.
Some properties are optional so might be empty most of the time
required:
- code
- message
properties:
code:
description: the error code, this is not necessarily the http status code
type: integer
format: int32
message:
description: a human readable version of the error
type: string
helpUrl:
description: an optional url for getting more help about this error
type: string
format: uri
myArray:
type: array
items:
$ref: '#/definitions/myItems'
myItems:
type: object
properties:
propItems1:
type: integer
propItems2:
$ref: 'remote/remote.yaml#/aRemotePlace'
otherPlace:
Error:
type: object
properties:
message:
type: string
parameters:
BadRequest:
name: badRequest
in: body
schema:
$ref: '#/definitions/Error'
GoodRequest:
name: goodRequest
in: body
schema:
$ref: '#/otherPlace/Error'
PlainRequest:
name: plainRequest
in: body
schema:
type: integer
StrangeRequest:
name: stangeRequest
in: body
schema:
$ref: 'responses.yaml#/otherPlace/Error'
RemoteRequest:
name: remoteRequest
in: body
schema:
$ref: './remote/remote.yaml#/moreRemoteThanYouCanThink'
responses:
BadRequest:
description: Bad request
schema:
$ref: '#/definitions/Error'
GoodRequest:
description: good request
schema:
$ref: '#/otherPlace/Error'
PlainRequest:
description: plain request
schema:
type: integer
StrangeRequest:
description: strange request
schema:
$ref: 'responses.yaml#/otherPlace/Error'
RemoteRequest:
description: remote request
schema:
$ref: './remote/remote.yaml#/moreRemoteThanYouCanThink'
paths:
/:
get:
summary: GET
operationId: getAll
responses:
200:
description: Ok

View File

@ -0,0 +1,39 @@
swagger: '2.0'
info:
title: Object
version: 0.1.0
paths:
/:
get:
summary: GET
operationId: getAll
parameters:
- $ref: 'responses.yaml#/parameters/BadRequest'
- $ref: 'responses.yaml#/parameters/GoodRequest'
- $ref: 'responses.yaml#/parameters/PlainRequest'
- $ref: 'responses.yaml#/parameters/StrangeRequest'
- $ref: 'responses.yaml#/parameters/RemoteRequest'
- name: nestedBody
in: body
schema:
$ref: '#/definitions/nestedRefDefinition'
responses:
200:
description: Ok
400:
$ref: 'responses.yaml#/responses/BadRequest'
403:
$ref: 'responses.yaml#/responses/GoodRequest'
404:
$ref: 'responses.yaml#/responses/PlainRequest'
304:
$ref: 'responses.yaml#/responses/StrangeRequest'
204:
$ref: 'responses.yaml#/responses/RemoteRequest'
definitions:
badDefinition:
$ref: 'responses.yaml#/definitions/Error'
nestedRefDefinition:
$ref: 'responses.yaml#/definitions/myArray'

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,618 @@
definitions:
# Generic response model
V4GenericResponse:
type: object
properties:
message:
type: string
description: A human readable message
code:
type: string
description: |
A machine readable [response code](https://github.com/giantswarm/api-spec/blob/master/details/RESPONSE_CODES.md) like e. g. `INVALID_CREDENTIALS`
# Info response
V4InfoResponse:
type: object
properties:
general:
description: General information
type: object
properties:
installation_name:
description: Unique name of the installation
type: string
provider:
description: The technical provider used in this installation. Either "kvm", "aws", or "azure".
type: string
datacenter:
description: Identifier of the datacenter or cloud provider region, e. g. "eu-west-1"
type: string
workers:
description: Information related to worker nodes
type: object
properties:
count_per_cluster:
description: Number of workers per cluster
type: object
properties:
max:
description: Maximum number of worker a cluster can have
type: number
default:
description: Default number of workers in a new cluster will have, if not specifiec otherwise
type: number
instance_type:
description: Instance types to be used for worker nodes. Only available for AWS clusters.
type: object
properties:
options:
description: List of available instance types
type: array
items:
type: string
default:
description: The instance type used in new cluster, if not specified
type: string
vm_size:
description: Azure Virtual Machine size to be used for worker nodes. Only available for Azure clusters.
type: object
properties:
options:
description: List of available instance types
type: array
items:
type: string
default:
description: The instance type used in new cluster, if not specified
type: string
# Request to create a new cluster
V4AddClusterRequest:
type: object
required:
- owner
description: Request model for creating a new cluster
properties:
owner:
type: string
description: Name of the organization owning the cluster
name:
type: string
description: Cluster name
release_version:
type: string
description: |
The [release](https://docs.giantswarm.io/api/#tag/releases) version
to use in the new cluster
kubernetes_version:
type: string
description: |
Kubernetes version number (deprecated). Doesn't have any effect.
This attribute is going to be removed in future API versions.
workers:
type: array
items:
$ref: '#/definitions/V4NodeDefinition'
V4ModifyClusterRequest:
type: object
required: []
description: Request body for cluster modification
properties:
name:
type: string
description: Name for the cluster
owner:
type: string
description: Name of the organization owning the cluster
release_version:
type: string
description: Release version to use after an upgrade
workers:
type: array
description: Worker node array
items:
$ref: '#/definitions/V4NodeDefinition'
# Details on existing cluster
V4ClusterDetailsResponse:
type: object
description: Response model showing details of a cluster
properties:
id:
type: string
description: Unique cluster identifier
api_endpoint:
type: string
description: URI of the Kubernetes API endpoint
create_date:
type: string
description: Date/time of cluster creation
owner:
type: string
description: Name of the organization owning the cluster
name:
type: string
description: Cluster name
release_version:
type: string
description: |
The [release](https://docs.giantswarm.io/api/#tag/releases) version
currently running this cluster.
kubernetes_version:
type: string
description: Deprecated. Will be removed in a future API version.
workers:
type: array
items:
$ref: '#/definitions/V4NodeDefinition'
kvm:
type: object
description: Attributes specific to clusters running on KVM (on-prem) installations.
properties:
port_mappings:
type: array
description: |
Reveals the ports on the host cluster that are mapped to this guest cluster's ingress
and which protocol that port supports. Only shown and relevant on our on-prem KVM clusters.
items:
type: object
properties:
port:
description: |
The port on the host cluster that will forward traffic to the guest cluster
type: integer
protocol:
description: |
The protocol this port mapping is made for.
type: string
# Definition of a cluster node
V4NodeDefinition:
type: object
properties:
aws:
type: object
description: |
Attributes specific to nodes running on Amazon Web Services (AWS)
properties:
instance_type:
type: string
description: |
EC2 instance type name. Must be the same for all worker nodes
of a cluster.
azure:
type: object
description: |
Attributes specific to nodes running on Microsoft Azure
properties:
vm_size:
type: string
description: |
Azure Virtual Machine size. Must be the same for all worker nodes
of a cluster.
memory:
type: object
properties:
size_gb:
type: number
description: RAM size in GB. Can be an integer or float.
storage:
type: object
properties:
size_gb:
type: number
description: Node storage size in GB. Can be an integer or float.
cpu:
type: object
properties:
cores:
type: integer
description: Number of CPU cores
labels:
type: object
additionalProperties: true
# List of key pairs
V4GetKeyPairsResponse:
type: array
description: Array of sparse key pair objects
items:
type: object
properties:
id:
type: string
description: Unique identifier of the key pair
description:
type: string
description: Free text information about the key pair
ttl_hours:
type: integer
description: Expiration time (from creation) in hours
create_date:
type: string
description: Date/time of creation
common_name:
type: string
description: The common name of the certificate subject.
certificate_organizations:
type: string
description: The certificate subject's `organization` fields.
# Add key pair request
V4AddKeyPairRequest:
type: object
required:
- description
properties:
description:
type: string
description: Free text information about the key pair
ttl_hours:
type: integer
format: int32
description: Expiration time (from creation) in hours
cn_prefix:
type: string
description: The common name prefix of the certificate subject. This only allows characters that are usable in domain names (`a-z`, `0-9`, and `.-`, where `.-` must not occur at either the start or the end).
certificate_organizations:
type: string
description: |
This will set the certificate subject's `organization` fields.
Use a comma seperated list of values.
V4AddKeyPairResponse:
type: object
properties:
id:
type: string
description: Unique identifier of the key pair
description:
type: string
description: Free text information about the key pair
ttl_hours:
type: integer
description: Expiration time (from creation) in hours
create_date:
type: string
description: Date/time of creation
certificate_authority_data:
type: string
description: PEM-encoded CA certificate of the cluster
client_key_data:
type: string
description: PEM-encoded RSA private key
client_certificate_data:
type: string
description: PEM-encoded certificate
# cluster metrics
V4GetClusterMetricsResponse:
description: Response for the getClusterMetrics operation
type: object
properties:
workers:
description: Group of metrics regarding workers
type: array
items:
$ref: '#/definitions/V4NodeMetrics'
V4NodeMetrics:
type: object
properties:
id:
description: String identifying the node
type: string
metrics:
description: Container object for all metrics available for the node
type: object
properties:
container_count:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
pod_count:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
cpu_used:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
ram_free:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
ram_available:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
ram_cached:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
ram_buffers:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
ram_mapped:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
node_storage_used:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
network_rx:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
network_tx:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
resource_cpu_requests:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
resource_cpu_limits:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
resource_ram_requests:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
resource_ram_limits:
type: object
properties:
timestamp:
description: Time when the given value has been recorded
type: string
value:
description: The value for the metric. Can be an integer or float.
type: number
# a complete organization object
V4Organization:
type: object
properties:
id:
type: string
description: Unique name/identifier of the organization
members:
type: array
description: List of members that belong to this organization
items:
$ref: '#/definitions/V4OrganizationMember'
# An organization array item, as returned by getOrganizations
V4OrganizationListItem:
type: object
properties:
id:
type: string
description: Unique name/identifier of the organization
# A user that belongs to an organization
V4OrganizationMember:
type: object
properties:
email:
type: string
description: Email address of the user
# One of the users in the array as returned by getUsers
V4UserListItem:
type: object
properties:
email:
type: string
description: Email address of the user
created:
type: string
description: The date and time that this account was created
expiry:
type: string
description: The date and time when this account will expire
# A cluster array item, as returned by getClusters
V4ClusterListItem:
type: object
properties:
id:
type: string
description: Unique cluster identifier
create_date:
type: string
description: Date/time of cluster creation
name:
type: string
description: Cluster name
owner:
type: string
description: Name of the organization owning the cluster
release_version:
type: string
description: The semantic version number of this cluster
# A release array item, as returned by getReleases
V4ReleaseListItem:
type: object
required: ["version", "timestamp", "changelog", "components"]
properties:
version:
type: string
description: The semantic version number
timestamp:
type: string
description: Date and time of the release creation
active:
type: boolean
description: |
If true, the version is available for new clusters and cluster
upgrades. Older versions become unavailable and thus have the
value `false` here.
changelog:
description: |
Structured list of changes in this release, in comparison to the
previous version, with respect to the contained components.
type: array
items:
type: object
properties:
component:
type: string
description: |
If the changed item was a component, this attribute is the
name of the component.
description:
type: string
description: Human-friendly description of the change
components:
description: |
List of components and their version contained in the release
type: array
items:
type: object
required: ["name", "version"]
properties:
name:
type: string
description: Name of the component
version:
type: string
description: Version number of the component
V4CreateUserRequest:
type: object
required:
- password
description: Request model for creating a new user
properties:
password:
type: string
description: A Base64 encoded password
expiry:
type: string
description: The date and time when this account will expire
V4AddCredentialsRequest:
type: object
required:
- provider
description: Request model for adding a set of credentials
properties:
provider:
type: string
aws:
type: object
description: Credentials specific to an AWS account
required:
- roles
properties:
roles:
type: object
description: IAM roles to assume by certain entities
required:
- awsoperator
- admin
properties:
admin:
type: string
description: ARN of the IAM role to assume by Giant Swarm support staff
awsoperator:
type: string
description: ARN of the IAM role to assume by the software operating clusters
# A request for an auth token
V4CreateAuthTokenRequest:
type: object
properties:
email:
type: string
description: Your email address
password_base64:
type: string
description: Your password as a base64 encoded string
# A response to a successful auth token request
V4CreateAuthTokenResponse:
type: object
properties:
auth_token:
type: string
description: The newly created API token

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,61 @@
parameters:
RequiredGiantSwarmAuthorizationHeader:
name: Authorization
type: string
in: header
required: true
description: As described in the [authentication](#section/Authentication) section
ClusterIdPathParameter:
name: cluster_id
in: path
required: true
type: string
description: Cluster ID
UserEmailPathParameter:
name: email
in: path
required: true
type: string
description: The user's email address
OrganizationIdPathParameter:
name: organization_id
in: path
required: true
type: string
description: |
An ID for the organization.
This ID must be unique and match this regular
expression: ^[a-z0-9_]{4,30}$
XRequestIDHeader:
name: X-Request-ID
in: header
type: string
required: false
description: |
A randomly generated key that can be used to track a request throughout
services of Giant Swarm.
XGiantSwarmActivityHeader:
name: X-Giant-Swarm-Activity
in: header
type: string
required: false
description: |
Name of an activity to track, like "list-clusters". This allows to
analyze several API requests sent in context and gives an idea on
the purpose.
XGiantSwarmCmdLineHeader:
name: X-Giant-Swarm-CmdLine
in: header
type: string
required: false
description: |
If activity has been issued by a CLI, this header can contain the
command line

View File

@ -0,0 +1,13 @@
responses:
V4Generic401Response:
description: Permission denied
schema:
$ref: "./definitions.yaml#/definitions/V4GenericResponse"
examples:
application/json:
{
"code": "PERMISSION_DENIED",
"message": "The requested resource cannot be accessed using the provided authentication details."
}

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More