Mirror of https://github.com/kubevirt/containerized-data-importer.git, synced 2025-06-03 06:30:22 +00:00

Introduce customizeComponents option (#3070)

* init
* add e2e
* fix unit tests
* fix matchselector for cdi-deployment
* rebase
* refactor test of customizeComponents

Signed-off-by: Yaroslav Borbat <yaroslav.borbat@flant.com>

This commit is contained in:
parent 4f8140a5c7
commit d04225a00b
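
To make the feature concrete before the generated-schema churn below, here is a minimal sketch, in Go, of what the new spec.customizeComponents field carries. It uses only the v1beta1 types this commit introduces; the "environment" label and the verbosity value are illustrative, not part of the commit:

package main

import (
	"fmt"

	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func main() {
	// A CDI spec that relabels the cdi-deployment Deployment with a strategic
	// merge patch and raises the cdi-controller verbosity via flags.
	spec := cdiv1.CDISpec{
		CustomizeComponents: cdiv1.CustomizeComponents{
			Patches: []cdiv1.CustomizeComponentsPatch{{
				ResourceName: "cdi-deployment",
				ResourceType: "Deployment",
				Patch:        `{"metadata":{"labels":{"environment":"test"}}}`, // hypothetical label
				Type:         cdiv1.StrategicMergePatchType,
			}},
			Flags: &cdiv1.Flags{
				Controller: map[string]string{"v": "4"}, // rendered as the container args "-v 4"
			},
		},
	}
	fmt.Printf("%+v\n", spec.CustomizeComponents)
}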
@@ -4081,6 +4081,10 @@
           "description": "CDIConfig at CDI level",
           "$ref": "#/definitions/v1beta1.CDIConfigSpec"
         },
+        "customizeComponents": {
+          "default": {},
+          "$ref": "#/definitions/v1beta1.CustomizeComponents"
+        },
         "imagePullPolicy": {
           "description": "PullPolicy describes a policy for if/when to pull a container image\n\nPossible enum values:\n - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.\n - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.\n - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present",
           "type": "string",
@@ -4194,6 +4198,52 @@
         }
       }
     },
+    "v1beta1.CustomizeComponents": {
+      "description": "CustomizeComponents defines patches for components deployed by the CDI operator.",
+      "type": "object",
+      "properties": {
+        "flags": {
+          "description": "Configure the value used for deployment and daemonset resources",
+          "$ref": "#/definitions/v1beta1.Flags"
+        },
+        "patches": {
+          "type": "array",
+          "items": {
+            "default": {},
+            "$ref": "#/definitions/v1beta1.CustomizeComponentsPatch"
+          },
+          "x-kubernetes-list-type": "atomic"
+        }
+      }
+    },
+    "v1beta1.CustomizeComponentsPatch": {
+      "description": "CustomizeComponentsPatch defines a patch for some resource.",
+      "type": "object",
+      "required": [
+        "resourceName",
+        "resourceType",
+        "patch",
+        "type"
+      ],
+      "properties": {
+        "patch": {
+          "type": "string",
+          "default": ""
+        },
+        "resourceName": {
+          "type": "string",
+          "default": ""
+        },
+        "resourceType": {
+          "type": "string",
+          "default": ""
+        },
+        "type": {
+          "type": "string",
+          "default": ""
+        }
+      }
+    },
     "v1beta1.DataImportCron": {
       "description": "DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace",
       "type": "object",
@@ -4970,6 +5020,33 @@
         }
       }
     },
+    "v1beta1.Flags": {
+      "description": "Flags will create a patch that will replace all flags for the container's command field. The only flags that will be used are those defined. There are no guarantees around forward/backward compatibility. If set incorrectly, this will cause the resource to error when rolled out until the flags are updated.",
+      "type": "object",
+      "properties": {
+        "api": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string",
+            "default": ""
+          }
+        },
+        "controller": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string",
+            "default": ""
+          }
+        },
+        "uploadProxy": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string",
+            "default": ""
+          }
+        }
+      }
+    },
     "v1beta1.ImportProxy": {
       "description": "ImportProxy provides the information on how to configure the importer pod proxy.",
       "type": "object",
go.mod (2 changed lines)
@@ -10,6 +10,7 @@ require (
 	github.com/coreos/go-semver v0.3.1
 	github.com/docker/go-units v0.5.0
 	github.com/emicklei/go-restful/v3 v3.11.0
+	github.com/evanphx/json-patch/v5 v5.8.1
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-logr/logr v1.2.4
 	github.com/golang/snappy v0.0.4
@@ -84,7 +85,6 @@ require (
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-logr/zapr v1.2.4 // indirect
 	github.com/go-openapi/jsonpointer v0.20.0 // indirect
go.sum (5 changed lines)
@@ -766,8 +766,8 @@ github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch/v5 v5.8.1 h1:iPEdwg0XayoS+E7Mth9JxwUtOgyVxnDTXHtKhZPlZxA=
+github.com/evanphx/json-patch/v5 v5.8.1/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -1040,7 +1040,6 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -565,6 +565,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ClaimPropertySet": schema_pkg_apis_core_v1beta1_ClaimPropertySet(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ComponentConfig": schema_pkg_apis_core_v1beta1_ComponentConfig(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ConditionState": schema_pkg_apis_core_v1beta1_ConditionState(ref),
+		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponents": schema_pkg_apis_core_v1beta1_CustomizeComponents(ref),
+		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponentsPatch": schema_pkg_apis_core_v1beta1_CustomizeComponentsPatch(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.DataImportCron": schema_pkg_apis_core_v1beta1_DataImportCron(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.DataImportCronCondition": schema_pkg_apis_core_v1beta1_DataImportCronCondition(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.DataImportCronList": schema_pkg_apis_core_v1beta1_DataImportCronList(ref),
@@ -595,6 +597,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.DataVolumeSpec": schema_pkg_apis_core_v1beta1_DataVolumeSpec(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.DataVolumeStatus": schema_pkg_apis_core_v1beta1_DataVolumeStatus(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.FilesystemOverhead": schema_pkg_apis_core_v1beta1_FilesystemOverhead(ref),
+		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.Flags": schema_pkg_apis_core_v1beta1_Flags(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ImportProxy": schema_pkg_apis_core_v1beta1_ImportProxy(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ImportSourceType": schema_pkg_apis_core_v1beta1_ImportSourceType(ref),
 		"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ImportStatus": schema_pkg_apis_core_v1beta1_ImportStatus(ref),
@@ -27014,6 +27017,12 @@ func schema_pkg_apis_core_v1beta1_CDISpec(ref common.ReferenceCallback) common.O
 					Ref: ref("kubevirt.io/controller-lifecycle-operator-sdk/api.NodePlacement"),
 				},
 			},
+			"customizeComponents": {
+				SchemaProps: spec.SchemaProps{
+					Default: map[string]interface{}{},
+					Ref:     ref("kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponents"),
+				},
+			},
 			"cloneStrategyOverride": {
 				SchemaProps: spec.SchemaProps{
 					Description: "Clone strategy override: should we use a host-assisted copy even if snapshots are available?",
@@ -27044,7 +27053,7 @@ func schema_pkg_apis_core_v1beta1_CDISpec(ref common.ReferenceCallback) common.O
 			},
 		},
 		Dependencies: []string{
-			"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CDICertConfig", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CDIConfigSpec", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ComponentConfig", "kubevirt.io/controller-lifecycle-operator-sdk/api.NodePlacement"},
+			"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CDICertConfig", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CDIConfigSpec", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.ComponentConfig", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponents", "kubevirt.io/controller-lifecycle-operator-sdk/api.NodePlacement"},
 	}
 }
@@ -27286,6 +27295,87 @@ func schema_pkg_apis_core_v1beta1_ConditionState(ref common.ReferenceCallback) c
 	}
 }
 
+func schema_pkg_apis_core_v1beta1_CustomizeComponents(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "CustomizeComponents defines patches for components deployed by the CDI operator.",
+				Type:        []string{"object"},
+				Properties: map[string]spec.Schema{
+					"patches": {
+						VendorExtensible: spec.VendorExtensible{
+							Extensions: spec.Extensions{
+								"x-kubernetes-list-type": "atomic",
+							},
+						},
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: map[string]interface{}{},
+										Ref:     ref("kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponentsPatch"),
+									},
+								},
+							},
+						},
+					},
+					"flags": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Configure the value used for deployment and daemonset resources",
+							Ref:         ref("kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.Flags"),
+						},
+					},
+				},
+			},
+		},
+		Dependencies: []string{
+			"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.CustomizeComponentsPatch", "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1.Flags"},
+	}
+}
+
+func schema_pkg_apis_core_v1beta1_CustomizeComponentsPatch(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "CustomizeComponentsPatch defines a patch for some resource.",
+				Type:        []string{"object"},
+				Properties: map[string]spec.Schema{
+					"resourceName": {
+						SchemaProps: spec.SchemaProps{
+							Default: "",
+							Type:    []string{"string"},
+							Format:  "",
+						},
+					},
+					"resourceType": {
+						SchemaProps: spec.SchemaProps{
+							Default: "",
+							Type:    []string{"string"},
+							Format:  "",
+						},
+					},
+					"patch": {
+						SchemaProps: spec.SchemaProps{
+							Default: "",
+							Type:    []string{"string"},
+							Format:  "",
+						},
+					},
+					"type": {
+						SchemaProps: spec.SchemaProps{
+							Default: "",
+							Type:    []string{"string"},
+							Format:  "",
+						},
+					},
+				},
+				Required: []string{"resourceName", "resourceType", "patch", "type"},
+			},
+		},
+	}
+}
+
 func schema_pkg_apis_core_v1beta1_DataImportCron(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -28616,6 +28706,64 @@ func schema_pkg_apis_core_v1beta1_FilesystemOverhead(ref common.ReferenceCallbac
 	}
 }
 
+func schema_pkg_apis_core_v1beta1_Flags(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "Flags will create a patch that will replace all flags for the container's command field. The only flags that will be used are those defined. There are no guarantees around forward/backward compatibility. If set incorrectly, this will cause the resource to error when rolled out until the flags are updated.",
+				Type:        []string{"object"},
+				Properties: map[string]spec.Schema{
+					"api": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"object"},
+							AdditionalProperties: &spec.SchemaOrBool{
+								Allows: true,
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
+					"controller": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"object"},
+							AdditionalProperties: &spec.SchemaOrBool{
+								Allows: true,
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
+					"uploadProxy": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"object"},
+							AdditionalProperties: &spec.SchemaOrBool{
+								Allows: true,
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func schema_pkg_apis_core_v1beta1_ImportProxy(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -24,6 +24,15 @@
 	// CDIOperatorName is the CDI operator name
 	CDIOperatorName = "cdi-operator"
+
+	// CDIControllerResourceName is the CDI controller resource name
+	CDIControllerResourceName = "cdi-deployment"
+	// CDIApiServerResourceName is the CDI apiserver resource name
+	CDIApiServerResourceName = "cdi-apiserver"
+	// CDIUploadProxyResourceName is the CDI uploadproxy resource name
+	CDIUploadProxyResourceName = "cdi-uploadproxy"
+	// CDICronJobResourceName is the CDI cronjob resource name
+	CDICronJobResourceName = "cdi-cronjob"
+
 	// AppKubernetesPartOfLabel is the Kubernetes recommended part-of label
 	AppKubernetesPartOfLabel = "app.kubernetes.io/part-of"
 	// AppKubernetesVersionLabel is the Kubernetes recommended version label
@@ -192,7 +192,7 @@
 	// AnnCloneType is the computed/requested clone type
 	AnnCloneType = AnnAPIGroup + "/cloneType"
 	// AnnCloneSourcePod name of the source clone pod
-	AnnCloneSourcePod = "cdi.kubevirt.io/storage.sourceClonePodName"
+	AnnCloneSourcePod = AnnAPIGroup + "/storage.sourceClonePodName"
 
 	// AnnUploadRequest marks that a PVC should be made available for upload
 	AnnUploadRequest = AnnAPIGroup + "/storage.upload.target"
@@ -249,7 +249,7 @@
 	AnnGarbageCollected = AnnAPIGroup + "/garbageCollected"
 
 	// CloneUniqueID is used as a special label to be used when we search for the pod
-	CloneUniqueID = "cdi.kubevirt.io/storage.clone.cloneUniqeId"
+	CloneUniqueID = AnnAPIGroup + "/storage.clone.cloneUniqeId"
 
 	// CloneSourceInUse is reason for event created when clone source pvc is in use
 	CloneSourceInUse = "CloneSourceInUse"
@@ -320,12 +320,15 @@
 	ProgressDone = "100.0%"
 
 	// AnnEventSourceKind is the source kind that should be related to events
-	AnnEventSourceKind = "cdi.kubevirt.io/events.source.kind"
+	AnnEventSourceKind = AnnAPIGroup + "/events.source.kind"
 	// AnnEventSource is the source that should be related to events (namespace/name)
-	AnnEventSource = "cdi.kubevirt.io/events.source"
+	AnnEventSource = AnnAPIGroup + "/events.source"
 
 	// AnnAllowClaimAdoption is the annotation that allows a claim to be adopted by a DataVolume
-	AnnAllowClaimAdoption = "cdi.kubevirt.io/allowClaimAdoption"
+	AnnAllowClaimAdoption = AnnAPIGroup + "/allowClaimAdoption"
+
+	// AnnCdiCustomizeComponentHash annotation is a hash of all customizations that live under spec.CustomizeComponents
+	AnnCdiCustomizeComponentHash = AnnAPIGroup + "/customizer-identifier"
 )
 
 // Size-detection pod error codes
@@ -9,6 +9,7 @@ go_library(
         "cr-manager.go",
         "cruft.go",
         "handler.go",
+        "patches.go",
         "prometheus.go",
         "reconciler-hooks.go",
         "route.go",
@@ -31,6 +32,7 @@ go_library(
         "//pkg/operator/resources/utils:go_default_library",
         "//pkg/util:go_default_library",
        "//staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1:go_default_library",
+        "//vendor/github.com/evanphx/json-patch/v5:go_default_library",
         "//vendor/github.com/go-logr/logr:go_default_library",
         "//vendor/github.com/kelseyhightower/envconfig:go_default_library",
         "//vendor/github.com/openshift/api/route/v1:go_default_library",
@@ -57,6 +59,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
@@ -85,6 +88,7 @@ go_test(
         "certrotation_test.go",
         "controller_suite_test.go",
         "controller_test.go",
+        "patches_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
@@ -166,5 +166,13 @@ func (r *ReconcileCDI) GetAllResources(crObject client.Object) ([]client.Object,
 		}
 	}
 
+	customizer, err := NewCustomizer(cr.Spec.CustomizeComponents)
+	if err != nil {
+		return nil, err
+	}
+	if err := customizer.Apply(resources); err != nil {
+		return nil, err
+	}
+
 	return resources, nil
 }
pkg/operator/controller/patches.go (new file, 290 lines)
@@ -0,0 +1,290 @@
package controller

import (
	"crypto/sha1"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"sort"
	"strings"

	jsonpatch "github.com/evanphx/json-patch/v5"
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
	"kubevirt.io/containerized-data-importer/pkg/common"
	cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// The Customizer structure is used for customizing components with a collection of patches.
// It includes an array of CustomizeComponentsPatch, along with a hash value.
// The Apply method allows applying these patches to a group of objects.
type Customizer struct {
	Patches []v1beta1.CustomizeComponentsPatch
	hash    string
}

// Hash provides the hash of the patches.
func (c *Customizer) Hash() string {
	return c.hash
}

// GetPatches provides the slice of patches.
func (c *Customizer) GetPatches() []v1beta1.CustomizeComponentsPatch {
	return c.Patches
}

// GetPatchesForResource provides the slice of patches for a specific resource.
func (c *Customizer) GetPatchesForResource(resourceType, name string) []v1beta1.CustomizeComponentsPatch {
	allPatches := c.Patches
	patches := make([]v1beta1.CustomizeComponentsPatch, 0)

	for _, p := range allPatches {
		if valueMatchesKey(p.ResourceType, resourceType) && valueMatchesKey(p.ResourceName, name) {
			patches = append(patches, p)
		}
	}

	return patches
}

func valueMatchesKey(value, key string) bool {
	if value == "*" {
		return true
	}

	return strings.EqualFold(key, value)
}

// Apply applies all patches to the slice of objects.
func (c *Customizer) Apply(objects []client.Object) error {
	var deployments []*appsv1.Deployment
	var services []*corev1.Service
	var validatingWebhooks []*admissionregistrationv1.ValidatingWebhookConfiguration
	var mutatingWebhooks []*admissionregistrationv1.MutatingWebhookConfiguration
	var apiServices []*apiregistrationv1.APIService

	for _, obj := range objects {
		kind := obj.GetObjectKind().GroupVersionKind().Kind
		switch kind {
		case "Deployment":
			deployments = append(deployments, obj.(*appsv1.Deployment))
		case "Service":
			services = append(services, obj.(*corev1.Service))
		case "ValidatingWebhookConfiguration":
			validatingWebhooks = append(validatingWebhooks, obj.(*admissionregistrationv1.ValidatingWebhookConfiguration))
		case "MutatingWebhookConfiguration":
			mutatingWebhooks = append(mutatingWebhooks, obj.(*admissionregistrationv1.MutatingWebhookConfiguration))
		case "APIService":
			apiServices = append(apiServices, obj.(*apiregistrationv1.APIService))
		}
	}

	err := c.GenericApplyPatches(deployments)
	if err != nil {
		return err
	}
	err = c.GenericApplyPatches(services)
	if err != nil {
		return err
	}
	err = c.GenericApplyPatches(validatingWebhooks)
	if err != nil {
		return err
	}
	err = c.GenericApplyPatches(mutatingWebhooks)
	if err != nil {
		return err
	}
	err = c.GenericApplyPatches(apiServices)
	if err != nil {
		return err
	}
	return nil
}

// GenericApplyPatches applies patches to a slice of resources.
func (c *Customizer) GenericApplyPatches(objects interface{}) error {
	switch reflect.TypeOf(objects).Kind() {
	case reflect.Slice:
		s := reflect.ValueOf(objects)
		for i := 0; i < s.Len(); i++ {
			o := s.Index(i)
			obj, ok := o.Interface().(runtime.Object)
			if !ok {
				return errors.New("Slice must contain objects of type 'runtime.Object'")
			}

			kind := obj.GetObjectKind().GroupVersionKind().Kind

			v := reflect.Indirect(o).FieldByName("ObjectMeta").FieldByName("Name")
			name := v.String()

			patches := c.GetPatchesForResource(kind, name)

			if len(patches) > 0 {
				patches = append(patches, v1beta1.CustomizeComponentsPatch{
					Patch: fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, cc.AnnCdiCustomizeComponentHash, c.hash),
					Type:  v1beta1.StrategicMergePatchType,
				})
				if err := applyPatches(obj, patches); err != nil {
					return err
				}
			}
		}
	}

	return nil
}

func applyPatches(obj runtime.Object, patches []v1beta1.CustomizeComponentsPatch) error {
	if len(patches) == 0 {
		return nil
	}

	for _, p := range patches {
		err := applyPatch(obj, p)
		if err != nil {
			return err
		}
	}

	return nil
}

func applyPatch(obj runtime.Object, patch v1beta1.CustomizeComponentsPatch) error {
	if obj == nil {
		return nil
	}

	old, err := json.Marshal(obj)
	if err != nil {
		return err
	}

	// reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
	// in obj that are removed by patch are cleared
	value := reflect.ValueOf(obj)
	value.Elem().Set(reflect.New(value.Type().Elem()).Elem())

	switch patch.Type {
	case v1beta1.JSONPatchType:
		patch, err := jsonpatch.DecodePatch([]byte(patch.Patch))
		if err != nil {
			return err
		}
		opts := jsonpatch.NewApplyOptions()
		opts.AllowMissingPathOnRemove = true
		opts.EnsurePathExistsOnAdd = true
		modified, err := patch.ApplyWithOptions(old, opts)
		if err != nil {
			return err
		}

		if err = json.Unmarshal(modified, obj); err != nil {
			return err
		}
	case v1beta1.MergePatchType:
		modified, err := jsonpatch.MergePatch(old, []byte(patch.Patch))
		if err != nil {
			return err
		}

		if err := json.Unmarshal(modified, obj); err != nil {
			return err
		}
	case v1beta1.StrategicMergePatchType:
		mergedByte, err := strategicpatch.StrategicMergePatch(old, []byte(patch.Patch), obj)
		if err != nil {
			return err
		}

		if err = json.Unmarshal(mergedByte, obj); err != nil {
			return err
		}
	default:
		return fmt.Errorf("PatchType is not supported")
	}

	return nil
}

// NewCustomizer returns a new Customizer.
func NewCustomizer(customizations v1beta1.CustomizeComponents) (*Customizer, error) {
	hash, err := getHash(customizations)
	if err != nil {
		return &Customizer{}, err
	}

	patches := customizations.Patches
	flagPatches := flagsToPatches(customizations.Flags)
	patches = append(patches, flagPatches...)

	return &Customizer{
		Patches: patches,
		hash:    hash,
	}, nil
}

func flagsToPatches(flags *v1beta1.Flags) []v1beta1.CustomizeComponentsPatch {
	patches := []v1beta1.CustomizeComponentsPatch{}
	if flags == nil {
		return patches
	}
	patches = addFlagsPatch(common.CDIApiServerResourceName, "Deployment", flags.API, patches)
	patches = addFlagsPatch(common.CDIControllerResourceName, "Deployment", flags.Controller, patches)
	patches = addFlagsPatch(common.CDIUploadProxyResourceName, "Deployment", flags.UploadProxy, patches)

	return patches
}

func addFlagsPatch(name, resource string, flags map[string]string, patches []v1beta1.CustomizeComponentsPatch) []v1beta1.CustomizeComponentsPatch {
	if len(flags) == 0 {
		return patches
	}

	return append(patches, v1beta1.CustomizeComponentsPatch{
		ResourceName: name,
		ResourceType: resource,
		Patch:        fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":%q,"args":["%s"]}]}}}}`, name, strings.Join(flagsToArray(flags), `","`)),
		Type:         v1beta1.StrategicMergePatchType,
	})
}

func flagsToArray(flags map[string]string) []string {
	farr := make([]string, 0)

	for flag, v := range flags {
		farr = append(farr, fmt.Sprintf("-%s", strings.ToLower(flag)))
		if v != "" {
			farr = append(farr, v)
		}
	}

	return farr
}

func getHash(customizations v1beta1.CustomizeComponents) (string, error) {
	// #nosec CWE: 326 - Use of weak cryptographic primitive (http://cwe.mitre.org/data/definitions/326.html)
	// reason: sha1 is not used for encryption but for creating a hash value
	hasher := sha1.New()

	sort.SliceStable(customizations.Patches, func(i, j int) bool {
		return len(customizations.Patches[i].Patch) < len(customizations.Patches[j].Patch)
	})

	values, err := json.Marshal(customizations)
	if err != nil {
		return "", err
	}
	hasher.Write(values)

	return hex.EncodeToString(hasher.Sum(nil)), nil
}
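
For readers skimming applyPatch above: the three PatchType values accept differently shaped payloads. A minimal sketch of the same label change expressed all three ways; since applyPatches is unexported it would have to live in this same controller package, and the Deployment and the "tier" label are illustrative only:

package controller

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func examplePatchTypes() error {
	deploy := &appsv1.Deployment{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
		ObjectMeta: metav1.ObjectMeta{Name: "cdi-deployment"},
	}

	patches := []v1beta1.CustomizeComponentsPatch{
		// RFC 6902 JSON patch: a list of operations.
		{Type: v1beta1.JSONPatchType, Patch: `[{"op":"add","path":"/metadata/labels","value":{"tier":"infra"}}]`},
		// RFC 7386 merge patch: a partial document merged over the object.
		{Type: v1beta1.MergePatchType, Patch: `{"metadata":{"labels":{"tier":"infra"}}}`},
		// Strategic merge patch: same shape, but list handling honors Kubernetes patch strategies.
		{Type: v1beta1.StrategicMergePatchType, Patch: `{"metadata":{"labels":{"tier":"infra"}}}`},
	}

	if err := applyPatches(deploy, patches); err != nil {
		return err
	}
	fmt.Println(deploy.Labels["tier"]) // "infra"
	return nil
}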
pkg/operator/controller/patches_test.go (new file, 202 lines)
@@ -0,0 +1,202 @@
package controller

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
	"kubevirt.io/containerized-data-importer/pkg/common"
	"strings"
)

var _ = Describe("Patches", func() {
	namespace := "fake-namespace"

	getControllerDeployment := func() *appsv1.Deployment {
		return &appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				APIVersion: appsv1.SchemeGroupVersion.String(),
				Kind:       "Deployment",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: namespace,
				Name:      common.CDIControllerResourceName,
			},
			Spec: appsv1.DeploymentSpec{},
		}
	}

	Context("generically apply patches", func() {

		flags := &cdiv1.Flags{
			Controller: map[string]string{
				"v": "4",
			},
		}

		customizer, _ := NewCustomizer(cdiv1.CustomizeComponents{
			Patches: []cdiv1.CustomizeComponentsPatch{
				{
					ResourceName: common.CDIControllerResourceName,
					ResourceType: "Deployment",
					Patch:        `{"metadata":{"labels":{"new-key":"added-this-label"}}}`,
					Type:         cdiv1.StrategicMergePatchType,
				},
				{
					ResourceName: "*",
					ResourceType: "Deployment",
					Patch:        `{"spec":{"template":{"spec":{"imagePullSecrets":[{"name":"image-pull"}]}}}}`,
					Type:         cdiv1.StrategicMergePatchType,
				},
			},
			Flags: flags,
		})

		deployment := getControllerDeployment()

		It("should apply to deployments", func() {
			deployments := []*appsv1.Deployment{
				deployment,
			}

			err := customizer.GenericApplyPatches(deployments)
			Expect(err).ToNot(HaveOccurred())
			Expect(deployment.ObjectMeta.Labels["new-key"]).To(Equal("added-this-label"))
			Expect(deployment.Spec.Template.Spec.ImagePullSecrets[0].Name).To(Equal("image-pull"))
			// check flags are applied
			Expect(deployment.Spec.Template.Spec.Containers[0].Args).To(Equal(flagsToArray(flags.Controller)))

			// check objects implement runtime.Object
			err = customizer.GenericApplyPatches([]string{"string"})
			Expect(err).To(HaveOccurred())
		})
	})

	Context("apply patch", func() {

		It("should not error on empty patch", func() {
			err := applyPatch(nil, cdiv1.CustomizeComponentsPatch{})
			Expect(err).ToNot(HaveOccurred())
		})
	})

	Context("get hash", func() {
		patch1 := cdiv1.CustomizeComponentsPatch{
			ResourceName: common.CDIControllerResourceName,
			ResourceType: "Deployment",
			Patch:        `{"metadata":{"labels":{"new-key":"added-this-label"}}}`,
			Type:         cdiv1.StrategicMergePatchType,
		}
		patch2 := cdiv1.CustomizeComponentsPatch{
			ResourceName: common.CDIApiServerResourceName,
			ResourceType: "Deployment",
			Patch:        `{"metadata":{"labels":{"my-custom-label":"custom-label"}}}`,
			Type:         cdiv1.StrategicMergePatchType,
		}
		patch3 := cdiv1.CustomizeComponentsPatch{
			ResourceName: common.CDIControllerResourceName,
			ResourceType: "Deployment",
			Patch:        `{"metadata":{"annotation":{"key":"value"}}}`,
			Type:         cdiv1.StrategicMergePatchType,
		}
		c1 := cdiv1.CustomizeComponents{
			Patches: []cdiv1.CustomizeComponentsPatch{patch1, patch2, patch3},
		}

		c2 := cdiv1.CustomizeComponents{
			Patches: []cdiv1.CustomizeComponentsPatch{patch2, patch1, patch3},
		}

		flags1 := &cdiv1.Flags{
			API: map[string]string{
				"v": "4",
			},
		}

		flags2 := &cdiv1.Flags{
			API: map[string]string{
				"v": "1",
			},
		}

		It("should be equal", func() {
			h1, err := getHash(c1)
			Expect(err).ToNot(HaveOccurred())
			h2, err := getHash(c2)
			Expect(err).ToNot(HaveOccurred())

			Expect(h1).To(Equal(h2))
		})

		It("should not be equal", func() {
			c1.Flags = flags1
			c2.Flags = flags2

			h1, err := getHash(c1)
			Expect(err).ToNot(HaveOccurred())
			h2, err := getHash(c2)
			Expect(err).ToNot(HaveOccurred())

			Expect(h1).ToNot(Equal(h2))
		})
	})

	DescribeTable("valueMatchesKey", func(value, key string, expected bool) {
		matches := valueMatchesKey(value, key)
		Expect(matches).To(Equal(expected))
	},
		Entry("should match wildcard", "*", "Deployment", true),
		Entry("should match with different cases", "deployment", "Deployment", true),
		Entry("should not match", "Service", "Deployment", false),
	)

	Describe("Config controller flags", func() {
		flags := map[string]string{
			"flag-one":  "1",
			"flag":      "3",
			"bool-flag": "",
		}
		resource := "Deployment"

		It("should return flags in the proper format", func() {
			fa := flagsToArray(flags)
			Expect(fa).To(HaveLen(5))

			Expect(strings.Join(fa, " ")).To(ContainSubstring("-flag-one 1"))
			Expect(strings.Join(fa, " ")).To(ContainSubstring("-flag 3"))
			Expect(strings.Join(fa, " ")).To(ContainSubstring("-bool-flag"))
		})

		It("should add flag patch", func() {
			patches := addFlagsPatch(common.CDIApiServerResourceName, resource, flags, []cdiv1.CustomizeComponentsPatch{})
			Expect(patches).To(HaveLen(1))
			patch := patches[0]

			Expect(patch.ResourceName).To(Equal(common.CDIApiServerResourceName))
			Expect(patch.ResourceType).To(Equal(resource))
		})

		It("should return empty patch", func() {
			patches := addFlagsPatch(common.CDIApiServerResourceName, resource, map[string]string{}, []cdiv1.CustomizeComponentsPatch{})
			Expect(patches).To(BeEmpty())
		})

		It("should chain patches", func() {
			patches := addFlagsPatch(common.CDIApiServerResourceName, resource, flags, []cdiv1.CustomizeComponentsPatch{})
			Expect(patches).To(HaveLen(1))

			patches = addFlagsPatch(common.CDIControllerResourceName, resource, flags, patches)
			Expect(patches).To(HaveLen(2))
		})

		It("should return all flag patches", func() {
			f := &cdiv1.Flags{
				API: flags,
			}

			patches := flagsToPatches(f)
			Expect(patches).To(HaveLen(1))
		})
	})
})
@@ -19,6 +19,7 @@ go_library(
     importpath = "kubevirt.io/containerized-data-importer/pkg/operator/resources/cluster",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/common:go_default_library",
         "//pkg/operator/resources:go_default_library",
         "//pkg/operator/resources/utils:go_default_library",
         "//staging/src/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1:go_default_library",
@@ -19,13 +19,13 @@ package cluster
 import (
 	"context"
 	"fmt"
 
 	"github.com/go-logr/logr"
 	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+	"kubevirt.io/containerized-data-importer/pkg/common"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	cdicorev1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
@@ -34,8 +34,7 @@ import (
 )
 
 const (
-	apiServerResourceName = "cdi-apiserver"
-	apiServerServiceName  = "cdi-api"
+	apiServerServiceName = "cdi-api"
 )
 
 func createStaticAPIServerResources(args *FactoryArgs) []client.Object {
@@ -639,9 +638,9 @@ func getAPIServerCABundle(namespace string, c client.Client, l logr.Logger) []by
 }
 
 func createAPIServerClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
-	return utils.ResourceBuilder.CreateClusterRoleBinding(apiServerResourceName, apiServerResourceName, apiServerResourceName, namespace)
+	return utils.ResourceBuilder.CreateClusterRoleBinding(common.CDIApiServerResourceName, common.CDIApiServerResourceName, common.CDIApiServerResourceName, namespace)
 }
 
 func createAPIServerClusterRole() *rbacv1.ClusterRole {
-	return utils.ResourceBuilder.CreateClusterRole(apiServerResourceName, getAPIServerClusterPolicyRules())
+	return utils.ResourceBuilder.CreateClusterRole(common.CDIApiServerResourceName, getAPIServerClusterPolicyRules())
 }
@@ -18,15 +18,12 @@ package cluster
 
 import (
 	rbacv1 "k8s.io/api/rbac/v1"
+	"kubevirt.io/containerized-data-importer/pkg/common"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 )
 
-const (
-	cronJobResourceName = "cdi-cronjob"
-)
-
 func createCronJobResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createCronJobClusterRole(),
@@ -53,9 +50,9 @@ func getCronJobClusterPolicyRules() []rbacv1.PolicyRule {
 }
 
 func createCronJobClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
-	return utils.ResourceBuilder.CreateClusterRoleBinding(cronJobResourceName, cronJobResourceName, cronJobResourceName, namespace)
+	return utils.ResourceBuilder.CreateClusterRoleBinding(common.CDICronJobResourceName, common.CDICronJobResourceName, common.CDICronJobResourceName, namespace)
 }
 
 func createCronJobClusterRole() *rbacv1.ClusterRole {
-	return utils.ResourceBuilder.CreateClusterRole(cronJobResourceName, getCronJobClusterPolicyRules())
+	return utils.ResourceBuilder.CreateClusterRole(common.CDICronJobResourceName, getCronJobClusterPolicyRules())
 }
@@ -18,15 +18,12 @@ package cluster
 
 import (
 	rbacv1 "k8s.io/api/rbac/v1"
+	"kubevirt.io/containerized-data-importer/pkg/common"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 )
 
-const (
-	uploadProxyResourceName = "cdi-uploadproxy"
-)
-
 func createUploadProxyResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createUploadProxyClusterRole(),
@@ -51,9 +48,9 @@ func getUploadProxyClusterPolicyRules() []rbacv1.PolicyRule {
 }
 
 func createUploadProxyClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
-	return utils.ResourceBuilder.CreateClusterRoleBinding(uploadProxyResourceName, uploadProxyResourceName, uploadProxyResourceName, namespace)
+	return utils.ResourceBuilder.CreateClusterRoleBinding(common.CDIUploadProxyResourceName, common.CDIUploadProxyResourceName, common.CDIUploadProxyResourceName, namespace)
 }
 
 func createUploadProxyClusterRole() *rbacv1.ClusterRole {
-	return utils.ResourceBuilder.CreateClusterRole(uploadProxyResourceName, getUploadProxyClusterPolicyRules())
+	return utils.ResourceBuilder.CreateClusterRole(common.CDIUploadProxyResourceName, getUploadProxyClusterPolicyRules())
 }
@@ -338,6 +338,52 @@ spec:
                 description: Override the URL used when uploading to a DataVolume
                 type: string
               type: object
+            customizeComponents:
+              description: CustomizeComponents defines patches for components deployed
+                by the CDI operator.
+              properties:
+                flags:
+                  description: Configure the value used for deployment and daemonset
+                    resources
+                  properties:
+                    api:
+                      additionalProperties:
+                        type: string
+                      type: object
+                    controller:
+                      additionalProperties:
+                        type: string
+                      type: object
+                    uploadProxy:
+                      additionalProperties:
+                        type: string
+                      type: object
+                  type: object
+                patches:
+                  items:
+                    description: CustomizeComponentsPatch defines a patch for some
+                      resource.
+                    properties:
+                      patch:
+                        type: string
+                      resourceName:
+                        minLength: 1
+                        type: string
+                      resourceType:
+                        minLength: 1
+                        type: string
+                      type:
+                        description: PatchType defines the patch type.
+                        type: string
+                    required:
+                    - patch
+                    - resourceName
+                    - resourceType
+                    - type
+                    type: object
+                  type: array
+                  x-kubernetes-list-type: atomic
+              type: object
             imagePullPolicy:
               description: PullPolicy describes a policy for if/when to pull a container
                 image
@@ -2592,6 +2638,52 @@ spec:
                 description: Override the URL used when uploading to a DataVolume
                 type: string
               type: object
+            customizeComponents:
+              description: CustomizeComponents defines patches for components deployed
+                by the CDI operator.
+              properties:
+                flags:
+                  description: Configure the value used for deployment and daemonset
+                    resources
+                  properties:
+                    api:
+                      additionalProperties:
+                        type: string
+                      type: object
+                    controller:
+                      additionalProperties:
+                        type: string
+                      type: object
+                    uploadProxy:
+                      additionalProperties:
+                        type: string
+                      type: object
+                  type: object
+                patches:
+                  items:
+                    description: CustomizeComponentsPatch defines a patch for some
+                      resource.
+                    properties:
+                      patch:
+                        type: string
+                      resourceName:
+                        minLength: 1
+                        type: string
+                      resourceType:
+                        minLength: 1
+                        type: string
+                      type:
+                        description: PatchType defines the patch type.
+                        type: string
+                    required:
+                    - patch
+                    - resourceName
+                    - resourceType
+                    - type
+                    type: object
+                  type: array
+                  x-kubernetes-list-type: atomic
+              type: object
            imagePullPolicy:
               description: PullPolicy describes a policy for if/when to pull a container
                 image
@@ -33,14 +33,6 @@ import (
 	utils "kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 )
 
-const (
-	apiServerRessouceName = "cdi-apiserver"
-)
-
-const (
-	cdiLabel = common.CDIComponentLabel
-)
-
 func createAPIServerResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createAPIServerServiceAccount(),
@@ -52,11 +44,11 @@ func createAPIServerResources(args *FactoryArgs) []client.Object {
 }
 
 func createAPIServerServiceAccount() *corev1.ServiceAccount {
-	return utils.ResourceBuilder.CreateServiceAccount(apiServerRessouceName)
+	return utils.ResourceBuilder.CreateServiceAccount(common.CDIApiServerResourceName)
 }
 
 func createAPIServerRoleBinding() *rbacv1.RoleBinding {
-	return utils.ResourceBuilder.CreateRoleBinding(apiServerRessouceName, apiServerRessouceName, apiServerRessouceName, "")
+	return utils.ResourceBuilder.CreateRoleBinding(common.CDIApiServerResourceName, common.CDIApiServerResourceName, common.CDIApiServerResourceName, "")
 }
 
 func getAPIServerNamespacedRules() []rbacv1.PolicyRule {
@@ -80,11 +72,11 @@ func getAPIServerNamespacedRules() []rbacv1.PolicyRule {
 }
 
 func createAPIServerRole() *rbacv1.Role {
-	return utils.ResourceBuilder.CreateRole(apiServerRessouceName, getAPIServerNamespacedRules())
+	return utils.ResourceBuilder.CreateRole(common.CDIApiServerResourceName, getAPIServerNamespacedRules())
 }
 
 func createAPIServerService() *corev1.Service {
-	service := utils.ResourceBuilder.CreateService("cdi-api", cdiLabel, apiServerRessouceName, nil)
+	service := utils.ResourceBuilder.CreateService("cdi-api", common.CDIComponentLabel, common.CDIApiServerResourceName, nil)
 	service.Spec.Ports = []corev1.ServicePort{
 		{
 			Port: 443,
@@ -100,14 +92,14 @@ func createAPIServerService() *corev1.Service {
 
 func createAPIServerDeployment(image, verbosity, pullPolicy string, imagePullSecrets []corev1.LocalObjectReference, priorityClassName string, infraNodePlacement *sdkapi.NodePlacement, replicas int32) *appsv1.Deployment {
 	defaultMode := corev1.ConfigMapVolumeSourceDefaultMode
-	deployment := utils.CreateDeployment(apiServerRessouceName, cdiLabel, apiServerRessouceName, apiServerRessouceName, imagePullSecrets, 1, infraNodePlacement)
+	deployment := utils.CreateDeployment(common.CDIApiServerResourceName, common.CDIComponentLabel, common.CDIApiServerResourceName, common.CDIApiServerResourceName, imagePullSecrets, 1, infraNodePlacement)
 	if priorityClassName != "" {
 		deployment.Spec.Template.Spec.PriorityClassName = priorityClassName
 	}
 	if replicas > 1 {
 		deployment.Spec.Replicas = &replicas
 	}
-	container := utils.CreateContainer(apiServerRessouceName, image, verbosity, pullPolicy)
+	container := utils.CreateContainer(common.CDIApiServerResourceName, image, verbosity, pullPolicy)
 	container.Env = []corev1.EnvVar{
 		{
 			Name: common.InstallerPartOfLabel,
@@ -34,10 +34,6 @@ import (
 	"kubevirt.io/containerized-data-importer/pkg/util"
 )
 
-const (
-	controllerResourceName = "cdi-deployment"
-)
-
 func createControllerResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createControllerServiceAccount(),
@@ -58,7 +54,7 @@ func createControllerResources(args *FactoryArgs) []client.Object {
 }
 
 func createControllerRoleBinding() *rbacv1.RoleBinding {
-	return utils.ResourceBuilder.CreateRoleBinding(controllerResourceName, controllerResourceName, common.ControllerServiceAccountName, "")
+	return utils.ResourceBuilder.CreateRoleBinding(common.CDIControllerResourceName, common.CDIControllerResourceName, common.ControllerServiceAccountName, "")
 }
 
 func getControllerNamespacedRules() []rbacv1.PolicyRule {
@@ -166,7 +162,7 @@ func getControllerNamespacedRules() []rbacv1.PolicyRule {
 }
 
 func createControllerRole() *rbacv1.Role {
-	return utils.ResourceBuilder.CreateRole(controllerResourceName, getControllerNamespacedRules())
+	return utils.ResourceBuilder.CreateRole(common.CDIControllerResourceName, getControllerNamespacedRules())
 }
 
 func createControllerServiceAccount() *corev1.ServiceAccount {
@@ -175,14 +171,16 @@ func createControllerServiceAccount() *corev1.ServiceAccount {
 
 func createControllerDeployment(controllerImage, importerImage, clonerImage, uploadServerImage, verbosity, pullPolicy string, imagePullSecrets []corev1.LocalObjectReference, priorityClassName string, infraNodePlacement *sdkapi.NodePlacement, replicas int32) *appsv1.Deployment {
 	defaultMode := corev1.ConfigMapVolumeSourceDefaultMode
-	deployment := utils.CreateDeployment(controllerResourceName, "app", "containerized-data-importer", common.ControllerServiceAccountName, imagePullSecrets, int32(1), infraNodePlacement)
+	// The match selector is immutable. That's why we should always use the same labels.
+	deployment := utils.CreateDeployment(common.CDIControllerResourceName, common.CDILabelKey, common.CDILabelValue, common.ControllerServiceAccountName, imagePullSecrets, int32(1), infraNodePlacement)
+	deployment.ObjectMeta.Labels[common.CDIComponentLabel] = common.CDIControllerResourceName
 	if priorityClassName != "" {
 		deployment.Spec.Template.Spec.PriorityClassName = priorityClassName
 	}
 	if replicas > 1 {
 		deployment.Spec.Replicas = &replicas
 	}
-	container := utils.CreateContainer("cdi-controller", controllerImage, verbosity, pullPolicy)
+	container := utils.CreateContainer(common.CDIControllerResourceName, controllerImage, verbosity, pullPolicy)
 	container.Ports = []corev1.ContainerPort{
 		{
 			Name: "metrics",
@@ -192,7 +190,6 @@ func createControllerDeployment(controllerImage, importerImage, clonerImage, upl
 	}
 	labels := util.MergeLabels(deployment.Spec.Template.GetLabels(), map[string]string{common.PrometheusLabelKey: common.PrometheusLabelValue})
 	//Add label for pod affinity
-	labels = util.AppendLabels(labels, map[string]string{cdiLabel: controllerResourceName})
 	deployment.SetLabels(labels)
 	deployment.Spec.Template.SetLabels(labels)
 	container.Env = []corev1.EnvVar{
@@ -210,7 +207,7 @@ func createControllerDeployment(controllerImage, importerImage, clonerImage, upl
 		},
 		{
 			Name:  "UPLOADPROXY_SERVICE",
-			Value: uploadProxyResourceName,
+			Value: common.CDIUploadProxyResourceName,
 		},
 		{
 			Name: "PULL_POLICY",
@@ -18,15 +18,12 @@ package namespaced
 
 import (
 	corev1 "k8s.io/api/core/v1"
+	"kubevirt.io/containerized-data-importer/pkg/common"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	utils "kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 )
 
-const (
-	cronJobResourceName = "cdi-cronjob"
-)
-
 func createCronJobResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createCronJobServiceAccount(),
@@ -34,5 +31,5 @@ func createCronJobResources(args *FactoryArgs) []client.Object {
 }
 
 func createCronJobServiceAccount() *corev1.ServiceAccount {
-	return utils.ResourceBuilder.CreateServiceAccount(cronJobResourceName)
+	return utils.ResourceBuilder.CreateServiceAccount(common.CDICronJobResourceName)
 }
@@ -22,6 +22,7 @@ import (
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"kubevirt.io/containerized-data-importer/pkg/common"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api"
@@ -29,10 +30,6 @@ import (
 	utils "kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 )
 
-const (
-	uploadProxyResourceName = "cdi-uploadproxy"
-)
-
 func createUploadProxyResources(args *FactoryArgs) []client.Object {
 	return []client.Object{
 		createUploadProxyServiceAccount(),
@@ -44,7 +41,7 @@ func createUploadProxyResources(args *FactoryArgs) []client.Object {
 }
 
 func createUploadProxyService() *corev1.Service {
-	service := utils.ResourceBuilder.CreateService(uploadProxyResourceName, cdiLabel, uploadProxyResourceName, nil)
+	service := utils.ResourceBuilder.CreateService(common.CDIUploadProxyResourceName, common.CDIComponentLabel, common.CDIUploadProxyResourceName, nil)
 	service.Spec.Ports = []corev1.ServicePort{
 		{
 			Port: 443,
@@ -60,11 +57,11 @@ func createUploadProxyService() *corev1.Service {
 }
 
 func createUploadProxyServiceAccount() *corev1.ServiceAccount {
-	return utils.ResourceBuilder.CreateServiceAccount(uploadProxyResourceName)
+	return utils.ResourceBuilder.CreateServiceAccount(common.CDIUploadProxyResourceName)
 }
 
 func createUploadProxyRoleBinding() *rbacv1.RoleBinding {
-	return utils.ResourceBuilder.CreateRoleBinding(uploadProxyResourceName, uploadProxyResourceName, uploadProxyResourceName, "")
+	return utils.ResourceBuilder.CreateRoleBinding(common.CDIUploadProxyResourceName, common.CDIUploadProxyResourceName, common.CDIUploadProxyResourceName, "")
 }
 
 func getUploadProxyNamespacedRules() []rbacv1.PolicyRule {
@@ -84,19 +81,19 @@ func getUploadProxyNamespacedRules() []rbacv1.PolicyRule {
 }
 
 func createUploadProxyRole() *rbacv1.Role {
-	return utils.ResourceBuilder.CreateRole(uploadProxyResourceName, getUploadProxyNamespacedRules())
+	return utils.ResourceBuilder.CreateRole(common.CDIUploadProxyResourceName, getUploadProxyNamespacedRules())
 }
 
 func createUploadProxyDeployment(image, verbosity, pullPolicy string, imagePullSecrets []corev1.LocalObjectReference, priorityClassName string, infraNodePlacement *sdkapi.NodePlacement, replicas int32) *appsv1.Deployment {
 	defaultMode := corev1.ConfigMapVolumeSourceDefaultMode
-	deployment := utils.CreateDeployment(uploadProxyResourceName, cdiLabel, uploadProxyResourceName, uploadProxyResourceName, imagePullSecrets, int32(1), infraNodePlacement)
+	deployment := utils.CreateDeployment(common.CDIUploadProxyResourceName, common.CDIComponentLabel, common.CDIUploadProxyResourceName, common.CDIUploadProxyResourceName, imagePullSecrets, int32(1), infraNodePlacement)
 	if priorityClassName != "" {
 		deployment.Spec.Template.Spec.PriorityClassName = priorityClassName
 	}
 	if replicas > 1 {
 		deployment.Spec.Replicas = &replicas
 	}
-	container := utils.CreateContainer(uploadProxyResourceName, image, verbosity, pullPolicy)
+	container := utils.CreateContainer(common.CDIUploadProxyResourceName, image, verbosity, pullPolicy)
 	container.Env = []corev1.EnvVar{
 		{
 			Name: "APISERVER_PUBLIC_KEY",
@@ -39,6 +39,7 @@ var commonLabels = map[string]string{
 	CDILabel: "",
 	common.AppKubernetesManagedByLabel: "cdi-operator",
 	common.AppKubernetesComponentLabel: "storage",
+	common.CDILabelKey: common.CDILabelValue,
 }
 
 var operatorLabels = map[string]string{
@@ -827,7 +827,8 @@ type CDISpec struct {
 	// Selectors and tolerations that should apply to cdi infrastructure components
 	Infra ComponentConfig `json:"infra,omitempty"`
 	// Restrict on which nodes CDI workload pods will be scheduled
 	Workloads sdkapi.NodePlacement `json:"workload,omitempty"`
+	CustomizeComponents CustomizeComponents `json:"customizeComponents,omitempty"`
 	// Clone strategy override: should we use a host-assisted copy even if snapshots are available?
 	// +kubebuilder:validation:Enum="copy";"snapshot";"csi-clone"
 	CloneStrategyOverride *CDICloneStrategy `json:"cloneStrategyOverride,omitempty"`
@@ -868,6 +869,47 @@ const (
 	CloneStrategyCsiClone CDICloneStrategy = "csi-clone"
 )
 
+// CustomizeComponents defines patches for components deployed by the CDI operator.
+type CustomizeComponents struct {
+	// +listType=atomic
+	Patches []CustomizeComponentsPatch `json:"patches,omitempty"`
+
+	// Configure the value used for deployment and daemonset resources
+	Flags *Flags `json:"flags,omitempty"`
+}
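Not part of the diff: to make the new API concrete, here is a minimal Go sketch of a populated CustomizeComponents value. The annotation key and value are illustrative; the cdiv1 alias is the containerized-data-importer-api package that the e2e test later in this commit also imports.

package main

import (
	"encoding/json"
	"fmt"

	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func main() {
	// Annotate the cdi-apiserver Deployment and raise the controller verbosity.
	custom := cdiv1.CustomizeComponents{
		Patches: []cdiv1.CustomizeComponentsPatch{{
			ResourceName: "cdi-apiserver",
			ResourceType: "Deployment",
			Patch:        `[{"op":"add","path":"/metadata/annotations/owner","value":"storage-team"}]`,
			Type:         cdiv1.JSONPatchType,
		}},
		Flags: &cdiv1.Flags{Controller: map[string]string{"v": "6"}},
	}
	out, _ := json.MarshalIndent(custom, "", "  ")
	// This is the JSON you would place under the CDI CR's spec.customizeComponents.
	fmt.Println(string(out))
}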
+
+// Flags will create a patch that will replace all flags for the container's
+// command field. The only flags that will be used are those defined here. There
+// are no guarantees around forward/backward compatibility. If set incorrectly,
+// the patched resource will error on rollout until the flags are corrected.
+type Flags struct {
+	API         map[string]string `json:"api,omitempty"`
+	Controller  map[string]string `json:"controller,omitempty"`
+	UploadProxy map[string]string `json:"uploadProxy,omitempty"`
+}
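The Flags maps are flag names to values, with an empty value meaning a bare flag. A sketch of the rendering the e2e test later in this commit asserts ({"v": "5", "skip_headers": ""} must surface as -v 5 and -skip_headers in the container args); flagsToArgs is a hypothetical helper, not CDI's implementation, and the real operator may order or render flags differently.

package main

import (
	"fmt"
	"sort"
)

// flagsToArgs is a hypothetical helper, not CDI's actual implementation.
func flagsToArgs(flags map[string]string) []string {
	keys := make([]string, 0, len(flags))
	for k := range flags {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order keeps rollouts stable
	args := make([]string, 0, 2*len(keys))
	for _, k := range keys {
		args = append(args, "-"+k)
		if v := flags[k]; v != "" {
			args = append(args, v) // valued flag, e.g. "-v 5"
		}
	}
	return args
}

func main() {
	fmt.Println(flagsToArgs(map[string]string{"v": "5", "skip_headers": ""}))
	// Output: [-skip_headers -v 5]
}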
+
+// CustomizeComponentsPatch defines a patch for some resource.
+type CustomizeComponentsPatch struct {
+	// +kubebuilder:validation:MinLength=1
+	ResourceName string `json:"resourceName"`
+	// +kubebuilder:validation:MinLength=1
+	ResourceType string `json:"resourceType"`
+	Patch        string    `json:"patch"`
+	Type         PatchType `json:"type"`
+}
+
+// PatchType defines the patch type.
+type PatchType string
+
+const (
+	// JSONPatchType is a constant that represents the type of JSON patch.
+	JSONPatchType PatchType = "json"
+	// MergePatchType is a constant that represents the type of JSON Merge patch.
+	MergePatchType PatchType = "merge"
+	// StrategicMergePatchType is a constant that represents the type of Strategic Merge patch.
+	StrategicMergePatchType PatchType = "strategic"
+)
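The three PatchType values follow the usual Kubernetes patch families. Since this commit vendors github.com/evanphx/json-patch/v5, that library can illustrate the first two directly; strategic merge additionally needs the typed object's schema (apimachinery's strategicpatch), as noted in the closing comment. A runnable sketch:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"metadata":{"annotations":{"a":"1"}}}`)

	// "json" (RFC 6902): an ordered list of operations.
	p, err := jsonpatch.DecodePatch([]byte(`[{"op":"add","path":"/metadata/annotations/b","value":"2"}]`))
	if err != nil {
		panic(err)
	}
	out, err := p.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// "merge" (RFC 7386): a partial document merged over the original.
	out, err = jsonpatch.MergePatch(doc, []byte(`{"metadata":{"annotations":{"b":"2"}}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// "strategic" additionally honors Kubernetes patchStrategy/patchMergeKey
	// markers and needs the typed object's schema (see apimachinery's
	// strategicpatch package), so it is not shown here.
}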
 
 // DataImportCronSourceFormat defines the format of the DataImportCron-created disk image sources
 type DataImportCronSourceFormat string
@@ -446,6 +446,28 @@ func (ComponentConfig) SwaggerDoc() map[string]string {
 	}
 }
 
+func (CustomizeComponents) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"":        "CustomizeComponents defines patches for components deployed by the CDI operator.",
+		"patches": "+listType=atomic",
+		"flags":   "Configure the value used for deployment and daemonset resources",
+	}
+}
+
+func (Flags) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Flags will create a patch that will replace all flags for the container's\ncommand field. The only flags that will be used are those defined here. There are no\nguarantees around forward/backward compatibility. If set incorrectly, the patched\nresource will error on rollout until the flags are corrected.",
+	}
+}
+
+func (CustomizeComponentsPatch) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"":             "CustomizeComponentsPatch defines a patch for some resource.",
+		"resourceName": "+kubebuilder:validation:MinLength=1",
+		"resourceType": "+kubebuilder:validation:MinLength=1",
+	}
+}
+
 func (CDIStatus) SwaggerDoc() map[string]string {
 	return map[string]string{
 		"": "CDIStatus defines the status of the installation",
@@ -303,6 +303,7 @@ func (in *CDISpec) DeepCopyInto(out *CDISpec) {
 	}
 	in.Infra.DeepCopyInto(&out.Infra)
 	in.Workloads.DeepCopyInto(&out.Workloads)
+	in.CustomizeComponents.DeepCopyInto(&out.CustomizeComponents)
 	if in.CloneStrategyOverride != nil {
 		in, out := &in.CloneStrategyOverride, &out.CloneStrategyOverride
 		*out = new(CDICloneStrategy)
@@ -455,6 +456,48 @@ func (in *ConditionState) DeepCopy() *ConditionState {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) {
+	*out = *in
+	if in.Patches != nil {
+		in, out := &in.Patches, &out.Patches
+		*out = make([]CustomizeComponentsPatch, len(*in))
+		copy(*out, *in)
+	}
+	if in.Flags != nil {
+		in, out := &in.Flags, &out.Flags
+		*out = new(Flags)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponents.
+func (in *CustomizeComponents) DeepCopy() *CustomizeComponents {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomizeComponents)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomizeComponentsPatch) DeepCopyInto(out *CustomizeComponentsPatch) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponentsPatch.
+func (in *CustomizeComponentsPatch) DeepCopy() *CustomizeComponentsPatch {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomizeComponentsPatch)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DataImportCron) DeepCopyInto(out *DataImportCron) {
 	*out = *in
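What the generated deepcopy above buys, in one sketch: the copy's slice and maps must not alias the original's, so cached CDI objects stay immutable when a copy is edited. This snippet is illustrative and not part of the diff.

package main

import (
	"fmt"

	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func main() {
	orig := cdiv1.CustomizeComponents{
		Patches: []cdiv1.CustomizeComponentsPatch{{ResourceName: "cdi-apiserver", ResourceType: "Deployment", Patch: "{}", Type: cdiv1.MergePatchType}},
		Flags:   &cdiv1.Flags{API: map[string]string{"v": "5"}},
	}
	cp := orig.DeepCopy()
	cp.Flags.API["v"] = "7"            // mutates only the copy's map
	cp.Patches[0].ResourceName = "xxx" // mutates only the copy's slice
	fmt.Println(orig.Flags.API["v"], orig.Patches[0].ResourceName)
	// Output: 5 cdi-apiserver
}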
@@ -1225,6 +1268,43 @@ func (in *FilesystemOverhead) DeepCopy() *FilesystemOverhead {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Flags) DeepCopyInto(out *Flags) {
+	*out = *in
+	if in.API != nil {
+		in, out := &in.API, &out.API
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Controller != nil {
+		in, out := &in.Controller, &out.Controller
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.UploadProxy != nil {
+		in, out := &in.UploadProxy, &out.UploadProxy
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flags.
+func (in *Flags) DeepCopy() *Flags {
+	if in == nil {
+		return nil
+	}
+	out := new(Flags)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ImportProxy) DeepCopyInto(out *ImportProxy) {
 	*out = *in
@@ -4,39 +4,38 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
 	"reflect"
 	"regexp"
 	"strings"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
 	routev1 "github.com/openshift/api/route/v1"
 	routeclient "github.com/openshift/client-go/route/clientset/versioned"
 	secclient "github.com/openshift/client-go/security/clientset/versioned"
 	conditions "github.com/openshift/custom-resource-status/conditions/v1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	schedulev1 "k8s.io/api/scheduling/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	"kubevirt.io/containerized-data-importer/pkg/common"
 	"kubevirt.io/containerized-data-importer/pkg/controller"
 	cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
 	resourcesutils "kubevirt.io/containerized-data-importer/pkg/operator/resources/utils"
 	"kubevirt.io/containerized-data-importer/tests/framework"
 	"kubevirt.io/containerized-data-importer/tests/utils"
 	sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api"
 	"kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk"
 )
 
 var (
@@ -879,6 +878,93 @@ var _ = Describe("ALL Operator tests", func() {
 			}, 5*time.Minute, 1*time.Second).Should(BeTrue())
 
 		})
+		It("Should update infra deployments when modifying customizeComponents in the CDI CR", func() {
+			By("Modify the customizeComponents separately")
+			cdi := getCDI(f)
+			testJsonPatch := "test-json-patch"
+			testStrategicPatch := "test-strategic-patch"
+			testMergePatch := "test-merge-patch"
+			cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{
+				Patches: []cdiv1.CustomizeComponentsPatch{
+					{
+						ResourceName: "cdi-apiserver",
+						ResourceType: "Deployment",
+						Patch:        fmt.Sprintf(`[{"op":"add","path":"/metadata/annotations/%s","value":"%s"}]`, testJsonPatch, testJsonPatch),
+						Type:         cdiv1.JSONPatchType,
+					},
+					{
+						ResourceName: "cdi-deployment",
+						ResourceType: "Deployment",
+						Patch:        fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testStrategicPatch, testStrategicPatch),
+						Type:         cdiv1.StrategicMergePatchType,
+					},
+					{
+						ResourceName: "cdi-uploadproxy",
+						ResourceType: "Deployment",
+						Patch:        fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testMergePatch, testMergePatch),
+						Type:         cdiv1.MergePatchType,
+					},
+				},
+				Flags: &cdiv1.Flags{
+					API:         map[string]string{"v": "5", "skip_headers": ""},
+					Controller:  map[string]string{"v": "6", "skip_headers": ""},
+					UploadProxy: map[string]string{"v": "7", "skip_headers": ""},
+				},
+			}
+			_, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{})
+			Expect(err).ToNot(HaveOccurred())
+
+			Eventually(func() bool {
+				for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} {
+					depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+					Expect(err).ToNot(HaveOccurred())
+
+					if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] == "" {
+						return false
+					}
+				}
+				By("Patches applied")
+				return true
+			}, 5*time.Minute, 1*time.Second).Should(BeTrue())
+
+			verifyPatches := func(deployment, annoKey, annoValue string, desiredArgs ...string) {
+				By(fmt.Sprintf("Verify patches of %s", deployment))
+				Eventually(func() bool {
+					depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deployment, metav1.GetOptions{})
+					Expect(err).ToNot(HaveOccurred())
+					args := strings.Join(depl.Spec.Template.Spec.Containers[0].Args, " ")
+					for _, a := range desiredArgs {
+						if !strings.Contains(args, a) {
+							return false
+						}
+					}
+					return depl.GetAnnotations()[annoKey] == annoValue
+				}, 5*time.Minute, 1*time.Second).Should(BeTrue())
+			}
+			verifyPatches("cdi-apiserver", testJsonPatch, testJsonPatch, "-v 5", "-skip_headers")
+			verifyPatches("cdi-deployment", testStrategicPatch, testStrategicPatch, "-v 6", "-skip_headers")
+			verifyPatches("cdi-uploadproxy", testMergePatch, testMergePatch, "-v 7", "-skip_headers")
+
+			By("Reset CustomizeComponents for CDI CR")
+			cdi = getCDI(f)
+
+			cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{}
+			_, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			Eventually(func() bool {
+				for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} {
+					depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+					Expect(err).ToNot(HaveOccurred())
+
+					_, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName)
+					if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] != "" {
+						return false
+					}
+				}
+				return true
+			}, 5*time.Minute, 1*time.Second).Should(BeTrue())
+
+		})
 	})
 
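The test keys off the cc.AnnCdiCustomizeComponentHash annotation appearing (patches applied) and then disappearing (spec reset). As an aside, a sketch of how such a hash can be derived; the sha256-over-JSON scheme below is an assumption for illustration, not CDI's actual implementation.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// customization is a stand-in for the customizeComponents spec.
type customization struct {
	Patches []string          `json:"patches,omitempty"`
	Flags   map[string]string `json:"flags,omitempty"`
}

// hashCustomization is hypothetical: any change to the spec yields a new
// annotation value, which is what lets the operator detect drift.
func hashCustomization(c customization) (string, error) {
	raw, err := json.Marshal(c) // json.Marshal sorts map keys, so this is deterministic
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(raw)
	return fmt.Sprintf("%x", sum), nil
}

func main() {
	h, _ := hashCustomization(customization{Flags: map[string]string{"v": "5"}})
	fmt.Println(h) // changed spec -> changed hash -> reconcile rolls the change out
}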
 var _ = Describe("Operator cert config tests", func() {
@@ -1074,9 +1160,9 @@ var _ = Describe("ALL Operator tests", func() {
 			// Deployment
 			verifyPodPriorityClass(cdiDeploymentPodPrefix, string(prioClass), common.CDILabelSelector)
 			// API server
-			verifyPodPriorityClass(cdiApiServerPodPrefix, string(prioClass), "")
+			verifyPodPriorityClass(cdiApiServerPodPrefix, string(prioClass), common.CDILabelSelector)
 			// Upload server
-			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(prioClass), "")
+			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(prioClass), common.CDILabelSelector)
 			By("Verifying there is just a single cdi controller pod")
 			Eventually(func() error {
 				_, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector)
@@ -1102,9 +1188,9 @@ var _ = Describe("ALL Operator tests", func() {
 			By("Verifying the CDI deployment is updated")
 			verifyPodPriorityClass(cdiDeploymentPodPrefix, string(systemClusterCritical), common.CDILabelSelector)
 			By("Verifying the CDI api server is updated")
-			verifyPodPriorityClass(cdiApiServerPodPrefix, string(systemClusterCritical), "")
+			verifyPodPriorityClass(cdiApiServerPodPrefix, string(systemClusterCritical), common.CDILabelSelector)
 			By("Verifying the CDI upload proxy server is updated")
-			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), "")
+			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), common.CDILabelSelector)
 		})
 
 		It("should use openshift priority class if not set and available", func() {
@@ -1118,9 +1204,9 @@ var _ = Describe("ALL Operator tests", func() {
 			// Deployment
 			verifyPodPriorityClass(cdiDeploymentPodPrefix, string(osUserCrit.Name), common.CDILabelSelector)
 			// API server
-			verifyPodPriorityClass(cdiApiServerPodPrefix, string(osUserCrit.Name), "")
+			verifyPodPriorityClass(cdiApiServerPodPrefix, string(osUserCrit.Name), common.CDILabelSelector)
 			// Upload server
-			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(osUserCrit.Name), "")
+			verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(osUserCrit.Name), common.CDILabelSelector)
 		})
 	})
 })
5  vendor/github.com/evanphx/json-patch/v5/BUILD.bazel  generated vendored
@@ -10,5 +10,8 @@ go_library(
     importmap = "kubevirt.io/containerized-data-importer/vendor/github.com/evanphx/json-patch/v5",
     importpath = "github.com/evanphx/json-patch/v5",
     visibility = ["//visibility:public"],
-    deps = ["//vendor/github.com/pkg/errors:go_default_library"],
+    deps = [
+        "//vendor/github.com/evanphx/json-patch/v5/internal/json:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
+    ],
 )
18  vendor/github.com/evanphx/json-patch/v5/internal/json/BUILD.bazel  generated vendored Normal file
@@ -0,0 +1,18 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "decode.go",
        "encode.go",
        "fold.go",
        "indent.go",
        "scanner.go",
        "stream.go",
        "tables.go",
        "tags.go",
    ],
    importmap = "kubevirt.io/containerized-data-importer/vendor/github.com/evanphx/json-patch/v5/internal/json",
    importpath = "github.com/evanphx/json-patch/v5/internal/json",
    visibility = ["//vendor/github.com/evanphx/json-patch/v5:__subpackages__"],
)
1385  vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go  generated vendored Normal file
File diff suppressed because it is too large
1473  vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go  generated vendored Normal file
File diff suppressed because it is too large
141  vendor/github.com/evanphx/json-patch/v5/internal/json/fold.go  generated vendored Normal file
@@ -0,0 +1,141 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"unicode/utf8"
)

const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'
	smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//   - S maps to s and to U+017F 'ſ' Latin small letter long s
//   - k maps to K and to U+212A 'K' Kelvin sign
//
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]

	}
	return len(t) == 0
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, sb := range s {
		tb := t[i]
		if sb == tb {
			continue
		}
		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
			if sb&caseMask != tb&caseMask {
				return false
			}
		} else {
			return false
		}
	}
	return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, b := range s {
		if b&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}
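fold.go above is a vendored copy of Go's encoding/json case-folding helpers. A test-style sketch of the comparator selection; foldFunc is unexported, so this fragment would have to live in the same package, and it is illustrative only, not part of the diff.

package json

import (
	"bytes"
	"testing"
)

func TestFoldFuncSelection(t *testing.T) {
	// Each key class selects a progressively cheaper comparator, but all of
	// them must agree that a key folds equal to its upper-cased form.
	for _, key := range [][]byte{
		[]byte("héllo"), // non-ASCII       -> bytes.EqualFold
		[]byte("kind"),  // special 'k'     -> equalFoldRight
		[]byte("a_b"),   // non-letter '_'  -> asciiEqualFold
		[]byte("name"),  // plain letters   -> simpleLetterEqualFold
	} {
		eq := foldFunc(key)
		if !eq(key, bytes.ToUpper(key)) {
			t.Errorf("fold mismatch for %q", key)
		}
	}
}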
42  vendor/github.com/evanphx/json-patch/v5/internal/json/fuzz.go  generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gofuzz

package json

import (
	"fmt"
)

func Fuzz(data []byte) (score int) {
	for _, ctor := range []func() any{
		func() any { return new(any) },
		func() any { return new(map[string]any) },
		func() any { return new([]any) },
	} {
		v := ctor()
		err := Unmarshal(data, v)
		if err != nil {
			continue
		}
		score = 1

		m, err := Marshal(v)
		if err != nil {
			fmt.Printf("v=%#v\n", v)
			panic(err)
		}

		u := ctor()
		err = Unmarshal(m, u)
		if err != nil {
			fmt.Printf("v=%#v\n", v)
			fmt.Printf("m=%s\n", m)
			panic(err)
		}
	}

	return
}
143  vendor/github.com/evanphx/json-patch/v5/internal/json/indent.go  generated vendored Normal file
@@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
)

// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
	return compact(dst, src, false)
}

func compact(dst *bytes.Buffer, src []byte, escape bool) error {
	origLen := dst.Len()
	scan := newScanner()
	defer freeScanner(scan)
	start := 0
	for i, c := range src {
		if escape && (c == '<' || c == '>' || c == '&') {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u00`)
			dst.WriteByte(hex[c>>4])
			dst.WriteByte(hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u202`)
			dst.WriteByte(hex[src[i+2]&0xF])
			start = i + 3
		}
		v := scan.step(scan, c)
		if v >= scanSkipSpace {
			if v == scanError {
				break
			}
			if start < i {
				dst.Write(src[start:i])
			}
			start = i + 1
		}
	}
	if scan.eof() == scanError {
		dst.Truncate(origLen)
		return scan.err
	}
	if start < len(src) {
		dst.Write(src[start:])
	}
	return nil
}

func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
	dst.WriteByte('\n')
	dst.WriteString(prefix)
	for i := 0; i < depth; i++ {
		dst.WriteString(indent)
	}
}

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
	origLen := dst.Len()
	scan := newScanner()
	defer freeScanner(scan)
	needIndent := false
	depth := 0
	for _, c := range src {
		scan.bytes++
		v := scan.step(scan, c)
		if v == scanSkipSpace {
			continue
		}
		if v == scanError {
			break
		}
		if needIndent && v != scanEndObject && v != scanEndArray {
			needIndent = false
			depth++
			newline(dst, prefix, indent, depth)
		}

		// Emit semantically uninteresting bytes
		// (in particular, punctuation in strings) unmodified.
		if v == scanContinue {
			dst.WriteByte(c)
			continue
		}

		// Add spacing around real punctuation.
		switch c {
		case '{', '[':
			// delay indent so that empty object and array are formatted as {} and [].
			needIndent = true
			dst.WriteByte(c)

		case ',':
			dst.WriteByte(c)
			newline(dst, prefix, indent, depth)

		case ':':
			dst.WriteByte(c)
			dst.WriteByte(' ')

		case '}', ']':
			if needIndent {
				// suppress indent in empty object/array
				needIndent = false
			} else {
				depth--
				newline(dst, prefix, indent, depth)
			}
			dst.WriteByte(c)

		default:
			dst.WriteByte(c)
		}
	}
	if scan.eof() == scanError {
		dst.Truncate(origLen)
		return scan.err
	}
	return nil
}
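indent.go, like the rest of this internal package, is adapted from Go's encoding/json (note the Go Authors copyright), so the standard library demonstrates the same behavior, including the delayed indent that keeps empty objects and arrays as {} and []:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{"a": [1, 2], "b": {}}`)

	var indented bytes.Buffer
	if err := json.Indent(&indented, src, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String()) // the empty object "b" stays {} on one line

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, indented.Bytes()); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"a":[1,2],"b":{}}
}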
610  vendor/github.com/evanphx/json-patch/v5/internal/json/scanner.go  generated vendored Normal file
@@ -0,0 +1,610 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.

import (
	"strconv"
	"sync"
)

// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
	scan := newScanner()
	defer freeScanner(scan)
	return checkValid(data, scan) == nil
}

// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
// checkValid returns nil or a SyntaxError.
func checkValid(data []byte, scan *scanner) error {
	scan.reset()
	for _, c := range data {
		scan.bytes++
		if scan.step(scan, c) == scanError {
			return scan.err
		}
	}
	if scan.eof() == scanError {
		return scan.err
	}
	return nil
}

// A SyntaxError is a description of a JSON syntax error.
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
	msg    string // description of error
	Offset int64  // error occurred after reading Offset bytes
}

func (e *SyntaxError) Error() string { return e.msg }

// A scanner is a JSON scanning state machine.
// Callers call scan.reset and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
	// The step is a func to be called to execute the next transition.
	// Also tried using an integer constant and a single func
	// with a switch, but using the func directly was 10% faster
	// on a 64-bit Mac Mini, and it's nicer to read.
	step func(*scanner, byte) int

	// Reached end of top-level value.
	endTop bool

	// Stack of what we're in the middle of - array values, object keys, object values.
	parseState []int

	// Error that happened, if any.
	err error

	// total bytes consumed, updated by decoder.Decode (and deliberately
	// not set to zero by scan.reset)
	bytes int64
}

var scannerPool = sync.Pool{
	New: func() any {
		return &scanner{}
	},
}

func newScanner() *scanner {
	scan := scannerPool.Get().(*scanner)
	// scan.reset by design doesn't set bytes to zero
	scan.bytes = 0
	scan.reset()
	return scan
}

func freeScanner(scan *scanner) {
	// Avoid hanging on to too much memory in extreme cases.
	if len(scan.parseState) > 1024 {
		scan.parseState = nil
	}
	scannerPool.Put(scan)
}

// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
	// Continue.
	scanContinue     = iota // uninteresting byte
	scanBeginLiteral        // end implied by next result != scanContinue
	scanBeginObject         // begin object
	scanObjectKey           // just finished object key (string)
	scanObjectValue         // just finished non-last object value
	scanEndObject           // end object (implies scanObjectValue if possible)
	scanBeginArray          // begin array
	scanArrayValue          // just finished array value
	scanEndArray            // end array (implies scanArrayValue if possible)
	scanSkipSpace           // space byte; can skip; known to be last "continue" result

	// Stop.
	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
	scanError // hit an error, scanner.err.
)

// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
	parseObjectKey   = iota // parsing object key (before colon)
	parseObjectValue        // parsing object value (after colon)
	parseArrayValue         // parsing array value
)

// This limits the max nesting depth to prevent stack overflow.
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
const maxNestingDepth = 10000

// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
	s.step = stateBeginValue
	s.parseState = s.parseState[0:0]
	s.err = nil
	s.endTop = false
}

// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
	if s.err != nil {
		return scanError
	}
	if s.endTop {
		return scanEnd
	}
	s.step(s, ' ')
	if s.endTop {
		return scanEnd
	}
	if s.err == nil {
		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
	}
	return scanError
}

// pushParseState pushes a new parse state p onto the parse stack.
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
	s.parseState = append(s.parseState, newParseState)
	if len(s.parseState) <= maxNestingDepth {
		return successState
	}
	return s.error(c, "exceeded max depth")
}

// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
	n := len(s.parseState) - 1
	s.parseState = s.parseState[0:n]
	if n == 0 {
		s.step = stateEndTop
		s.endTop = true
	} else {
		s.step = stateEndValue
	}
}

func isSpace(c byte) bool {
	return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
}

// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == ']' {
		return stateEndValue(s, c)
	}
	return stateBeginValue(s, c)
}

// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	switch c {
	case '{':
		s.step = stateBeginStringOrEmpty
		return s.pushParseState(c, parseObjectKey, scanBeginObject)
	case '[':
		s.step = stateBeginValueOrEmpty
		return s.pushParseState(c, parseArrayValue, scanBeginArray)
	case '"':
		s.step = stateInString
		return scanBeginLiteral
	case '-':
		s.step = stateNeg
		return scanBeginLiteral
	case '0': // beginning of 0.123
		s.step = state0
		return scanBeginLiteral
	case 't': // beginning of true
		s.step = stateT
		return scanBeginLiteral
	case 'f': // beginning of false
		s.step = stateF
		return scanBeginLiteral
	case 'n': // beginning of null
		s.step = stateN
		return scanBeginLiteral
	}
	if '1' <= c && c <= '9' { // beginning of 1234.5
		s.step = state1
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}

// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == '}' {
		n := len(s.parseState)
		s.parseState[n-1] = parseObjectValue
		return stateEndValue(s, c)
	}
	return stateBeginString(s, c)
}

// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c == '"' {
		s.step = stateInString
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
	n := len(s.parseState)
	if n == 0 {
		// Completed top-level before the current byte.
		s.step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if isSpace(c) {
		s.step = stateEndValue
		return scanSkipSpace
	}
	ps := s.parseState[n-1]
	switch ps {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.step = stateBeginValue
			return scanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		if c == ',' {
			s.parseState[n-1] = parseObjectKey
			s.step = stateBeginString
			return scanObjectValue
		}
		if c == '}' {
			s.popParseState()
			return scanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		if c == ',' {
			s.step = stateBeginValue
			return scanArrayValue
		}
		if c == ']' {
			s.popParseState()
			return scanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
	if !isSpace(c) {
		// Complain about non-space byte on next call.
		s.error(c, "after top-level value")
	}
	return scanEnd
}

// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
	if c == '"' {
		s.step = stateEndValue
		return scanContinue
	}
	if c == '\\' {
		s.step = stateInStringEsc
		return scanContinue
	}
	if c < 0x20 {
		return s.error(c, "in string literal")
	}
	return scanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		s.step = stateInString
		return scanContinue
	case 'u':
		s.step = stateInStringEscU
		return scanContinue
	}
	return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU1
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU12
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU123
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInString
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
	if c == '0' {
		s.step = state0
		return scanContinue
	}
	if '1' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return s.error(c, "in numeric literal")
}

// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return state0(s, c)
}

// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
	if c == '.' {
		s.step = stateDot
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = stateDot0
		return scanContinue
	}
	return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
	if c == '+' || c == '-' {
		s.step = stateESign
		return scanContinue
	}
	return stateESign(s, c)
}

// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		s.step = stateE0
		return scanContinue
	}
	return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
	if '0' <= c && c <= '9' {
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
	if c == 'r' {
		s.step = stateTr
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'r')")
}

// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
	if c == 'u' {
		s.step = stateTru
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'u')")
}

// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
	if c == 'e' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'e')")
}

// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
	if c == 'a' {
		s.step = stateFa
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'a')")
}

// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
	if c == 'l' {
		s.step = stateFal
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'l')")
}

// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
	if c == 's' {
		s.step = stateFals
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 's')")
}

// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
	if c == 'e' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'e')")
}

// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
	if c == 'u' {
		s.step = stateNu
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'u')")
}

// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
	if c == 'l' {
		s.step = stateNul
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
	if c == 'l' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
	return scanError
}

// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
	s.step = stateError
	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
	return scanError
}

// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
	// special cases - different from quoted strings
	if c == '\'' {
		return `'\''`
	}
	if c == '"' {
		return `'"'`
	}

	// use quoted string with different quotation marks
	s := strconv.Quote(string(c))
	return "'" + s[1:len(s)-1] + "'"
}
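scanner.go above mirrors the standard library's state machine, so stdlib json.Valid shows the same accept/reject behavior, including the `[1}` and `5.1.2` counterexamples from the comments:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	for _, in := range []string{`{"ok":true}`, `[1,2,3]`, `[1}`, `5.1.2`} {
		// json.Valid drives the same per-byte step/eof protocol described above.
		fmt.Printf("%-14q valid=%v\n", in, json.Valid([]byte(in)))
	}
}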
495  vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go  generated vendored Normal file
@@ -0,0 +1,495 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"encoding/json"
	"io"
)

// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
	r       io.Reader
	buf     []byte
	d       decodeState
	scanp   int   // start of unread data in buf
	scanned int64 // amount of data already scanned
	scan    scanner
	err     error

	tokenState int
	tokenStack []int
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }

// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v any) error {
	if dec.err != nil {
		return dec.err
	}

	if err := dec.tokenPrepareForDecode(); err != nil {
		return err
	}

	if !dec.tokenValueAllowed() {
		return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
	}

	// Read whole value into buffer.
	n, err := dec.readValue()
	if err != nil {
		return err
	}
	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
	dec.scanp += n

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	err = dec.d.unmarshal(v)

	// fixup token streaming state
	dec.tokenValueEnd()

	return err
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
	return bytes.NewReader(dec.buf[dec.scanp:])
}

// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.reset()

	scanp := dec.scanp
	var err error
Input:
	// help the compiler see that scanp is never negative, so it can remove
	// some bounds checks below.
	for scanp >= 0 {

		// Look in the buffer for a new value.
		for ; scanp < len(dec.buf); scanp++ {
			c := dec.buf[scanp]
			dec.scan.bytes++
			switch dec.scan.step(&dec.scan, c) {
			case scanEnd:
				// scanEnd is delayed one byte so we decrement
				// the scanner bytes count by 1 to ensure that
				// this value is correct in the next call of Decode.
				dec.scan.bytes--
				break Input
			case scanEndObject, scanEndArray:
				// scanEnd is delayed one byte.
				// We might block trying to get that byte from src,
				// so instead invent a space byte.
				if stateEndValue(&dec.scan, ' ') == scanEnd {
					scanp++
					break Input
				}
			case scanError:
				dec.err = dec.scan.err
				return 0, dec.scan.err
			}
		}

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				if dec.scan.step(&dec.scan, ' ') == scanEnd {
					break Input
				}
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		n := scanp - dec.scanp
		err = dec.refill()
		scanp = dec.scanp + n
	}
	return scanp - dec.scanp, nil
}

func (dec *Decoder) refill() error {
	// Make room to read more into the buffer.
	// First slide down data already consumed.
	if dec.scanp > 0 {
		dec.scanned += int64(dec.scanp)
		n := copy(dec.buf, dec.buf[dec.scanp:])
		dec.buf = dec.buf[:n]
		dec.scanp = 0
	}

	// Grow buffer if not large enough.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf) < minRead {
		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
		copy(newBuf, dec.buf)
		dec.buf = newBuf
	}

	// Read. Delay error for next iteration (after scan).
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[0 : len(dec.buf)+n]

	return err
}

func nonSpace(b []byte) bool {
	for _, c := range b {
		if !isSpace(c) {
			return true
		}
	}
	return false
}
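readValue and refill implement the buffering that lets Decode pull one JSON value at a time from a stream. The stdlib twin of this Decoder shows the calling pattern:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Decode reads one value per call, buffering past the value if needed.
	dec := json.NewDecoder(strings.NewReader(`{"a":1} {"a":2}`))
	for {
		var v map[string]int
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(v["a"])
	}
}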
// An Encoder writes JSON values to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
err error
|
||||
escapeHTML bool
|
||||
|
||||
indentBuf *bytes.Buffer
|
||||
indentPrefix string
|
||||
indentValue string
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w, escapeHTML: true}
|
||||
}
|
||||
|
||||
// Encode writes the JSON encoding of v to the stream,
|
||||
// followed by a newline character.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
if enc.err != nil {
|
||||
return enc.err
|
||||
}
|
||||
|
||||
e := newEncodeState()
|
||||
defer encodeStatePool.Put(e)
|
||||
|
||||
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Terminate each value with a newline.
|
||||
// This makes the output look a little nicer
|
||||
// when debugging, and some kind of space
|
||||
// is required if the encoded value was a number,
|
||||
// so that the reader knows there aren't more
|
||||
// digits coming.
|
||||
e.WriteByte('\n')
|
||||
|
||||
b := e.Bytes()
|
||||
if enc.indentPrefix != "" || enc.indentValue != "" {
|
||||
if enc.indentBuf == nil {
|
||||
enc.indentBuf = new(bytes.Buffer)
|
||||
}
|
||||
enc.indentBuf.Reset()
|
||||
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b = enc.indentBuf.Bytes()
|
||||
}
|
||||
if _, err = enc.w.Write(b); err != nil {
|
||||
enc.err = err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SetIndent instructs the encoder to format each subsequent encoded
|
||||
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
|
||||
// Calling SetIndent("", "") disables indentation.
|
||||
func (enc *Encoder) SetIndent(prefix, indent string) {
|
||||
enc.indentPrefix = prefix
|
||||
enc.indentValue = indent
|
||||
}
|
||||
|
||||
// SetEscapeHTML specifies whether problematic HTML characters
|
||||
// should be escaped inside JSON quoted strings.
|
||||
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
|
||||
// to avoid certain safety problems that can arise when embedding JSON in HTML.
|
||||
//
|
||||
// In non-HTML settings where the escaping interferes with the readability
|
||||
// of the output, SetEscapeHTML(false) disables this behavior.
|
||||
func (enc *Encoder) SetEscapeHTML(on bool) {
|
||||
enc.escapeHTML = on
|
||||
}
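
// Illustrative usage sketch (an addition for clarity, not part of the vendored
// source; it assumes this package is imported under the name "json"):
//
//	enc := json.NewEncoder(os.Stdout)
//	enc.SetIndent("", "  ")  // pretty-print with a two-space indent
//	enc.SetEscapeHTML(false) // keep &, <, > literal outside HTML contexts
//	if err := enc.Encode(map[string]int{"replicas": 3}); err != nil {
//		log.Fatal(err)
//	}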

// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage = json.RawMessage

// A Token holds a value of one of these types:
//
//	Delim, for the four JSON delimiters [ ] { }
//	bool, for JSON booleans
//	float64, for JSON numbers
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
type Token any

const (
	tokenTopValue = iota
	tokenArrayStart
	tokenArrayValue
	tokenArrayComma
	tokenObjectStart
	tokenObjectKey
	tokenObjectColon
	tokenObjectValue
	tokenObjectComma
)

// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
	// Note: Not calling peek before switch, to avoid
	// putting peek into the standard Decode path.
	// peek is only called when using the Token API.
	switch dec.tokenState {
	case tokenArrayComma:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ',' {
			return &SyntaxError{"expected comma after array element", dec.InputOffset()}
		}
		dec.scanp++
		dec.tokenState = tokenArrayValue
	case tokenObjectColon:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ':' {
			return &SyntaxError{"expected colon after object key", dec.InputOffset()}
		}
		dec.scanp++
		dec.tokenState = tokenObjectValue
	}
	return nil
}

func (dec *Decoder) tokenValueAllowed() bool {
	switch dec.tokenState {
	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
		return true
	}
	return false
}

func (dec *Decoder) tokenValueEnd() {
	switch dec.tokenState {
	case tokenArrayStart, tokenArrayValue:
		dec.tokenState = tokenArrayComma
	case tokenObjectValue:
		dec.tokenState = tokenObjectComma
	}
}

// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

func (d Delim) String() string {
	return string(d)
}

// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
	for {
		c, err := dec.peek()
		if err != nil {
			return nil, err
		}
		switch c {
		case '[':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenArrayStart
			return Delim('['), nil

		case ']':
			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim(']'), nil

		case '{':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenObjectStart
			return Delim('{'), nil

		case '}':
			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim('}'), nil

		case ':':
			if dec.tokenState != tokenObjectColon {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = tokenObjectValue
			continue

		case ',':
			if dec.tokenState == tokenArrayComma {
				dec.scanp++
				dec.tokenState = tokenArrayValue
				continue
			}
			if dec.tokenState == tokenObjectComma {
				dec.scanp++
				dec.tokenState = tokenObjectKey
				continue
			}
			return dec.tokenError(c)

		case '"':
			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
				var x string
				old := dec.tokenState
				dec.tokenState = tokenTopValue
				err := dec.Decode(&x)
				dec.tokenState = old
				if err != nil {
					return nil, err
				}
				dec.tokenState = tokenObjectColon
				return x, nil
			}
			fallthrough

		default:
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			var x any
			if err := dec.Decode(&x); err != nil {
				return nil, err
			}
			return x, nil
		}
	}
}

func (dec *Decoder) tokenError(c byte) (Token, error) {
	var context string
	switch dec.tokenState {
	case tokenTopValue:
		context = " looking for beginning of value"
	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
		context = " looking for beginning of value"
	case tokenArrayComma:
		context = " after array element"
	case tokenObjectKey:
		context = " looking for beginning of object key string"
	case tokenObjectColon:
		context = " after object key"
	case tokenObjectComma:
		context = " after object key:value pair"
	}
	return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
}

// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
	c, err := dec.peek()
	return err == nil && c != ']' && c != '}'
}

func (dec *Decoder) peek() (byte, error) {
	var err error
	for {
		for i := dec.scanp; i < len(dec.buf); i++ {
			c := dec.buf[i]
			if isSpace(c) {
				continue
			}
			dec.scanp = i
			return c, nil
		}
		// buffer has been scanned, now report any error
		if err != nil {
			return 0, err
		}
		err = dec.refill()
	}
}

// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
	return dec.scanned + int64(dec.scanp)
}
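// Illustrative usage sketch (an addition for clarity, not part of the vendored
// source): streaming over mixed JSON with the Token API above. Delimiters come
// back as Delim values; io.EOF marks a clean end of input.
//
//	dec := json.NewDecoder(strings.NewReader(`{"a": 1, "b": [true, null]}`))
//	for {
//		t, err := dec.Token()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Printf("%T: %v\n", t, t)
//	}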
218 vendor/github.com/evanphx/json-patch/v5/internal/json/tables.go generated vendored Normal file
@@ -0,0 +1,218 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import "unicode/utf8"

// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
var safeSet = [utf8.RuneSelf]bool{
	' ':  true,
	'!':  true,
	'"':  false,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  true,
	'\'': true,
	'(':  true,
	')':  true,
	'*':  true,
	'+':  true,
	',':  true,
	'-':  true,
	'.':  true,
	'/':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	':':  true,
	';':  true,
	'<':  true,
	'=':  true,
	'>':  true,
	'?':  true,
	'@':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'V':  true,
	'W':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'[':  true,
	'\\': false,
	']':  true,
	'^':  true,
	'_':  true,
	'`':  true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'{':  true,
	'|':  true,
	'}':  true,
	'~':  true,
	'\u007f': true,
}

// htmlSafeSet holds the value true if the ASCII character with the given
// array position can be safely represented inside a JSON string, embedded
// inside of HTML <script> tags, without any additional escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), the backslash character ("\"), HTML opening and closing
// tags ("<" and ">"), and the ampersand ("&").
var htmlSafeSet = [utf8.RuneSelf]bool{
	' ':  true,
	'!':  true,
	'"':  false,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  false,
	'\'': true,
	'(':  true,
	')':  true,
	'*':  true,
	'+':  true,
	',':  true,
	'-':  true,
	'.':  true,
	'/':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	':':  true,
	';':  true,
	'<':  false,
	'=':  true,
	'>':  false,
	'?':  true,
	'@':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'V':  true,
	'W':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'[':  true,
	'\\': false,
	']':  true,
	'^':  true,
	'_':  true,
	'`':  true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'{':  true,
	'|':  true,
	'}':  true,
	'~':  true,
	'\u007f': true,
}
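// Illustrative sketch (an addition for clarity, not part of the vendored
// source): how an encoder typically consults these tables. needsEscape is a
// hypothetical helper; b is assumed to be an ASCII byte (< utf8.RuneSelf),
// since that is the length of both arrays.
//
//	func needsEscape(b byte, escapeHTML bool) bool {
//		if escapeHTML {
//			return !htmlSafeSet[b] // also escapes &, <, >
//		}
//		return !safeSet[b] // escapes only controls, '"', and '\'
//	}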
38 vendor/github.com/evanphx/json-patch/v5/internal/json/tags.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	tag, opt, _ := strings.Cut(tag, ",")
	return tag, tagOptions(opt)
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var name string
		name, s, _ = strings.Cut(s, ",")
		if name == optionName {
			return true
		}
	}
	return false
}
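// Illustrative sketch (an addition for clarity, not part of the vendored
// source): splitting a struct tag and testing its options.
//
//	name, opts := parseTag("name,omitempty,string")
//	// name == "name"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("omit") == false (options match whole names only)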
58 vendor/github.com/evanphx/json-patch/v5/merge.go generated vendored
@@ -2,9 +2,12 @@ package jsonpatch
 
 import (
 	"bytes"
-	"encoding/json"
+	"errors"
 	"fmt"
+	"io"
 	"reflect"
+
+	"github.com/evanphx/json-patch/v5/internal/json"
 )
 
 func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
@@ -88,14 +91,14 @@ func pruneDocNulls(doc *partialDoc) *partialDoc {
 func pruneAryNulls(ary *partialArray) *partialArray {
 	newAry := []*lazyNode{}
 
-	for _, v := range *ary {
+	for _, v := range ary.nodes {
 		if v != nil {
 			pruneNulls(v)
 		}
 		newAry = append(newAry, v)
 	}
 
-	*ary = newAry
+	ary.nodes = newAry
 
 	return ary
 }
@@ -117,20 +120,28 @@ func MergePatch(docData, patchData []byte) ([]byte, error) {
 }
 
 func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+	if !json.Valid(docData) {
+		return nil, errBadJSONDoc
+	}
+
+	if !json.Valid(patchData) {
+		return nil, errBadJSONPatch
+	}
+
 	doc := &partialDoc{}
 
-	docErr := json.Unmarshal(docData, doc)
+	docErr := doc.UnmarshalJSON(docData)
 
 	patch := &partialDoc{}
 
-	patchErr := json.Unmarshal(patchData, patch)
+	patchErr := patch.UnmarshalJSON(patchData)
 
 	if isSyntaxError(docErr) {
 		return nil, errBadJSONDoc
 	}
 
 	if isSyntaxError(patchErr) {
-		return nil, errBadJSONPatch
+		return patchData, nil
 	}
 
 	if docErr == nil && doc.obj == nil {
@@ -138,7 +149,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	}
 
 	if patchErr == nil && patch.obj == nil {
-		return nil, errBadJSONPatch
+		return patchData, nil
 	}
 
 	if docErr != nil || patchErr != nil {
@@ -151,15 +162,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 		}
 	} else {
 		patchAry := &partialArray{}
-		patchErr = json.Unmarshal(patchData, patchAry)
+		patchErr = unmarshal(patchData, &patchAry.nodes)
 
 		if patchErr != nil {
+			// Not an array either, a literal is the result directly.
+			if json.Valid(patchData) {
+				return patchData, nil
+			}
 			return nil, errBadJSONPatch
 		}
 
 		pruneAryNulls(patchAry)
 
-		out, patchErr := json.Marshal(patchAry)
+		out, patchErr := json.Marshal(patchAry.nodes)
 
 		if patchErr != nil {
 			return nil, errBadJSONPatch
@@ -175,6 +190,12 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 }
 
 func isSyntaxError(err error) bool {
+	if errors.Is(err, io.EOF) {
+		return true
+	}
+	if errors.Is(err, io.ErrUnexpectedEOF) {
+		return true
+	}
 	if _, ok := err.(*json.SyntaxError); ok {
 		return true
 	}
@@ -227,12 +248,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 	originalDoc := map[string]interface{}{}
 	modifiedDoc := map[string]interface{}{}
 
-	err := json.Unmarshal(originalJSON, &originalDoc)
+	err := unmarshal(originalJSON, &originalDoc)
 	if err != nil {
 		return nil, errBadJSONDoc
 	}
 
-	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+	err = unmarshal(modifiedJSON, &modifiedDoc)
 	if err != nil {
 		return nil, errBadJSONDoc
 	}
@@ -245,6 +266,10 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 	return json.Marshal(dest)
 }
 
+func unmarshal(data []byte, into interface{}) error {
+	return json.UnmarshalValid(data, into)
+}
+
 // createArrayMergePatch will return an array of merge-patch documents capable
 // of converting the original document to the modified document for each
 // pair of JSON documents provided in the arrays.
@@ -253,12 +278,12 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 	originalDocs := []json.RawMessage{}
 	modifiedDocs := []json.RawMessage{}
 
-	err := json.Unmarshal(originalJSON, &originalDocs)
+	err := unmarshal(originalJSON, &originalDocs)
 	if err != nil {
 		return nil, errBadJSONDoc
 	}
 
-	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+	err = unmarshal(modifiedJSON, &modifiedDocs)
 	if err != nil {
 		return nil, errBadJSONDoc
 	}
@@ -314,6 +339,11 @@ func matchesValue(av, bv interface{}) bool {
 		if bt == at {
 			return true
 		}
+	case json.Number:
+		bt := bv.(json.Number)
+		if bt == at {
+			return true
+		}
 	case float64:
 		bt := bv.(float64)
 		if bt == at {
@@ -377,7 +407,7 @@ func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
 		if len(dst) > 0 {
 			into[key] = dst
 		}
-	case string, float64, bool:
+	case string, float64, bool, json.Number:
 		if !matchesValue(av, bv) {
 			into[key] = bv
 		}
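// Illustrative usage sketch (an addition for clarity, not part of the diff
// above): the RFC 7386 merge-patch entry point whose internals change here.
// A null value in the patch deletes the key; other keys are added or replaced.
//
//	doc := []byte(`{"name": "a", "labels": {"x": "1"}}`)
//	patch := []byte(`{"labels": {"x": null, "y": "2"}}`)
//	merged, err := jsonpatch.MergePatch(doc, patch)
//	// merged: {"labels":{"y":"2"},"name":"a"} (object key order may vary)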
378 vendor/github.com/evanphx/json-patch/v5/patch.go generated vendored
@@ -2,11 +2,12 @@ package jsonpatch
 
 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"strconv"
 	"strings"
+	"unicode"
 
+	"github.com/evanphx/json-patch/v5/internal/json"
 	"github.com/pkg/errors"
 )
 
@@ -45,7 +46,7 @@ var (
 type lazyNode struct {
 	raw   *json.RawMessage
 	doc   *partialDoc
-	ary   partialArray
+	ary   *partialArray
 	which int
 }
 
@@ -56,11 +57,15 @@ type Operation map[string]*json.RawMessage
 type Patch []Operation
 
 type partialDoc struct {
+	self *lazyNode
 	keys []string
 	obj  map[string]*lazyNode
 }
 
-type partialArray []*lazyNode
+type partialArray struct {
+	self  *lazyNode
+	nodes []*lazyNode
+}
 
 type container interface {
 	get(key string, options *ApplyOptions) (*lazyNode, error)
@@ -107,14 +112,14 @@ func newRawMessage(buf []byte) *json.RawMessage {
 	return &ra
 }
 
-func (n *lazyNode) MarshalJSON() ([]byte, error) {
+func (n *lazyNode) RedirectMarshalJSON() (any, error) {
 	switch n.which {
 	case eRaw:
-		return json.Marshal(n.raw)
+		return n.raw, nil
 	case eDoc:
-		return json.Marshal(n.doc)
+		return n.doc, nil
 	case eAry:
-		return json.Marshal(n.ary)
+		return n.ary.nodes, nil
 	default:
 		return nil, ErrUnknownType
 	}
@@ -128,39 +133,38 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-func (n *partialDoc) MarshalJSON() ([]byte, error) {
-	var buf bytes.Buffer
-	if _, err := buf.WriteString("{"); err != nil {
-		return nil, err
+func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
+	if err := buf.WriteByte('{'); err != nil {
+		return err
 	}
 	for i, k := range n.keys {
 		if i > 0 {
-			if _, err := buf.WriteString(", "); err != nil {
-				return nil, err
+			if err := buf.WriteByte(','); err != nil {
+				return err
 			}
 		}
 		key, err := json.Marshal(k)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if _, err := buf.Write(key); err != nil {
-			return nil, err
+			return err
 		}
-		if _, err := buf.WriteString(": "); err != nil {
-			return nil, err
+		if err := buf.WriteByte(':'); err != nil {
+			return err
 		}
 		value, err := json.Marshal(n.obj[k])
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if _, err := buf.Write(value); err != nil {
-			return nil, err
+			return err
 		}
 	}
-	if _, err := buf.WriteString("}"); err != nil {
-		return nil, err
+	if err := buf.WriteByte('}'); err != nil {
+		return err
 	}
-	return buf.Bytes(), nil
+	return nil
 }
 
 type syntaxError struct {
@@ -172,70 +176,29 @@ func (err *syntaxError) Error() string {
 }
 
 func (n *partialDoc) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &n.obj); err != nil {
+	keys, err := json.UnmarshalValidWithKeys(data, &n.obj)
+	if err != nil {
 		return err
 	}
-	buffer := bytes.NewBuffer(data)
-	d := json.NewDecoder(buffer)
-	if t, err := d.Token(); err != nil {
-		return err
-	} else if t != startObject {
-		return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)}
-	}
-	for d.More() {
-		k, err := d.Token()
-		if err != nil {
-			return err
-		}
-		key, ok := k.(string)
-		if !ok {
-			return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)}
-		}
-		if err := skipValue(d); err != nil {
-			return err
-		}
-		n.keys = append(n.keys, key)
-	}
+
+	n.keys = keys
+
 	return nil
 }
 
-func skipValue(d *json.Decoder) error {
-	t, err := d.Token()
-	if err != nil {
-		return err
-	}
-	if t != startObject && t != startArray {
-		return nil
-	}
-	for d.More() {
-		if t == startObject {
-			// consume key token
-			if _, err := d.Token(); err != nil {
-				return err
-			}
-		}
-		if err := skipValue(d); err != nil {
-			return err
-		}
-	}
-	end, err := d.Token()
-	if err != nil {
-		return err
-	}
-	if t == startObject && end != endObject {
-		return &syntaxError{msg: "expected close object token"}
-	}
-	if t == startArray && end != endArray {
-		return &syntaxError{msg: "expected close object token"}
-	}
-	return nil
+func (n *partialArray) UnmarshalJSON(data []byte) error {
+	return json.UnmarshalValid(data, &n.nodes)
 }
 
+func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
+	return n.nodes, nil
+}
+
 func deepCopy(src *lazyNode) (*lazyNode, int, error) {
 	if src == nil {
 		return nil, 0, nil
 	}
-	a, err := src.MarshalJSON()
+	a, err := json.Marshal(src)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -243,6 +206,16 @@ func deepCopy(src *lazyNode) (*lazyNode, int, error) {
 	return newLazyNode(newRawMessage(a)), sz, nil
 }
 
+func (n *lazyNode) nextByte() byte {
+	s := []byte(*n.raw)
+
+	for unicode.IsSpace(rune(s[0])) {
+		s = s[1:]
+	}
+
+	return s[0]
+}
+
 func (n *lazyNode) intoDoc() (*partialDoc, error) {
 	if n.which == eDoc {
 		return n.doc, nil
@@ -252,7 +225,15 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
 		return nil, ErrInvalid
 	}
 
-	err := json.Unmarshal(*n.raw, &n.doc)
+	if n.nextByte() != '{' {
+		return nil, ErrInvalid
+	}
+
+	err := unmarshal(*n.raw, &n.doc)
+
+	if n.doc == nil {
+		return nil, ErrInvalid
+	}
 
 	if err != nil {
 		return nil, err
@@ -264,21 +245,21 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
 
 func (n *lazyNode) intoAry() (*partialArray, error) {
 	if n.which == eAry {
-		return &n.ary, nil
+		return n.ary, nil
 	}
 
 	if n.raw == nil {
 		return nil, ErrInvalid
 	}
 
-	err := json.Unmarshal(*n.raw, &n.ary)
+	err := unmarshal(*n.raw, &n.ary)
 
 	if err != nil {
 		return nil, err
 	}
 
 	n.which = eAry
-	return &n.ary, nil
+	return n.ary, nil
 }
 
 func (n *lazyNode) compact() []byte {
@@ -302,12 +283,16 @@ func (n *lazyNode) tryDoc() bool {
 		return false
 	}
 
-	err := json.Unmarshal(*n.raw, &n.doc)
+	err := unmarshal(*n.raw, &n.doc)
 
 	if err != nil {
 		return false
 	}
 
+	if n.doc == nil {
+		return false
+	}
+
 	n.which = eDoc
 	return true
 }
@@ -317,7 +302,7 @@ func (n *lazyNode) tryAry() bool {
 		return false
 	}
 
-	err := json.Unmarshal(*n.raw, &n.ary)
+	err := unmarshal(*n.raw, &n.ary)
 
 	if err != nil {
 		return false
@@ -327,6 +312,18 @@ func (n *lazyNode) tryAry() bool {
 	return true
 }
 
+func (n *lazyNode) isNull() bool {
+	if n == nil {
+		return true
+	}
+
+	if n.raw == nil {
+		return true
+	}
+
+	return bytes.Equal(n.compact(), rawJSONNull)
+}
+
 func (n *lazyNode) equal(o *lazyNode) bool {
 	if n.which == eRaw {
 		if !n.tryDoc() && !n.tryAry() {
@@ -334,7 +331,27 @@ func (n *lazyNode) equal(o *lazyNode) bool {
 				return false
 			}
 
-			return bytes.Equal(n.compact(), o.compact())
+			nc := n.compact()
+			oc := o.compact()
+
+			if nc[0] == '"' && oc[0] == '"' {
+				// ok, 2 strings
+
+				var ns, os string
+
+				err := json.UnmarshalValid(nc, &ns)
+				if err != nil {
+					return false
+				}
+				err = json.UnmarshalValid(oc, &os)
+				if err != nil {
+					return false
+				}
+
+				return ns == os
+			}
+
+			return bytes.Equal(nc, oc)
 		}
 	}
 
@@ -380,12 +397,12 @@ func (n *lazyNode) equal(o *lazyNode) bool {
 		return false
 	}
 
-	if len(n.ary) != len(o.ary) {
+	if len(n.ary.nodes) != len(o.ary.nodes) {
 		return false
 	}
 
-	for idx, val := range n.ary {
-		if !val.equal(o.ary[idx]) {
+	for idx, val := range n.ary.nodes {
+		if !val.equal(o.ary.nodes[idx]) {
 			return false
 		}
 	}
@@ -398,7 +415,7 @@ func (o Operation) Kind() string {
 	if obj, ok := o["op"]; ok && obj != nil {
 		var op string
 
-		err := json.Unmarshal(*obj, &op)
+		err := unmarshal(*obj, &op)
 
 		if err != nil {
 			return "unknown"
@@ -415,7 +432,7 @@ func (o Operation) Path() (string, error) {
 	if obj, ok := o["path"]; ok && obj != nil {
 		var op string
 
-		err := json.Unmarshal(*obj, &op)
+		err := unmarshal(*obj, &op)
 
 		if err != nil {
 			return "unknown", err
@@ -432,7 +449,7 @@ func (o Operation) From() (string, error) {
 	if obj, ok := o["from"]; ok && obj != nil {
 		var op string
 
-		err := json.Unmarshal(*obj, &op)
+		err := unmarshal(*obj, &op)
 
 		if err != nil {
 			return "unknown", err
@@ -446,6 +463,10 @@ func (o Operation) From() (string, error) {
 
 func (o Operation) value() *lazyNode {
 	if obj, ok := o["value"]; ok {
+		// A `null` gets decoded as a nil RawMessage, so let's fix it up here.
+		if obj == nil {
+			return newLazyNode(newRawMessage(rawJSONNull))
+		}
 		return newLazyNode(obj)
 	}
 
@@ -454,10 +475,14 @@ func (o Operation) value() *lazyNode {
 
 // ValueInterface decodes the operation value into an interface.
 func (o Operation) ValueInterface() (interface{}, error) {
-	if obj, ok := o["value"]; ok && obj != nil {
+	if obj, ok := o["value"]; ok {
+		if obj == nil {
+			return nil, nil
+		}
+
 		var v interface{}
 
-		err := json.Unmarshal(*obj, &v)
+		err := unmarshal(*obj, &v)
 
 		if err != nil {
 			return nil, err
@@ -493,6 +518,9 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
 	split := strings.Split(path, "/")
 
 	if len(split) < 2 {
+		if path == "" {
+			return doc, ""
+		}
 		return nil, ""
 	}
 
@@ -548,6 +576,9 @@ func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error
 }
 
 func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
+	if key == "" {
+		return d.self, nil
+	}
 	v, ok := d.obj[key]
 	if !ok {
 		return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@@ -587,19 +618,19 @@ func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) err
 		if !options.SupportNegativeIndices {
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		if idx < -len(*d) {
+		if idx < -len(d.nodes) {
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		idx += len(*d)
+		idx += len(d.nodes)
 	}
 
-	(*d)[idx] = val
+	d.nodes[idx] = val
 	return nil
 }
 
 func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error {
 	if key == "-" {
-		*d = append(*d, val)
+		d.nodes = append(d.nodes, val)
 		return nil
 	}
 
@@ -608,11 +639,11 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
 		return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
 	}
 
-	sz := len(*d) + 1
+	sz := len(d.nodes) + 1
 
 	ary := make([]*lazyNode, sz)
 
-	cur := *d
+	cur := d
 
 	if idx >= len(ary) {
 		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
@@ -628,15 +659,19 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
 		idx += len(ary)
 	}
 
-	copy(ary[0:idx], cur[0:idx])
+	copy(ary[0:idx], cur.nodes[0:idx])
 	ary[idx] = val
-	copy(ary[idx+1:], cur[idx:])
+	copy(ary[idx+1:], cur.nodes[idx:])
 
-	*d = ary
+	d.nodes = ary
 	return nil
 }
 
 func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) {
+	if key == "" {
+		return d.self, nil
+	}
+
 	idx, err := strconv.Atoi(key)
 
 	if err != nil {
@@ -647,17 +682,17 @@ func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error)
 		if !options.SupportNegativeIndices {
 			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		if idx < -len(*d) {
+		if idx < -len(d.nodes) {
 			return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		idx += len(*d)
+		idx += len(d.nodes)
 	}
 
-	if idx >= len(*d) {
+	if idx >= len(d.nodes) {
 		return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 	}
 
-	return (*d)[idx], nil
+	return d.nodes[idx], nil
 }
 
 func (d *partialArray) remove(key string, options *ApplyOptions) error {
@@ -666,9 +701,9 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
 		return err
 	}
 
-	cur := *d
+	cur := d
 
-	if idx >= len(cur) {
+	if idx >= len(cur.nodes) {
 		if options.AllowMissingPathOnRemove {
 			return nil
 		}
@@ -679,21 +714,21 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
 		if !options.SupportNegativeIndices {
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		if idx < -len(cur) {
+		if idx < -len(cur.nodes) {
 			if options.AllowMissingPathOnRemove {
 				return nil
 			}
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		idx += len(cur)
+		idx += len(cur.nodes)
 	}
 
-	ary := make([]*lazyNode, len(cur)-1)
+	ary := make([]*lazyNode, len(cur.nodes)-1)
 
-	copy(ary[0:idx], cur[0:idx])
-	copy(ary[idx:], cur[idx+1:])
+	copy(ary[0:idx], cur.nodes[0:idx])
+	copy(ary[idx:], cur.nodes[idx+1:])
 
-	*d = ary
+	d.nodes = ary
 	return nil
 }
 
@@ -703,6 +738,32 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
 		return errors.Wrapf(ErrMissing, "add operation failed to decode path")
 	}
 
+	// special case, adding to empty means replacing the container with the value given
+	if path == "" {
+		val := op.value()
+
+		var pd container
+		if (*val.raw)[0] == '[' {
+			pd = &partialArray{
+				self: val,
+			}
+		} else {
+			pd = &partialDoc{
+				self: val,
+			}
+		}
+
+		err := json.UnmarshalValid(*val.raw, pd)
+
+		if err != nil {
+			return err
+		}
+
+		*doc = pd
+
+		return nil
+	}
+
 	if options.EnsurePathExistsOnAdd {
 		err = ensurePathExists(doc, path, options)
 
@@ -758,9 +819,9 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 		if arrIndex, err = strconv.Atoi(part); err == nil {
 			pa, ok := doc.(*partialArray)
 
-			if ok && arrIndex >= len(*pa)+1 {
+			if ok && arrIndex >= len(pa.nodes)+1 {
 				// Pad the array with null values up to the required index.
-				for i := len(*pa); i <= arrIndex-1; i++ {
+				for i := len(pa.nodes); i <= arrIndex-1; i++ {
 					doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
 				}
 			}
@@ -794,7 +855,10 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 				newNode := newLazyNode(newRawMessage(rawJSONObject))
 
 				doc.add(part, newNode, options)
-				doc, _ = newNode.intoDoc()
+				doc, err = newNode.intoDoc()
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			if isArray(*target.raw) {
@@ -816,6 +880,43 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 	return nil
 }
 
+func validateOperation(op Operation) error {
+	switch op.Kind() {
+	case "add", "replace":
+		if _, err := op.ValueInterface(); err != nil {
+			return errors.Wrapf(err, "failed to decode 'value'")
+		}
+	case "move", "copy":
+		if _, err := op.From(); err != nil {
+			return errors.Wrapf(err, "failed to decode 'from'")
+		}
+	case "remove", "test":
+	default:
+		return fmt.Errorf("unsupported operation")
+	}
+
+	if _, err := op.Path(); err != nil {
+		return errors.Wrapf(err, "failed to decode 'path'")
+	}
+
+	return nil
+}
+
+func validatePatch(p Patch) error {
+	for _, op := range p {
+		if err := validateOperation(op); err != nil {
+			opData, infoErr := json.Marshal(op)
+			if infoErr != nil {
+				return errors.Wrapf(err, "invalid operation")
+			}
+
+			return errors.Wrapf(err, "invalid operation %s", opData)
+		}
+	}
+
+	return nil
+}
+
 func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error {
 	path, err := op.Path()
 	if err != nil {
@@ -858,7 +959,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 
 	switch val.which {
 	case eAry:
-		*doc = &val.ary
+		*doc = val.ary
 	case eDoc:
 		*doc = val.doc
 	case eRaw:
@@ -893,6 +994,10 @@ func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
 		return errors.Wrapf(err, "move operation failed to decode from")
 	}
 
+	if from == "" {
+		return errors.Wrapf(ErrInvalid, "unable to move entire document to another path")
+	}
+
 	con, key := findObject(doc, from, options)
 
 	if con == nil {
@@ -942,7 +1047,7 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 		self.doc = sv
 		self.which = eDoc
 	case *partialArray:
-		self.ary = *sv
+		self.ary = sv
 		self.which = eAry
 	}
 
@@ -964,12 +1069,14 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 		return errors.Wrapf(err, "error in test for path: '%s'", path)
 	}
 
+	ov := op.value()
+
 	if val == nil {
-		if op.value().raw == nil {
+		if ov.isNull() {
 			return nil
 		}
 		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
-	} else if op.value() == nil {
+	} else if ov.isNull() {
 		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
 	}
 
@@ -989,7 +1096,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
 	con, key := findObject(doc, from, options)
 
 	if con == nil {
-		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: \"%s\"", from)
 	}
 
 	val, err := con.get(key, options)
@@ -1036,14 +1143,22 @@ func Equal(a, b []byte) bool {
 
 // DecodePatch decodes the passed JSON document as an RFC 6902 patch.
 func DecodePatch(buf []byte) (Patch, error) {
+	if !json.Valid(buf) {
+		return nil, ErrInvalid
+	}
+
 	var p Patch
 
-	err := json.Unmarshal(buf, &p)
+	err := unmarshal(buf, &p)
 
 	if err != nil {
 		return nil, err
 	}
 
+	if err := validatePatch(p); err != nil {
+		return nil, err
+	}
+
 	return p, nil
 }
 
@@ -1072,14 +1187,25 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
 		return doc, nil
 	}
 
-	var pd container
-	if doc[0] == '[' {
-		pd = &partialArray{}
-	} else {
-		pd = &partialDoc{}
+	if !json.Valid(doc) {
+		return nil, ErrInvalid
 	}
 
-	err := json.Unmarshal(doc, pd)
+	raw := json.RawMessage(doc)
+	self := newLazyNode(&raw)
+
+	var pd container
+	if doc[0] == '[' {
+		pd = &partialArray{
+			self: self,
+		}
+	} else {
+		pd = &partialDoc{
+			self: self,
+		}
+	}
+
+	err := unmarshal(doc, pd)
 
 	if err != nil {
 		return nil, err
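// Illustrative usage sketch (an addition for clarity, not part of the diff
// above): the RFC 6902 path exercised by these changes. DecodePatch now
// rejects invalid JSON up front and validates each operation before Apply
// walks the partialDoc/partialArray containers.
//
//	patch, err := jsonpatch.DecodePatch([]byte(`[
//		{"op": "replace", "path": "/spec/replicas", "value": 3}
//	]`))
//	if err != nil {
//		log.Fatal(err)
//	}
//	out, err := patch.Apply([]byte(`{"spec": {"replicas": 1}}`))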
5 vendor/modules.txt vendored
@@ -177,9 +177,10 @@ github.com/emicklei/go-restful/v3/log
 # github.com/evanphx/json-patch v5.6.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
-# github.com/evanphx/json-patch/v5 v5.6.0
-## explicit; go 1.12
+# github.com/evanphx/json-patch/v5 v5.8.1
+## explicit; go 1.18
 github.com/evanphx/json-patch/v5
+github.com/evanphx/json-patch/v5/internal/json
 # github.com/fsnotify/fsnotify v1.6.0
 ## explicit; go 1.16
 github.com/fsnotify/fsnotify