From af0a0f4cc634d2b34161edf95e2f381e6a9ddffc Mon Sep 17 00:00:00 2001 From: Fabian Fulga Date: Fri, 20 Sep 2024 13:14:46 +0300 Subject: [PATCH] Add json-schema generator --- go.mod | 11 +- go.sum | 15 +- provider/spec.go | 118 +- provider/spec_test.go | 412 +++++- .../github.com/bahlo/generic-list-go/LICENSE | 27 + .../bahlo/generic-list-go/README.md | 5 + .../github.com/bahlo/generic-list-go/list.go | 235 +++ vendor/github.com/buger/jsonparser/.gitignore | 12 + .../github.com/buger/jsonparser/.travis.yml | 11 + vendor/github.com/buger/jsonparser/Dockerfile | 12 + vendor/github.com/buger/jsonparser/LICENSE | 21 + vendor/github.com/buger/jsonparser/Makefile | 36 + vendor/github.com/buger/jsonparser/README.md | 365 +++++ vendor/github.com/buger/jsonparser/bytes.go | 47 + .../github.com/buger/jsonparser/bytes_safe.go | 25 + .../buger/jsonparser/bytes_unsafe.go | 44 + vendor/github.com/buger/jsonparser/escape.go | 173 +++ vendor/github.com/buger/jsonparser/fuzz.go | 117 ++ .../buger/jsonparser/oss-fuzz-build.sh | 47 + vendor/github.com/buger/jsonparser/parser.go | 1283 +++++++++++++++++ .../garm-provider-common/cloudconfig/util.go | 6 +- .../github.com/invopop/jsonschema/.gitignore | 2 + .../invopop/jsonschema/.golangci.yml | 70 + vendor/github.com/invopop/jsonschema/COPYING | 19 + .../github.com/invopop/jsonschema/README.md | 373 +++++ .../invopop/jsonschema/comment_extractor.go | 93 ++ vendor/github.com/invopop/jsonschema/id.go | 76 + .../github.com/invopop/jsonschema/reflect.go | 1150 +++++++++++++++ .../github.com/invopop/jsonschema/schema.go | 94 ++ vendor/github.com/invopop/jsonschema/utils.go | 26 + vendor/github.com/mailru/easyjson/LICENSE | 7 + .../github.com/mailru/easyjson/buffer/pool.go | 278 ++++ .../mailru/easyjson/jwriter/writer.go | 405 ++++++ .../wk8/go-ordered-map/v2/.gitignore | 1 + .../wk8/go-ordered-map/v2/.golangci.yml | 80 + .../wk8/go-ordered-map/v2/CHANGELOG.md | 38 + .../github.com/wk8/go-ordered-map/v2/LICENSE | 201 +++ .../github.com/wk8/go-ordered-map/v2/Makefile | 32 + .../wk8/go-ordered-map/v2/README.md | 154 ++ .../github.com/wk8/go-ordered-map/v2/json.go | 182 +++ .../wk8/go-ordered-map/v2/orderedmap.go | 296 ++++ .../github.com/wk8/go-ordered-map/v2/yaml.go | 71 + vendor/modules.txt | 18 +- 43 files changed, 6528 insertions(+), 160 deletions(-) create mode 100644 vendor/github.com/bahlo/generic-list-go/LICENSE create mode 100644 vendor/github.com/bahlo/generic-list-go/README.md create mode 100644 vendor/github.com/bahlo/generic-list-go/list.go create mode 100644 vendor/github.com/buger/jsonparser/.gitignore create mode 100644 vendor/github.com/buger/jsonparser/.travis.yml create mode 100644 vendor/github.com/buger/jsonparser/Dockerfile create mode 100644 vendor/github.com/buger/jsonparser/LICENSE create mode 100644 vendor/github.com/buger/jsonparser/Makefile create mode 100644 vendor/github.com/buger/jsonparser/README.md create mode 100644 vendor/github.com/buger/jsonparser/bytes.go create mode 100644 vendor/github.com/buger/jsonparser/bytes_safe.go create mode 100644 vendor/github.com/buger/jsonparser/bytes_unsafe.go create mode 100644 vendor/github.com/buger/jsonparser/escape.go create mode 100644 vendor/github.com/buger/jsonparser/fuzz.go create mode 100644 vendor/github.com/buger/jsonparser/oss-fuzz-build.sh create mode 100644 vendor/github.com/buger/jsonparser/parser.go create mode 100644 vendor/github.com/invopop/jsonschema/.gitignore create mode 100644 vendor/github.com/invopop/jsonschema/.golangci.yml create mode 100644 
vendor/github.com/invopop/jsonschema/COPYING create mode 100644 vendor/github.com/invopop/jsonschema/README.md create mode 100644 vendor/github.com/invopop/jsonschema/comment_extractor.go create mode 100644 vendor/github.com/invopop/jsonschema/id.go create mode 100644 vendor/github.com/invopop/jsonschema/reflect.go create mode 100644 vendor/github.com/invopop/jsonschema/schema.go create mode 100644 vendor/github.com/invopop/jsonschema/utils.go create mode 100644 vendor/github.com/mailru/easyjson/LICENSE create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/.gitignore create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/LICENSE create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/Makefile create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/README.md create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/json.go create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go create mode 100644 vendor/github.com/wk8/go-ordered-map/v2/yaml.go diff --git a/go.mod b/go.mod index b966168..530e9a2 100644 --- a/go.mod +++ b/go.mod @@ -6,15 +6,22 @@ toolchain go1.22.3 require ( github.com/BurntSushi/toml v1.2.1 - github.com/cloudbase/garm-provider-common v0.1.4-0.20240906095211-57315d4ac8ae + github.com/cloudbase/garm-provider-common v0.1.4-0.20240912084949-899c120c80ce github.com/google/uuid v1.6.0 github.com/gophercloud/gophercloud v1.11.0 github.com/gophercloud/utils v0.0.0-20230324070755-05e9e7f5ea4d + github.com/invopop/jsonschema v0.12.0 github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v2 v2.4.0 ) -require github.com/stretchr/objx v0.5.2 // indirect +require ( + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect +) require ( github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/go.sum b/go.sum index d9a8d9b..5501434 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,11 @@ github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/cloudbase/garm-provider-common v0.1.4-0.20240906095211-57315d4ac8ae h1:GDcVb/ForsCtpGj2kFH+iPu6/IIj0b5pV/h27qL6+GI= -github.com/cloudbase/garm-provider-common v0.1.4-0.20240906095211-57315d4ac8ae/go.mod h1:sK26i2NpjjAjhanNKiWw8iPkqt+XeohTKpFnEP7JdZ4= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cloudbase/garm-provider-common v0.1.4-0.20240912084949-899c120c80ce h1:spSF26dB6llkdyEeVPDdobdbv2z09zL10MUJG/YYkoE= +github.com/cloudbase/garm-provider-common v0.1.4-0.20240912084949-899c120c80ce/go.mod h1:sK26i2NpjjAjhanNKiWw8iPkqt+XeohTKpFnEP7JdZ4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -17,6 +21,11 @@ github.com/gophercloud/utils v0.0.0-20230324070755-05e9e7f5ea4d/go.mod h1:z4Dey7 github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc= @@ -35,6 +44,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI= github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= diff --git a/provider/spec.go b/provider/spec.go index 38003af..a8bad9c 100644 --- a/provider/spec.go +++ b/provider/spec.go @@ -26,6 +26,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/invopop/jsonschema" "github.com/xeipuuv/gojsonschema" "github.com/cloudbase/garm-provider-openstack/config" @@ -42,104 +43,35 @@ var ( DefaultGetCloudconfig GetCloudConfigFunc = cloudconfig.GetCloudConfig ) -const jsonSchema string = ` - -{ - "$schema": "http://cloudbase.it/garm-provider-openstack/schemas/extra_specs#", - "type": "object", - "description": "Schema defining supported extra specs for the Garm OpenStack Provider", - "properties": { - "security_groups": { - "type": "array", - "items": { - "type": "string" - } - }, - "network_id": { - "type": "string", - "description": "The tenant network to which runners will be connected to." - }, - "storage_backend": { - "type": "string", - "description": "The cinder backend to use when creating volumes." - }, - "boot_from_volume": { - "type": "boolean", - "description": "Whether to boot from volume or not. Use this option if the root disk size defined by the flavor is not enough." - }, - "boot_disk_size": { - "type": "integer", - "description": "The size of the root disk in GB. Default is 50 GB." 
- }, - "use_config_drive": { - "type": "boolean", - "description": "Use config drive." - }, - "enable_boot_debug": { - "type": "boolean", - "description": "Enable cloud-init debug mode. Adds 'set -x' into the cloud-init script." - }, - "allowed_image_owners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of image owners to allow when creating the instance. If not specified, all images will be allowed." - }, - "image_visibility": { - "type": "string", - "description": "The visibility of the image to use." - }, - "disable_updates": { - "type": "boolean", - "description": "Disable automatic updates on the VM." - }, - "extra_packages": { - "type": "array", - "description": "Extra packages to install on the VM.", - "items": { - "type": "string" - } - }, - "runner_install_template": { - "type": "string", - "description": "This option can be used to override the default runner install template. If used, the caller is responsible for the correctness of the template as well as the suitability of the template for the target OS. Use the extra_context extra spec if your template has variables in it that need to be expanded." - }, - "extra_context": { - "type": "object", - "description": "Extra context that will be passed to the runner_install_template.", - "additionalProperties": { - "type": "string" - } - }, - "pre_install_scripts": { - "type": "object", - "description": "A map of pre-install scripts that will be run before the runner install script. These will run as root and can be used to prep a generic image before we attempt to install the runner. The key of the map is the name of the script as it will be written to disk. The value is a byte array with the contents of the script.", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false -} -` - type extraSpecs struct { SecurityGroups []string `json:"security_groups,omitempty"` - AllowedImageOwners []string `json:"allowed_image_owners,omitempty"` - ImageVisibility string `json:"image_visibility,omitempty"` - NetworkID string `json:"network_id"` - StorageBackend string `json:"storage_backend,omitempty"` - BootFromVolume *bool `json:"boot_from_volume,omitempty"` - BootDiskSize *int64 `json:"boot_disk_size,omitempty"` - UseConfigDrive *bool `json:"use_config_drive"` - EnableBootDebug *bool `json:"enable_boot_debug"` - DisableUpdates *bool `json:"disable_updates"` - ExtraPackages []string `json:"extra_packages"` + AllowedImageOwners []string `json:"allowed_image_owners,omitempty" jsonschema:"description=A list of image owners to allow when creating the instance. If not specified, all images will be allowed."` + ImageVisibility string `json:"image_visibility,omitempty" jsonschema:"description=The visibility of the image to use."` + NetworkID string `json:"network_id,omitempty" jsonschema:"description=The tenant network to which runners will be connected to."` + StorageBackend string `json:"storage_backend,omitempty" jsonschema:"description=The cinder backend to use when creating volumes."` + BootFromVolume *bool `json:"boot_from_volume,omitempty" jsonschema:"description=Whether to boot from volume or not. Use this option if the root disk size defined by the flavor is not enough."` + BootDiskSize *int64 `json:"boot_disk_size,omitempty" jsonschema:"description=The size of the root disk in GB. 
Default is 50 GB."` + UseConfigDrive *bool `json:"use_config_drive,omitempty" jsonschema:"description=Use config drive."` + EnableBootDebug *bool `json:"enable_boot_debug,omitempty" jsonschema:"description=Enable cloud-init debug mode. Adds 'set -x' into the cloud-init script."` + DisableUpdates *bool `json:"disable_updates,omitempty" jsonschema:"description=Disable automatic updates on the VM."` + ExtraPackages []string `json:"extra_packages,omitempty" jsonschema:"description=Extra packages to install on the VM."` + // The Cloudconfig struct from common package + cloudconfig.CloudConfigSpec +} + +func generateJSONSchema() *jsonschema.Schema { + reflector := jsonschema.Reflector{ + AllowAdditionalProperties: false, + } + // Reflect the extraSpecs struct + schema := reflector.Reflect(extraSpecs{}) + + return schema } func jsonSchemaValidation(schema json.RawMessage) error { - schemaLoader := gojsonschema.NewStringLoader(jsonSchema) + jsonSchema := generateJSONSchema() + schemaLoader := gojsonschema.NewGoLoader(jsonSchema) extraSpecsLoader := gojsonschema.NewBytesLoader(schema) result, err := gojsonschema.Validate(schemaLoader, extraSpecsLoader) if err != nil { diff --git a/provider/spec_test.go b/provider/spec_test.go index dd37d50..a2b0130 100644 --- a/provider/spec_test.go +++ b/provider/spec_test.go @@ -18,66 +18,12 @@ import ( "encoding/json" "testing" + "github.com/cloudbase/garm-provider-common/cloudconfig" "github.com/cloudbase/garm-provider-common/params" "github.com/cloudbase/garm-provider-openstack/config" "github.com/stretchr/testify/assert" ) -func TestJsonSchemaValidation(t *testing.T) { - tests := []struct { - name string - input json.RawMessage - errString string - }{ - { - name: "valid", - input: json.RawMessage(`{ - "boot_from_volume": true, - "security_groups": ["allow_ssh", "allow_web"], - "network_id": "542b68dd-4b3d-459d-8531-34d5e779d4d6", - "storage_backend": "cinder_nvme", - "boot_disk_size": 150, - "use_config_drive": false, - "enable_boot_debug": true, - "allowed_image_owners": ["123456"], - "image_visibility": "all" - }`), - errString: "", - }, - { - name: "invalid input - wrong data type", - input: json.RawMessage(`{ - "boot_from_volume": "true" - }`), - errString: "schema validation failed: [boot_from_volume: Invalid type. 
Expected: boolean, given: string]", - }, - { - name: "invalid input - extra field", - input: json.RawMessage(`{ - "boot_from_volume": true, - "extra_field": "extra" - }`), - errString: "Additional property extra_field is not allowed", - }, - { - name: "valid input - empty extra specs", - input: json.RawMessage(`{}`), - errString: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := jsonSchemaValidation(tt.input) - if tt.errString == "" { - assert.NoError(t, err) - } else { - assert.ErrorContains(t, err, tt.errString) - } - }) - } -} - func Test_machineSpec_MergeExtraSpecs(t *testing.T) { tests := []struct { name string @@ -138,7 +84,7 @@ func TestExtraSpecsFromBootstrapParams(t *testing.T) { errString string }{ { - name: "valid", + name: "full specs", input: params.BootstrapInstance{ ExtraSpecs: json.RawMessage(`{ "security_groups": ["allow_ssh", "allow_web"], @@ -149,7 +95,12 @@ func TestExtraSpecsFromBootstrapParams(t *testing.T) { "boot_from_volume": true, "boot_disk_size": 150, "use_config_drive": false, - "enable_boot_debug": true + "enable_boot_debug": true, + "disable_updates": true, + "extra_packages": ["package1", "package2"], + "runner_install_template": "IyEvYmluL2Jhc2gKZWNobyBJbnN0YWxsaW5nIHJ1bm5lci4uLg==", + "pre_install_scripts": {"setup.sh": "IyEvYmluL2Jhc2gKZWNobyBTZXR1cCBzY3JpcHQuLi4="}, + "extra_context": {"key": "value"} }`), }, wantSpec: extraSpecs{ @@ -162,14 +113,357 @@ func TestExtraSpecsFromBootstrapParams(t *testing.T) { BootDiskSize: Ptr(int64(150)), UseConfigDrive: Ptr(false), EnableBootDebug: Ptr(true), + DisableUpdates: Ptr(true), + ExtraPackages: []string{"package1", "package2"}, + CloudConfigSpec: cloudconfig.CloudConfigSpec{ + RunnerInstallTemplate: []byte("#!/bin/bash\necho Installing runner..."), + PreInstallScripts: map[string][]byte{ + "setup.sh": []byte("#!/bin/bash\necho Setup script..."), + }, + ExtraContext: map[string]string{"key": "value"}, + }, + }, + errString: "", + }, + { + name: "specs just with security groups", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "security_groups": ["allow_ssh", "allow_web"] + }`), + }, + wantSpec: extraSpecs{ + SecurityGroups: []string{"allow_ssh", "allow_web"}, + }, + errString: "", + }, + { + name: "specs just with allowed image owners", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "allowed_image_owners": ["123456"] + }`), + }, + wantSpec: extraSpecs{ + AllowedImageOwners: []string{"123456"}, + }, + errString: "", + }, + { + name: "specs just with image visibility", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "image_visibility": "all" + }`), + }, + wantSpec: extraSpecs{ + ImageVisibility: "all", + }, + errString: "", + }, + { + name: "specs just with network ID", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "network_id": "542b68dd-4b3d-459d-8531-34d5e779d4d6" + }`), + }, + wantSpec: extraSpecs{ + NetworkID: "542b68dd-4b3d-459d-8531-34d5e779d4d6", + }, + errString: "", + }, + { + name: "specs just with storage backend", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "storage_backend": "cinder_nvme" + }`), + }, + wantSpec: extraSpecs{ + StorageBackend: "cinder_nvme", + }, + errString: "", + }, + { + name: "specs just with boot from volume", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "boot_from_volume": true + }`), + }, + wantSpec: extraSpecs{ + BootFromVolume: Ptr(true), + }, + errString: "", + }, + { + name: "specs just with 
boot disk size", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "boot_disk_size": 150 + }`), + }, + wantSpec: extraSpecs{ + BootDiskSize: Ptr(int64(150)), + }, + errString: "", + }, + { + name: "specs just with use config drive", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "use_config_drive": false + }`), + }, + wantSpec: extraSpecs{ + UseConfigDrive: Ptr(false), + }, + errString: "", + }, + { + name: "specs just with enable boot debug", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "enable_boot_debug": true + }`), + }, + wantSpec: extraSpecs{ + EnableBootDebug: Ptr(true), + }, + errString: "", + }, + { + name: "specs just with disable updates", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "disable_updates": true + }`), + }, + wantSpec: extraSpecs{ + DisableUpdates: Ptr(true), + }, + errString: "", + }, + { + name: "specs just with extra packages", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "extra_packages": ["package1", "package2"] + }`), + }, + wantSpec: extraSpecs{ + ExtraPackages: []string{"package1", "package2"}, + }, + errString: "", + }, + { + name: "specs just with runner install template", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "runner_install_template": "IyEvYmluL2Jhc2gKZWNobyBJbnN0YWxsaW5nIHJ1bm5lci4uLg==" + }`), + }, + wantSpec: extraSpecs{ + CloudConfigSpec: cloudconfig.CloudConfigSpec{ + RunnerInstallTemplate: []byte("#!/bin/bash\necho Installing runner..."), + }, + }, + errString: "", + }, + { + name: "specs just with pre install scripts", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "pre_install_scripts": {"setup.sh": "IyEvYmluL2Jhc2gKZWNobyBTZXR1cCBzY3JpcHQuLi4="} + }`), + }, + wantSpec: extraSpecs{ + CloudConfigSpec: cloudconfig.CloudConfigSpec{ + PreInstallScripts: map[string][]byte{ + "setup.sh": []byte("#!/bin/bash\necho Setup script..."), + }, + }, }, errString: "", }, { - name: "invalid", + name: "specs just with extra context", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "extra_context": {"key": "value"} + }`), + }, + wantSpec: extraSpecs{ + CloudConfigSpec: cloudconfig.CloudConfigSpec{ + ExtraContext: map[string]string{"key": "value"}, + }, + }, + errString: "", + }, + { + name: "empty specs", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{}`), + }, + wantSpec: extraSpecs{}, + errString: "", + }, + { + name: "invalid json", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "image_visibility": + }`), + }, + wantSpec: extraSpecs{}, + errString: "failed to validate extra specs", + }, + { + name: "invalid input for security groups - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "security_groups": "allow_ssh" + }`), + }, + wantSpec: extraSpecs{}, + errString: "security_groups: Invalid type. Expected: array, given: string", + }, + { + name: "invalid input for allowed image owners - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "allowed_image_owners": "123456" + }`), + }, + wantSpec: extraSpecs{}, + errString: "allowed_image_owners: Invalid type. Expected: array, given: string", + }, + { + name: "invalid input for image visibility - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "image_visibility": 123456 + }`), + }, + wantSpec: extraSpecs{}, + errString: "image_visibility: Invalid type. 
Expected: string, given: integer", + }, + { + name: "invalid input for network ID - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "network_id": 123456 + }`), + }, + wantSpec: extraSpecs{}, + errString: "network_id: Invalid type. Expected: string, given: integer", + }, + { + name: "invalid input for storage backend - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "storage_backend": 123456 + }`), + }, + wantSpec: extraSpecs{}, + errString: "storage_backend: Invalid type. Expected: string, given: integer", + }, + { + name: "invalid input for boot from volume - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "boot_from_volume": "true" + }`), + }, + wantSpec: extraSpecs{}, + errString: "boot_from_volume: Invalid type. Expected: boolean, given: string", + }, + { + name: "invalid input for boot disk size - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "boot_disk_size": "150" + }`), + }, + wantSpec: extraSpecs{}, + errString: "boot_disk_size: Invalid type. Expected: integer, given: string", + }, + { + name: "invalid input for use config drive - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "use_config_drive": "false" + }`), + }, + wantSpec: extraSpecs{}, + errString: "use_config_drive: Invalid type. Expected: boolean, given: string", + }, + { + name: "invalid input for enable boot debug - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "enable_boot_debug": "true" + }`), + }, + wantSpec: extraSpecs{}, + errString: "enable_boot_debug: Invalid type. Expected: boolean, given: string", + }, + { + name: "invalid input for disable updates - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "disable_updates": "true" + }`), + }, + wantSpec: extraSpecs{}, + errString: "disable_updates: Invalid type. Expected: boolean, given: string", + }, + { + name: "invalid input for extra packages - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "extra_packages": "package1" + }`), + }, + wantSpec: extraSpecs{}, + errString: "extra_packages: Invalid type. Expected: array, given: string", + }, + { + name: "invalid input for runner install template - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "runner_install_template": 123456 + }`), + }, + wantSpec: extraSpecs{}, + errString: "runner_install_template: Invalid type. Expected: string, given: integer", + }, + { + name: "invalid input for pre install scripts - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "pre_install_scripts": "setup.sh" + }`), + }, + wantSpec: extraSpecs{}, + errString: "pre_install_scripts: Invalid type. Expected: object, given: string", + }, + { + name: "invalid input for extra context - wrong data type", + input: params.BootstrapInstance{ + ExtraSpecs: json.RawMessage(`{ + "extra_context": "key" + }`), + }, + wantSpec: extraSpecs{}, + errString: "extra_context: Invalid type. 
Expected: object, given: string", + }, + { + name: "invalid input - additional property", input: params.BootstrapInstance{ ExtraSpecs: json.RawMessage(`{ - "image_visibility": "invalid", + "invalid": "property" }`), }, wantSpec: extraSpecs{}, diff --git a/vendor/github.com/bahlo/generic-list-go/LICENSE b/vendor/github.com/bahlo/generic-list-go/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/github.com/bahlo/generic-list-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/bahlo/generic-list-go/README.md b/vendor/github.com/bahlo/generic-list-go/README.md new file mode 100644 index 0000000..68bbce9 --- /dev/null +++ b/vendor/github.com/bahlo/generic-list-go/README.md @@ -0,0 +1,5 @@ +# generic-list-go [![CI](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml/badge.svg)](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml) + +Go [container/list](https://pkg.go.dev/container/list) but with generics. + +The code is based on `container/list` in `go1.18beta2`. diff --git a/vendor/github.com/bahlo/generic-list-go/list.go b/vendor/github.com/bahlo/generic-list-go/list.go new file mode 100644 index 0000000..a06a7c6 --- /dev/null +++ b/vendor/github.com/bahlo/generic-list-go/list.go @@ -0,0 +1,235 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package list implements a doubly linked list. +// +// To iterate over a list (where l is a *List): +// for e := l.Front(); e != nil; e = e.Next() { +// // do something with e.Value +// } +// +package list + +// Element is an element of a linked list. +type Element[T any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). 
+ next, prev *Element[T] + + // The list to which this element belongs. + list *List[T] + + // The value stored with this element. + Value T +} + +// Next returns the next list element or nil. +func (e *Element[T]) Next() *Element[T] { + if p := e.next; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// Prev returns the previous list element or nil. +func (e *Element[T]) Prev() *Element[T] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// List represents a doubly linked list. +// The zero value for List is an empty list ready to use. +type List[T any] struct { + root Element[T] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *List[T]) Init() *List[T] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// New returns an initialized list. +func New[T any]() *List[T] { return new(List[T]).Init() } + +// Len returns the number of elements of list l. +// The complexity is O(1). +func (l *List[T]) Len() int { return l.len } + +// Front returns the first element of list l or nil if the list is empty. +func (l *List[T]) Front() *Element[T] { + if l.len == 0 { + return nil + } + return l.root.next +} + +// Back returns the last element of list l or nil if the list is empty. +func (l *List[T]) Back() *Element[T] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List value. +func (l *List[T]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *List[T]) insert(e, at *Element[T]) *Element[T] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). +func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] { + return l.insert(&Element[T]{Value: v}, at) +} + +// remove removes e from its list, decrements l.len +func (l *List[T]) remove(e *Element[T]) { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- +} + +// move moves e to next to at. +func (l *List[T]) move(e, at *Element[T]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// Remove removes e from l if e is an element of list l. +// It returns the element value e.Value. +// The element must not be nil. +func (l *List[T]) Remove(e *Element[T]) T { + if e.list == l { + // if e.list == l, l must have been initialized when e was inserted + // in l or l == nil (e is a zero Element) and l.remove will crash + l.remove(e) + } + return e.Value +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *List[T]) PushFront(v T) *Element[T] { + l.lazyInit() + return l.insertValue(v, &l.root) +} + +// PushBack inserts a new element e with value v at the back of list l and returns e. +func (l *List[T]) PushBack(v T) *Element[T] { + l.lazyInit() + return l.insertValue(v, l.root.prev) +} + +// InsertBefore inserts a new element e with value v immediately before mark and returns e. +// If mark is not an element of l, the list is not modified. +// The mark must not be nil. 
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] { + if mark.list != l { + return nil + } + // see comment in List.Remove about initialization of l + return l.insertValue(v, mark.prev) +} + +// InsertAfter inserts a new element e with value v immediately after mark and returns e. +// If mark is not an element of l, the list is not modified. +// The mark must not be nil. +func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] { + if mark.list != l { + return nil + } + // see comment in List.Remove about initialization of l + return l.insertValue(v, mark) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[T]) MoveToFront(e *Element[T]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} + +// MoveToBack moves element e to the back of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[T]) MoveToBack(e *Element[T]) { + if e.list != l || l.root.prev == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, l.root.prev) +} + +// MoveBefore moves element e to its new position before mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[T]) MoveBefore(e, mark *Element[T]) { + if e.list != l || e == mark || mark.list != l { + return + } + l.move(e, mark.prev) +} + +// MoveAfter moves element e to its new position after mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[T]) MoveAfter(e, mark *Element[T]) { + if e.list != l || e == mark || mark.list != l { + return + } + l.move(e, mark) +} + +// PushBackList inserts a copy of another list at the back of list l. +// The lists l and other may be the same. They must not be nil. +func (l *List[T]) PushBackList(other *List[T]) { + l.lazyInit() + for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { + l.insertValue(e.Value, l.root.prev) + } +} + +// PushFrontList inserts a copy of another list at the front of list l. +// The lists l and other may be the same. They must not be nil. +func (l *List[T]) PushFrontList(other *List[T]) { + l.lazyInit() + for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { + l.insertValue(e.Value, &l.root) + } +} diff --git a/vendor/github.com/buger/jsonparser/.gitignore b/vendor/github.com/buger/jsonparser/.gitignore new file mode 100644 index 0000000..5598d8a --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.gitignore @@ -0,0 +1,12 @@ + +*.test + +*.out + +*.mprof + +.idea + +vendor/github.com/buger/goterm/ +prof.cpu +prof.mem diff --git a/vendor/github.com/buger/jsonparser/.travis.yml b/vendor/github.com/buger/jsonparser/.travis.yml new file mode 100644 index 0000000..dbfb7cf --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.travis.yml @@ -0,0 +1,11 @@ +language: go +arch: + - amd64 + - ppc64le +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x +script: go test -v ./. 
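(An editorial aside on the vendored `generic-list-go` package above: it is `container/list` with type parameters, pulled in here as an indirect dependency. A minimal usage sketch, not part of this patch, to show what the generic API looks like:)

```go
package main

import (
	"fmt"

	list "github.com/bahlo/generic-list-go"
)

func main() {
	l := list.New[int]()
	l.PushBack(2)
	l.PushBack(3)
	l.PushFront(1)

	// e.Value is a plain int here; no interface{} assertion is needed,
	// unlike with the standard library's container/list.
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}
}
```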
diff --git a/vendor/github.com/buger/jsonparser/Dockerfile b/vendor/github.com/buger/jsonparser/Dockerfile new file mode 100644 index 0000000..37fc9fd --- /dev/null +++ b/vendor/github.com/buger/jsonparser/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.6 + +RUN go get github.com/Jeffail/gabs +RUN go get github.com/bitly/go-simplejson +RUN go get github.com/pquerna/ffjson +RUN go get github.com/antonholmquist/jason +RUN go get github.com/mreiferson/go-ujson +RUN go get -tags=unsafe -u github.com/ugorji/go/codec +RUN go get github.com/mailru/easyjson + +WORKDIR /go/src/github.com/buger/jsonparser +ADD . /go/src/github.com/buger/jsonparser \ No newline at end of file diff --git a/vendor/github.com/buger/jsonparser/LICENSE b/vendor/github.com/buger/jsonparser/LICENSE new file mode 100644 index 0000000..ac25aeb --- /dev/null +++ b/vendor/github.com/buger/jsonparser/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Leonid Bugaev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/buger/jsonparser/Makefile b/vendor/github.com/buger/jsonparser/Makefile new file mode 100644 index 0000000..e843368 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/Makefile @@ -0,0 +1,36 @@ +SOURCE = parser.go +CONTAINER = jsonparser +SOURCE_PATH = /go/src/github.com/buger/jsonparser +BENCHMARK = JsonParser +BENCHTIME = 5s +TEST = . +DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER) + +build: + docker build -t $(CONTAINER) . + +race: + $(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s + +bench: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v + +bench_local: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v + +profile: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c + +test: + $(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v + +fmt: + $(DRUN) go fmt ./... + +vet: + $(DRUN) go vet ./. 
+
+bash:
+	$(DRUN) /bin/bash
\ No newline at end of file
diff --git a/vendor/github.com/buger/jsonparser/README.md b/vendor/github.com/buger/jsonparser/README.md
new file mode 100644
index 0000000..d7e0ec3
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/README.md
@@ -0,0 +1,365 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
+# Alternative JSON parser for Go (up to 10x faster than the standard library)
+
+It does not require you to know the structure of the payload (e.g. by creating structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than the standard `encoding/json` package (depending on payload size and usage) and **allocates no memory**. See benchmarks below.
+
+## Rationale
+Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
+I love simplicity and prefer to avoid external dependencies. `encoding/json` requires you to know your data structures exactly, and if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage.
+I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`; there are a few options with their own parsers (`ffjson`, `easyjson`), but they still require you to create data structures.
+
+
+The goal of this project is to push the JSON parser to its performance limits without sacrificing compliance or developer experience.
+
+## Example
+For the given JSON, our goal is to extract the user's full name, number of GitHub followers and avatar.
+
+```go
+import "github.com/buger/jsonparser"
+
+...
+
+data := []byte(`{
+  "person": {
+    "name": {
+      "first": "Leonid",
+      "last": "Bugaev",
+      "fullName": "Leonid Bugaev"
+    },
+    "github": {
+      "handle": "buger",
+      "followers": 109
+    },
+    "avatars": [
+      { "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
+    ]
+  },
+  "company": {
+    "name": "Acme"
+  }
+}`)
+
+// You can specify a key path by providing arguments to the Get function
+jsonparser.Get(data, "person", "name", "fullName")
+
+// There are `GetInt` and `GetBoolean` helpers if you know the key's data type
+jsonparser.GetInt(data, "person", "github", "followers")
+
+// When you get an object, it returns a []byte slice pointing to the data containing it
+// In `company` it will be `{"name": "Acme"}`
+jsonparser.Get(data, "company")
+
+// If the key doesn't exist it returns an error
+var size int64
+if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
+  size = value
+}
+
+// You can use the `ArrayEach` helper to iterate over items [item1, item2 .... itemN]
+jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
+	fmt.Println(jsonparser.Get(value, "url"))
+}, "person", "avatars")
+
+// Or you can access fields by index!
+jsonparser.GetString(data, "person", "avatars", "[0]", "url")
+
+// You can use the `ObjectEach` helper to iterate over objects { "key1":object1, "key2":object2, .... "keyN":objectN }
+jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
+	fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType)
+	return nil
+}, "person", "name")
+
+// The most efficient way to extract multiple keys is `EachKey`
+
+paths := [][]string{
+  []string{"person", "name", "fullName"},
+  []string{"person", "avatars", "[0]", "url"},
+  []string{"company", "url"},
+}
+jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){
+  switch idx {
+  case 0: // []string{"person", "name", "fullName"}
+    ...
+  case 1: // []string{"person", "avatars", "[0]", "url"}
+    ...
+  case 2: // []string{"company", "url"},
+    ...
+  }
+}, paths...)
+
+// For more information see docs below
+```
+
+## Need to speed up your app?
+
+I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com.
+
+## Reference
+
+The library API is really simple. You just need the `Get` method to perform any operation. The rest are just helpers around it.
+
+You can also view the API at [godoc.org](https://godoc.org/github.com/buger/jsonparser)
+
+
+### **`Get`**
+```go
+func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error)
+```
+Receives a data structure and a key path to extract the value from.
+
+Returns:
+* `value` - Pointer into the original data structure containing the key's value, or an empty slice if nothing was found or an error occurred
+* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
+* `offset` - Offset into the provided data structure where the key's value ends. Used mostly internally, for example by the `ArrayEach` helper.
+* `err` - Set if the key is not found or on any other parsing issue. If the key is not found, `dataType` is also set to `NotExist`
+
+Accepts multiple keys to specify the path to a JSON value (in case of querying nested structures).
+If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays; see the `ArrayEach` implementation.
+
+Note that keys can be array indexes: `jsonparser.GetInt(data, "person", "avatars", "[0]", "url")`, pretty cool, yeah?
+
+### **`GetString`**
+```go
+func GetString(data []byte, keys ...string) (val string, err error)
+```
+Returns strings, properly handling escaped and Unicode characters. Note that this will cause additional memory allocations.
+
+### **`GetUnsafeString`**
+Use this if you need a string in your app and are ready to sacrifice support for escaped symbols in favor of speed. It returns a string mapped to the existing byte slice memory, without any allocations:
+```go
+s, _ := jsonparser.GetUnsafeString(data, "person", "name", "title")
+switch s {
+  case "CEO":
+    ...
+  case "Engineer":
+    ...
+  ...
+}
+```
+Note that `unsafe` here means that the string exists only until the GC frees the underlying byte slice. In most cases this means you can use the string only in the current context and should not pass it anywhere externally: through channels or in any other way.
+
+
+### **`GetBoolean`**, **`GetInt`** and **`GetFloat`**
+```go
+func GetBoolean(data []byte, keys ...string) (val bool, err error)
+
+func GetFloat(data []byte, keys ...string) (val float64, err error)
+
+func GetInt(data []byte, keys ...string) (val int64, err error)
+```
+If you know the key type, you can use the helpers above.
+If the key's data type does not match, an error is returned.
+
+### **`ArrayEach`**
+```go
+func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
+```
+Used for iterating over arrays; accepts a callback function whose arguments match the return values of `Get`.
+
+### **`ObjectEach`**
+```go
+func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
+```
+Used for iterating over objects; accepts a callback function. Example:
+```go
+var handler func([]byte, []byte, jsonparser.ValueType, int) error
+handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
+	// do stuff here
+	return nil
+}
+jsonparser.ObjectEach(myJson, handler)
+```
+
+
+### **`EachKey`**
+```go
+func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
+```
+When you need to read multiple keys and are not afraid of a low-level API, `EachKey` is your friend. It reads the payload only a single time and calls the callback function once a path is found. By contrast, when you call `Get` multiple times, it has to process the payload on each call. Depending on the payload, `EachKey` can be several times faster than `Get`. Paths can use nested keys as well!
+
+```go
+paths := [][]string{
+	[]string{"uuid"},
+	[]string{"tz"},
+	[]string{"ua"},
+	[]string{"st"},
+}
+var data SmallPayload
+
+jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error){
+	switch idx {
+	case 0:
+		data.Uuid, _ = jsonparser.ParseString(value)
+	case 1:
+		v, _ := jsonparser.ParseInt(value)
+		data.Tz = int(v)
+	case 2:
+		data.Ua, _ = jsonparser.ParseString(value)
+	case 3:
+		v, _ := jsonparser.ParseInt(value)
+		data.St = int(v)
+	}
+}, paths...)
+```
+
+### **`Set`**
+```go
+func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
+```
+Receives an existing data structure, a key path to set, and the value to set at that key. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the updated or added key value.
+* `err` - Set on any parsing issue.
+
+Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
+
+### **`Delete`**
+```go
+func Delete(data []byte, keys ...string) (value []byte)
+```
+Receives an existing data structure and a key path to delete. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the key path deleted, if it can be found. If there is no key path, the whole data structure is deleted.
+
+Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
+
+
+## What makes it so fast?
+* It does not rely on `encoding/json`, `reflection` or `interface{}`; the only real package dependency is `bytes`.
+* Operates on the JSON payload at byte level, giving you pointers into the original data structure: no memory allocation.
+* No automatic type conversions; by default everything is a []byte, but it provides the value type, so you can convert it yourself (a few helpers are included).
+* Does not parse the full record, only the keys you specify
+
+
+## Benchmarks
+
+There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
+For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json are marked in bold.
+Benchmarks were run on a standard Linode 1024 box.
+
+Compared libraries:
+* https://golang.org/pkg/encoding/json
+* https://github.com/Jeffail/gabs
+* https://github.com/a8m/djson
+* https://github.com/bitly/go-simplejson
+* https://github.com/antonholmquist/jason
+* https://github.com/mreiferson/go-ujson
+* https://github.com/ugorji/go/codec
+* https://github.com/pquerna/ffjson
+* https://github.com/mailru/easyjson
+* https://github.com/buger/jsonparser
+
+#### TLDR
+If you want to skip the next sections, we have 2 winners: `jsonparser` and `easyjson`.
+`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates on the data at byte level and provides direct slice pointers.
+`easyjson` wins on CPU in the medium tests, and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).
+
+It's hard to fully compare `jsonparser` and `easyjson` (or `ffjson`): they are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specify.
+
+If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.
+
+`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it gets; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.
+
+With great power comes great responsibility! :)
+
+
+#### Small payload
+
+Each test processes 190 bytes of HTTP log as a JSON record.
+It should read multiple fields.
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go
+
+Library | time/op | bytes/op | allocs/op
+ ------ | ------- | -------- | -------
+encoding/json struct | 7879 | 880 | 18
+encoding/json interface{} | 8946 | 1521 | 38
+Jeffail/gabs | 10053 | 1649 | 46
+bitly/go-simplejson | 10128 | 2241 | 36
+antonholmquist/jason | 27152 | 7237 | 101
+github.com/ugorji/go/codec | 8806 | 2176 | 31
+mreiferson/go-ujson | **7008** | **1409** | 37
+a8m/djson | 3862 | 1249 | 30
+pquerna/ffjson | **3769** | **624** | **15**
+mailru/easyjson | **2002** | **192** | **9**
+buger/jsonparser | **1367** | **0** | **0**
+buger/jsonparser (EachKey API) | **809** | **0** | **0**
+
+Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json, 4.6x faster than ffjson, and slightly faster than easyjson.
+If you look at memory allocation, jsonparser has no rivals, as it makes no data copies and operates with raw []byte structures and pointers into them.
+
+#### Medium payload
+
+Each test processes a 2.4kb JSON record (based on the Clearbit API).
+It should read multiple nested fields and 1 array.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| ------- | ------- | -------- | --------- |
+| encoding/json struct | 57749 | 1336 | 29 |
+| encoding/json interface{} | 79297 | 10627 | 215 |
+| Jeffail/gabs | 83807 | 11202 | 235 |
+| bitly/go-simplejson | 88187 | 17187 | 220 |
+| antonholmquist/jason | 94099 | 19013 | 247 |
+| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
+| mreiferson/go-ujson | **56972** | 11547 | 270 |
+| a8m/djson | 28525 | 10196 | 198 |
+| pquerna/ffjson | **20298** | **856** | **20** |
+| mailru/easyjson | **10512** | **336** | **12** |
+| buger/jsonparser | **15955** | **0** | **0** |
+| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |
+
+The difference between ffjson and jsonparser in CPU usage is smaller, while the memory consumption gap grows. On the other hand, `easyjson` shows remarkable performance for medium payloads.
+
+`gabs`, `go-simplejson` and `jason` are based on encoding/json and map[string]interface{} and are really just helpers for unstructured JSON; their performance correlates with `encoding/json interface{}`, so they skip the next round.
+`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. The same goes for `ugorji/go/codec`, which additionally showed unexpectedly bad performance for complex payloads.
+
+
+#### Large payload
+
+Each test processes a 24kb JSON record (based on the Discourse API).
+It should read 2 arrays and, for each item in the array, get a few fields.
+Basically it means processing a full JSON file.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| --- | --- | --- | --- |
+| encoding/json struct | 748336 | 8272 | 307 |
+| encoding/json interface{} | 1224271 | 215425 | 3395 |
+| a8m/djson | 510082 | 213682 | 2845 |
+| pquerna/ffjson | **312271** | **7792** | **298** |
+| mailru/easyjson | **154186** | **6992** | **288** |
+| buger/jsonparser | **85308** | **0** | **0** |
+
+`jsonparser` is now the winner, but do not forget that it is a much more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. `ffjson`, `easyjson` and `jsonparser` all have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory this can lead to some unexpected GC issues, but I have not tested it enough).
+
+Also, the last benchmark did not include the `EachKey` test, because in this particular case we need to read a lot of array values, and using `ArrayEach` is more efficient.
+
+## Questions and support
+
+All bug reports and suggestions should go through GitHub Issues.
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (git checkout -b my-new-feature)
+3. Commit your changes (git commit -am 'Added some feature')
+4. Push to the branch (git push origin my-new-feature)
+5. Create a new Pull Request
+
+## Development
+
+All my development happens using Docker, and the repo includes some Make tasks to simplify development.
+
+* `make build` - builds the docker image, usually needs to be called only once
+* `make test` - run tests
+* `make fmt` - run go fmt
+* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the Makefile)
+* `make profile` - runs benchmarks and generates 3 files - `cpu.out`, `mem.mprof` and the `benchmark.test` binary, which can be used with `go tool pprof`
+* `make bash` - enter the container (I use it for running `go tool pprof` above)
diff --git a/vendor/github.com/buger/jsonparser/bytes.go b/vendor/github.com/buger/jsonparser/bytes.go
new file mode 100644
index 0000000..0bb0ff3
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes.go
@@ -0,0 +1,47 @@
+package jsonparser
+
+import (
+	bio "bytes"
+)
+
+// minInt64 '-9223372036854775808' is the smallest representable number in int64
+const minInt64 = `9223372036854775808`
+
+// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
+func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
+	if len(bytes) == 0 {
+		return 0, false, false
+	}
+
+	var neg bool = false
+	if bytes[0] == '-' {
+		neg = true
+		bytes = bytes[1:]
+	}
+
+	var b int64 = 0
+	for _, c := range bytes {
+		if c >= '0' && c <= '9' {
+			b = (10 * v) + int64(c-'0')
+		} else {
+			return 0, false, false
+		}
+		if overflow = (b < v); overflow {
+			break
+		}
+		v = b
+	}
+
+	if overflow {
+		if neg && bio.Equal(bytes, []byte(minInt64)) {
+			return b, true, false
+		}
+		return 0, false, true
+	}
+
+	if neg {
+		return -v, true, false
+	} else {
+		return v, true, false
+	}
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_safe.go b/vendor/github.com/buger/jsonparser/bytes_safe.go
new file mode 100644
index 0000000..ff16a4a
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_safe.go
@@ -0,0 +1,25 @@
+// +build appengine appenginevm
+
+package jsonparser
+
+import (
+	"strconv"
+)
+
+// See fastbytes_unsafe.go for explanation on why *[]byte is used (signatures must be consistent with those in that file)
+
+func equalStr(b *[]byte, s string) bool {
+	return string(*b) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(string(*b), 64)
+}
+
+func bytesToString(b *[]byte) string {
+	return string(*b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_unsafe.go b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
new file mode 100644
index 0000000..589fea8
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
@@ -0,0 +1,44 @@
+// +build !appengine,!appenginevm
+
+package jsonparser
+
+import (
+	"reflect"
+	"strconv"
+	"unsafe"
+	"runtime"
+)
+
+//
+// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
+// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
+// the non-pointer []byte parameter version is slower than if its function body is manually
+// inlined, whereas the pointer []byte version is equally fast to the manually inlined
+// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
+//
+// TODO: Remove hack after Go 1.7 release
+//
+func equalStr(b *[]byte, s string) bool {
+	return *(*string)(unsafe.Pointer(b)) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
+}
+
+// A hack until issue golang/go#2632 is fixed.
+// See: https://github.com/golang/go/issues/2632 +func bytesToString(b *[]byte) string { + return *(*string)(unsafe.Pointer(b)) +} + +func StringToBytes(s string) []byte { + b := make([]byte, 0, 0) + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + runtime.KeepAlive(s) + return b +} diff --git a/vendor/github.com/buger/jsonparser/escape.go b/vendor/github.com/buger/jsonparser/escape.go new file mode 100644 index 0000000..49669b9 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/escape.go @@ -0,0 +1,173 @@ +package jsonparser + +import ( + "bytes" + "unicode/utf8" +) + +// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7 + +const supplementalPlanesOffset = 0x10000 +const highSurrogateOffset = 0xD800 +const lowSurrogateOffset = 0xDC00 + +const basicMultilingualPlaneReservedOffset = 0xDFFF +const basicMultilingualPlaneOffset = 0xFFFF + +func combineUTF16Surrogates(high, low rune) rune { + return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset) +} + +const badHex = -1 + +func h2I(c byte) int { + switch { + case c >= '0' && c <= '9': + return int(c - '0') + case c >= 'A' && c <= 'F': + return int(c - 'A' + 10) + case c >= 'a' && c <= 'f': + return int(c - 'a' + 10) + } + return badHex +} + +// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and +// is not checked. +// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together. +// This function only handles one; decodeUnicodeEscape handles this more complex case. +func decodeSingleUnicodeEscape(in []byte) (rune, bool) { + // We need at least 6 characters total + if len(in) < 6 { + return utf8.RuneError, false + } + + // Convert hex to decimal + h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5]) + if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex { + return utf8.RuneError, false + } + + // Compose the hex digits + return rune(h1<<12 + h2<<8 + h3<<4 + h4), true +} + +// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters, +// which is used to describe UTF16 chars. 
+// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+func isUTF16EncodedRune(r rune) bool {
+	return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
+}
+
+func decodeUnicodeEscape(in []byte) (rune, int) {
+	if r, ok := decodeSingleUnicodeEscape(in); !ok {
+		// Invalid Unicode escape
+		return utf8.RuneError, -1
+	} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
+		// Valid Unicode escape in Basic Multilingual Plane
+		return r, 6
+	} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
+		// UTF16 "high surrogate" without the mandatory valid following Unicode escape for the "low surrogate"
+		return utf8.RuneError, -1
+	} else if r2 < lowSurrogateOffset {
+		// Invalid UTF16 "low surrogate"
+		return utf8.RuneError, -1
+	} else {
+		// Valid UTF16 surrogate pair
+		return combineUTF16Surrogates(r, r2), 12
+	}
+}
+
+// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
+var backslashCharEscapeTable = [...]byte{
+	'"':  '"',
+	'\\': '\\',
+	'/':  '/',
+	'b':  '\b',
+	'f':  '\f',
+	'n':  '\n',
+	'r':  '\r',
+	't':  '\t',
+}
+
+// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
+// how many characters were consumed from 'in' and emitted into 'out'.
+// If a valid escape sequence does not appear as a prefix of 'in', it returns (-1, -1) to signal the error.
+func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
+	if len(in) < 2 || in[0] != '\\' {
+		// Invalid escape due to insufficient characters for any escape or no initial backslash
+		return -1, -1
+	}
+
+	// https://tools.ietf.org/html/rfc7159#section-7
+	switch e := in[1]; e {
+	case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+		// Valid basic 2-character escapes (use lookup table)
+		out[0] = backslashCharEscapeTable[e]
+		return 2, 1
+	case 'u':
+		// Unicode escape
+		if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
+			// Invalid Unicode escape
+			return -1, -1
+		} else {
+			// Valid Unicode escape; re-encode as UTF8
+			outLen := utf8.EncodeRune(out, r)
+			return inLen, outLen
+		}
+	}
+
+	return -1, -1
+}
+
+// Unescape unescapes the string contained in 'in' and returns it as a slice.
+// If 'in' contains no escaped characters:
+//   Returns 'in'.
+// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
+//   'out' is used to build the unescaped string and is returned with no extra allocation
+// Else:
+//   A new slice is allocated and returned.
+func Unescape(in, out []byte) ([]byte, error) { + firstBackslash := bytes.IndexByte(in, '\\') + if firstBackslash == -1 { + return in, nil + } + + // Get a buffer of sufficient size (allocate if needed) + if cap(out) < len(in) { + out = make([]byte, len(in)) + } else { + out = out[0:len(in)] + } + + // Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice) + copy(out, in[:firstBackslash]) + in = in[firstBackslash:] + buf := out[firstBackslash:] + + for len(in) > 0 { + // Unescape the next escaped character + inLen, bufLen := unescapeToUTF8(in, buf) + if inLen == -1 { + return nil, MalformedStringEscapeError + } + + in = in[inLen:] + buf = buf[bufLen:] + + // Copy everything up until the next backslash + nextBackslash := bytes.IndexByte(in, '\\') + if nextBackslash == -1 { + copy(buf, in) + buf = buf[len(in):] + break + } else { + copy(buf, in[:nextBackslash]) + buf = buf[nextBackslash:] + in = in[nextBackslash:] + } + } + + // Trim the out buffer to the amount that was actually emitted + return out[:len(out)-len(buf)], nil +} diff --git a/vendor/github.com/buger/jsonparser/fuzz.go b/vendor/github.com/buger/jsonparser/fuzz.go new file mode 100644 index 0000000..854bd11 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/fuzz.go @@ -0,0 +1,117 @@ +package jsonparser + +func FuzzParseString(data []byte) int { + r, err := ParseString(data) + if err != nil || r == "" { + return 0 + } + return 1 +} + +func FuzzEachKey(data []byte) int { + paths := [][]string{ + {"name"}, + {"order"}, + {"nested", "a"}, + {"nested", "b"}, + {"nested2", "a"}, + {"nested", "nested3", "b"}, + {"arr", "[1]", "b"}, + {"arrInt", "[3]"}, + {"arrInt", "[5]"}, + {"nested"}, + {"arr", "["}, + {"a\n", "b\n"}, + } + EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...) 
+ return 1 +} + +func FuzzDelete(data []byte) int { + Delete(data, "test") + return 1 +} + +func FuzzSet(data []byte) int { + _, err := Set(data, []byte(`"new value"`), "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzObjectEach(data []byte) int { + _ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error { + return nil + }) + return 1 +} + +func FuzzParseFloat(data []byte) int { + _, err := ParseFloat(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseInt(data []byte) int { + _, err := ParseInt(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseBool(data []byte) int { + _, err := ParseBoolean(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzTokenStart(data []byte) int { + _ = tokenStart(data) + return 1 +} + +func FuzzGetString(data []byte) int { + _, err := GetString(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetFloat(data []byte) int { + _, err := GetFloat(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetInt(data []byte) int { + _, err := GetInt(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetBoolean(data []byte) int { + _, err := GetBoolean(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetUnsafeString(data []byte) int { + _, err := GetUnsafeString(data, "test") + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh new file mode 100644 index 0000000..c573b0e --- /dev/null +++ b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh @@ -0,0 +1,47 @@ +#!/bin/bash -eu + +git clone https://github.com/dvyukov/go-fuzz-corpus +zip corpus.zip go-fuzz-corpus/json/corpus/* + +cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring + +cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey + +cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete + +cp corpus.zip $OUT/fuzzset_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset + +cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach + +cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat + +cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint + +cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool + +cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart + +cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring + +cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat + +cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint + +cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean + +cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser 
FuzzGetUnsafeString fuzzgetunsafestring + diff --git a/vendor/github.com/buger/jsonparser/parser.go b/vendor/github.com/buger/jsonparser/parser.go new file mode 100644 index 0000000..14b80bc --- /dev/null +++ b/vendor/github.com/buger/jsonparser/parser.go @@ -0,0 +1,1283 @@ +package jsonparser + +import ( + "bytes" + "errors" + "fmt" + "strconv" +) + +// Errors +var ( + KeyPathNotFoundError = errors.New("Key path not found") + UnknownValueTypeError = errors.New("Unknown value type") + MalformedJsonError = errors.New("Malformed JSON error") + MalformedStringError = errors.New("Value is string, but can't find closing '\"' symbol") + MalformedArrayError = errors.New("Value is array, but can't find closing ']' symbol") + MalformedObjectError = errors.New("Value looks like object, but can't find closing '}' symbol") + MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol") + OverflowIntegerError = errors.New("Value is number, but overflowed while parsing") + MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string") +) + +// How much stack space to allocate for unescaping JSON strings; if a string longer +// than this needs to be escaped, it will result in a heap allocation +const unescapeStackBufSize = 64 + +func tokenEnd(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + return i + } + } + + return len(data) +} + +func findTokenStart(data []byte, token byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case token: + return i + case '[', '{': + return 0 + } + } + + return 0 +} + +func findKeyStart(data []byte, key string) (int, error) { + i := 0 + ln := len(data) + if ln > 0 && (data[0] == '{' || data[0] == '[') { + i = 1 + } + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + if ku, err := Unescape(StringToBytes(key), stackbuf[:]); err == nil { + key = bytesToString(&ku) + } + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + break + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + break + } + + i += valueOffset + + // if string is a key, and key level match + k := data[keyBegin:keyEnd] + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + if keyEscaped { + if ku, err := Unescape(k, stackbuf[:]); err != nil { + break + } else { + k = ku + } + } + + if data[i] == ':' && len(key) == len(k) && bytesToString(&k) == key { + return keyBegin - 1, nil + } + + case '[': + end := blockEnd(data[i:], data[i], ']') + if end != -1 { + i = i + end + } + case '{': + end := blockEnd(data[i:], data[i], '}') + if end != -1 { + i = i + end + } + } + i++ + } + + return -1, KeyPathNotFoundError +} + +func tokenStart(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case '\n', '\r', '\t', ',', '{', '[': + return i + } + } + + return 0 +} + +// Find position of next character which is not whitespace +func nextToken(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Find position of last character which is not whitespace +func lastToken(data []byte) int { + for i := len(data) - 1; i >= 0; i-- 
{ + switch data[i] { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func stringEnd(data []byte) (int, bool) { + escaped := false + for i, c := range data { + if c == '"' { + if !escaped { + return i + 1, false + } else { + j := i - 1 + for { + if j < 0 || data[j] != '\\' { + return i + 1, true // even number of backslashes + } + j-- + if j < 0 || data[j] != '\\' { + break // odd number of backslashes + } + j-- + + } + } + } else if c == '\\' { + escaped = true + } + } + + return -1, escaped +} + +// Find end of the data structure, array or object. +// For array openSym and closeSym will be '[' and ']', for object '{' and '}' +func blockEnd(data []byte, openSym byte, closeSym byte) int { + level := 0 + i := 0 + ln := len(data) + + for i < ln { + switch data[i] { + case '"': // If inside string, skip it + se, _ := stringEnd(data[i+1:]) + if se == -1 { + return -1 + } + i += se + case openSym: // If open symbol, increase level + level++ + case closeSym: // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + return i + 1 + } + } + i++ + } + + return -1 +} + +func searchKeys(data []byte, keys ...string) int { + keyLevel := 0 + level := 0 + i := 0 + ln := len(data) + lk := len(keys) + lastMatched := true + + if lk == 0 { + return 0 + } + + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key + if data[i] == ':' { + if level < 1 { + return -1 + } + + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + + if level <= len(keys) { + if equalStr(&keyUnesc, keys[level-1]) { + lastMatched = true + + // if key level match + if keyLevel == level-1 { + keyLevel++ + // If we found all keys in path + if keyLevel == lk { + return i + 1 + } + } + } else { + lastMatched = false + } + } else { + return -1 + } + } else { + i-- + } + case '{': + + // in case parent key is matched then only we will increase the level otherwise can directly + // can move to the end of this block + if !lastMatched { + end := blockEnd(data[i:], '{', '}') + if end == -1 { + return -1 + } + i += end - 1 + } else { + level++ + } + case '}': + level-- + if level == keyLevel { + keyLevel-- + } + case '[': + // If we want to get array element by index + if keyLevel == level && keys[level][0] == '[' { + var keyLen = len(keys[level]) + if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' { + return -1 + } + aIdx, err := strconv.Atoi(keys[level][1 : keyLen-1]) + if err != nil { + return -1 + } + var curIdx int + var valueFound []byte + var valueOffset int + var curI = i + ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if curIdx == aIdx { + valueFound = value + valueOffset = offset + if dataType == 
String { + valueOffset = valueOffset - 2 + valueFound = data[curI+valueOffset : curI+valueOffset+len(value)+2] + } + } + curIdx += 1 + }) + + if valueFound == nil { + return -1 + } else { + subIndex := searchKeys(valueFound, keys[level+1:]...) + if subIndex < 0 { + return -1 + } + return i + valueOffset + subIndex + } + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ':': // If encountered, JSON data is malformed + return -1 + } + + i++ + } + + return -1 +} + +func sameTree(p1, p2 []string) bool { + minLen := len(p1) + if len(p2) < minLen { + minLen = len(p2) + } + + for pi_1, p_1 := range p1[:minLen] { + if p2[pi_1] != p_1 { + return false + } + } + + return true +} + +func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int { + var x struct{} + pathFlags := make([]bool, len(paths)) + var level, pathsMatched, i int + ln := len(data) + + var maxPath int + for _, p := range paths { + if len(p) > maxPath { + maxPath = len(p) + } + } + + pathsBuf := make([]string, maxPath) + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key, and key level match + if data[i] == ':' { + match := -1 + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else { + var stackbuf [unescapeStackBufSize]byte + if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + } + + if maxPath >= level { + if level < 1 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + pathsBuf[level-1] = bytesToString(&keyUnesc) + for pi, p := range paths { + if len(p) != level || pathFlags[pi] || !equalStr(&keyUnesc, p[level-1]) || !sameTree(p, pathsBuf[:level]) { + continue + } + + match = pi + + pathsMatched++ + pathFlags[pi] = true + + v, dt, _, e := Get(data[i+1:]) + cb(pi, v, dt, e) + + if pathsMatched == len(paths) { + break + } + } + if pathsMatched == len(paths) { + return i + } + } + + if match == -1 { + tokenOffset := nextToken(data[i+1:]) + i += tokenOffset + + if data[i] == '{' { + blockSkip := blockEnd(data[i:], '{', '}') + i += blockSkip + 1 + } + } + + if i < ln { + switch data[i] { + case '{', '}', '[', '"': + i-- + } + } + } else { + i-- + } + case '{': + level++ + case '}': + level-- + case '[': + var ok bool + arrIdxFlags := make(map[int]struct{}) + pIdxFlags := make([]bool, len(paths)) + + if level < 0 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + for pi, p := range paths { + if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { + continue + } + if len(p[level]) >= 2 { + aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) + arrIdxFlags[aIdx] = x + pIdxFlags[pi] = true + } + } + + if len(arrIdxFlags) > 0 { + level++ + + var curIdx int + arrOff, _ := ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if _, ok = arrIdxFlags[curIdx]; ok { + for pi, p := range paths { + if pIdxFlags[pi] { + aIdx, _ := strconv.Atoi(p[level-1][1 : len(p[level-1])-1]) 
+ + if curIdx == aIdx { + of := searchKeys(value, p[level:]...) + + pathsMatched++ + pathFlags[pi] = true + + if of != -1 { + v, dt, _, e := Get(value[of:]) + cb(pi, v, dt, e) + } + } + } + } + } + + curIdx += 1 + }) + + if pathsMatched == len(paths) { + return i + } + + i += arrOff - 1 + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ']': + level-- + } + + i++ + } + + return -1 +} + +// Data types available in valid JSON data. +type ValueType int + +const ( + NotExist = ValueType(iota) + String + Number + Object + Array + Boolean + Null + Unknown +) + +func (vt ValueType) String() string { + switch vt { + case NotExist: + return "non-existent" + case String: + return "string" + case Number: + return "number" + case Object: + return "object" + case Array: + return "array" + case Boolean: + return "boolean" + case Null: + return "null" + default: + return "unknown" + } +} + +var ( + trueLiteral = []byte("true") + falseLiteral = []byte("false") + nullLiteral = []byte("null") +) + +func createInsertComponent(keys []string, setValue []byte, comma, object bool) []byte { + isIndex := string(keys[0][0]) == "[" + offset := 0 + lk := calcAllocateSpace(keys, setValue, comma, object) + buffer := make([]byte, lk, lk) + if comma { + offset += WriteToBuffer(buffer[offset:], ",") + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + if object { + offset += WriteToBuffer(buffer[offset:], "{") + } + if !isIndex { + offset += WriteToBuffer(buffer[offset:], "\"") + offset += WriteToBuffer(buffer[offset:], keys[0]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + offset += WriteToBuffer(buffer[offset:], "{\"") + offset += WriteToBuffer(buffer[offset:], keys[i]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + offset += WriteToBuffer(buffer[offset:], string(setValue)) + for i := len(keys) - 1; i > 0; i-- { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "]") + } else { + offset += WriteToBuffer(buffer[offset:], "}") + } + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "]") + } + if object && !isIndex { + offset += WriteToBuffer(buffer[offset:], "}") + } + return buffer +} + +func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int { + isIndex := string(keys[0][0]) == "[" + lk := 0 + if comma { + // , + lk += 1 + } + if isIndex && !comma { + // [] + lk += 2 + } else { + if object { + // { + lk += 1 + } + if !isIndex { + // "keys[0]" + lk += len(keys[0]) + 3 + } + } + + + lk += len(setValue) + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + // [] + lk += 2 + } else { + // {"keys[i]":setValue} + lk += len(keys[i]) + 5 + } + } + + if object && !isIndex { + // } + lk += 1 + } + + return lk +} + +func WriteToBuffer(buffer []byte, str string) int { + copy(buffer, str) + return len(str) +} + +/* + +Del - Receives existing data structure, path to delete. 
+ +Returns: +`data` - return modified data + +*/ +func Delete(data []byte, keys ...string) []byte { + lk := len(keys) + if lk == 0 { + return data[:0] + } + + array := false + if len(keys[lk-1]) > 0 && string(keys[lk-1][0]) == "[" { + array = true + } + + var startOffset, keyOffset int + endOffset := len(data) + var err error + if !array { + if len(keys) > 1 { + _, _, startOffset, endOffset, err = internalGet(data, keys[:lk-1]...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + } + + keyOffset, err = findKeyStart(data[startOffset:endOffset], keys[lk-1]) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + keyOffset += startOffset + _, _, _, subEndOffset, _ := internalGet(data[startOffset:endOffset], keys[lk-1]) + endOffset = startOffset + subEndOffset + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == " "[0] && len(data) > endOffset+tokEnd+1 && data[endOffset+tokEnd+1] == ","[0] { + endOffset += tokEnd + 2 + } else if data[endOffset+tokEnd] == "}"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } else { + _, _, keyOffset, endOffset, err = internalGet(data, keys...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == "]"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } + + // We need to remove remaining trailing comma if we delete las element in the object + prevTok := lastToken(data[:keyOffset]) + remainedValue := data[endOffset:] + + var newOffset int + if nextToken(remainedValue) > -1 && remainedValue[nextToken(remainedValue)] == '}' && data[prevTok] == ',' { + newOffset = prevTok + } else { + newOffset = prevTok + 1 + } + + // We have to make a copy here if we don't want to mangle the original data, because byte slices are + // accessed by reference and not by value + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + data = append(dataCopy[:newOffset], dataCopy[endOffset:]...) + + return data +} + +/* + +Set - Receives existing data structure, path to set, and data to set at that key. + +Returns: +`value` - modified byte array +`err` - On any parsing error + +*/ +func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) { + // ensure keys are set + if len(keys) == 0 { + return nil, KeyPathNotFoundError + } + + _, _, startOffset, endOffset, err := internalGet(data, keys...) + if err != nil { + if err != KeyPathNotFoundError { + // problem parsing the data + return nil, err + } + // full path doesnt exist + // does any subpath exist? + var depth int + for i := range keys { + _, _, start, end, sErr := internalGet(data, keys[:i+1]...) 
+ if sErr != nil { + break + } else { + endOffset = end + startOffset = start + depth++ + } + } + comma := true + object := false + if endOffset == -1 { + firstToken := nextToken(data) + // We can't set a top-level key if data isn't an object + if firstToken < 0 || data[firstToken] != '{' { + return nil, KeyPathNotFoundError + } + // Don't need a comma if the input is an empty object + secondToken := firstToken + 1 + nextToken(data[firstToken+1:]) + if data[secondToken] == '}' { + comma = false + } + // Set the top level key at the end (accounting for any trailing whitespace) + // This assumes last token is valid like '}', could check and return error + endOffset = lastToken(data) + } + depthOffset := endOffset + if depth != 0 { + // if subpath is a non-empty object, add to it + // or if subpath is a non-empty array, add to it + if (data[startOffset] == '{' && data[startOffset+1+nextToken(data[startOffset+1:])] != '}') || + (data[startOffset] == '[' && data[startOffset+1+nextToken(data[startOffset+1:])] == '{') && keys[depth:][0][0] == 91 { + depthOffset-- + startOffset = depthOffset + // otherwise, over-write it with a new object + } else { + comma = false + object = true + } + } else { + startOffset = depthOffset + } + value = append(data[:startOffset], append(createInsertComponent(keys[depth:], setValue, comma, object), data[depthOffset:]...)...) + } else { + // path currently exists + startComponent := data[:startOffset] + endComponent := data[endOffset:] + + value = make([]byte, len(startComponent)+len(endComponent)+len(setValue)) + newEndOffset := startOffset + len(setValue) + copy(value[0:startOffset], startComponent) + copy(value[startOffset:newEndOffset], setValue) + copy(value[newEndOffset:], endComponent) + } + return value, nil +} + +func getType(data []byte, offset int) ([]byte, ValueType, int, error) { + var dataType ValueType + endOffset := offset + + // if string value + if data[offset] == '"' { + dataType = String + if idx, _ := stringEnd(data[offset+1:]); idx != -1 { + endOffset += idx + 1 + } else { + return nil, dataType, offset, MalformedStringError + } + } else if data[offset] == '[' { // if array value + dataType = Array + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '[', ']') + + if endOffset == -1 { + return nil, dataType, offset, MalformedArrayError + } + + endOffset += offset + } else if data[offset] == '{' { // if object value + dataType = Object + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '{', '}') + + if endOffset == -1 { + return nil, dataType, offset, MalformedObjectError + } + + endOffset += offset + } else { + // Number, Boolean or None + end := tokenEnd(data[endOffset:]) + + if end == -1 { + return nil, dataType, offset, MalformedValueError + } + + value := data[offset : endOffset+end] + + switch data[offset] { + case 't', 'f': // true or false + if bytes.Equal(value, trueLiteral) || bytes.Equal(value, falseLiteral) { + dataType = Boolean + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case 'u', 'n': // undefined or null + if bytes.Equal(value, nullLiteral) { + dataType = Null + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + dataType = Number + default: + return nil, Unknown, offset, UnknownValueTypeError + } + + endOffset += end + } + return data[offset:endOffset], dataType, endOffset, nil +} + +/* +Get - Receives data structure, and key path to extract value from. 
+ +Returns: +`value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error +`dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null` +`offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper. +`err` - If key not found or any other parsing issue it should return error. If key not found it also sets `dataType` to `NotExist` + +Accept multiple keys to specify path to JSON value (in case of quering nested structures). +If no keys provided it will try to extract closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation. +*/ +func Get(data []byte, keys ...string) (value []byte, dataType ValueType, offset int, err error) { + a, b, _, d, e := internalGet(data, keys...) + return a, b, d, e +} + +func internalGet(data []byte, keys ...string) (value []byte, dataType ValueType, offset, endOffset int, err error) { + if len(keys) > 0 { + if offset = searchKeys(data, keys...); offset == -1 { + return nil, NotExist, -1, -1, KeyPathNotFoundError + } + } + + // Go to closest value + nO := nextToken(data[offset:]) + if nO == -1 { + return nil, NotExist, offset, -1, MalformedJsonError + } + + offset += nO + value, dataType, endOffset, err = getType(data, offset) + if err != nil { + return value, dataType, offset, endOffset, err + } + + // Strip quotes from string values + if dataType == String { + value = value[1 : len(value)-1] + } + + return value[:len(value):len(value)], dataType, offset, endOffset, nil +} + +// ArrayEach is used when iterating arrays, accepts a callback function with the same return arguments as `Get`. +func ArrayEach(data []byte, cb func(value []byte, dataType ValueType, offset int, err error), keys ...string) (offset int, err error) { + if len(data) == 0 { + return -1, MalformedObjectError + } + + nT := nextToken(data) + if nT == -1 { + return -1, MalformedJsonError + } + + offset = nT + 1 + + if len(keys) > 0 { + if offset = searchKeys(data, keys...); offset == -1 { + return offset, KeyPathNotFoundError + } + + // Go to closest value + nO := nextToken(data[offset:]) + if nO == -1 { + return offset, MalformedJsonError + } + + offset += nO + + if data[offset] != '[' { + return offset, MalformedArrayError + } + + offset++ + } + + nO := nextToken(data[offset:]) + if nO == -1 { + return offset, MalformedJsonError + } + + offset += nO + + if data[offset] == ']' { + return offset, nil + } + + for true { + v, t, o, e := Get(data[offset:]) + + if e != nil { + return offset, e + } + + if o == 0 { + break + } + + if t != NotExist { + cb(v, t, offset+o-len(v), e) + } + + if e != nil { + break + } + + offset += o + + skipToToken := nextToken(data[offset:]) + if skipToToken == -1 { + return offset, MalformedArrayError + } + offset += skipToToken + + if data[offset] == ']' { + break + } + + if data[offset] != ',' { + return offset, MalformedArrayError + } + + offset++ + } + + return offset, nil +} + +// ObjectEach iterates over the key-value pairs of a JSON object, invoking a given callback for each such entry +func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) { + offset := 0 + + // Descend to the desired key, if requested + if len(keys) > 0 { + if off := searchKeys(data, keys...); off == -1 { + return KeyPathNotFoundError + } else { + offset = off + } + } + + // Validate and skip past opening brace + 
if off := nextToken(data[offset:]); off == -1 { + return MalformedObjectError + } else if offset += off; data[offset] != '{' { + return MalformedObjectError + } else { + offset++ + } + + // Skip to the first token inside the object, or stop if we find the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] == '}' { + return nil + } + + // Loop pre-condition: data[offset] points to what should be either the next entry's key, or the closing brace (if it's anything else, the JSON is malformed) + for offset < len(data) { + // Step 1: find the next key + var key []byte + + // Check what the the next token is: start of string, end of object, or something else (error) + switch data[offset] { + case '"': + offset++ // accept as string and skip opening quote + case '}': + return nil // we found the end of the object; stop and return success + default: + return MalformedObjectError + } + + // Find the end of the key string + var keyEscaped bool + if off, esc := stringEnd(data[offset:]); off == -1 { + return MalformedJsonError + } else { + key, keyEscaped = data[offset:offset+off-1], esc + offset += off + } + + // Unescape the string if needed + if keyEscaped { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + if keyUnescaped, err := Unescape(key, stackbuf[:]); err != nil { + return MalformedStringEscapeError + } else { + key = keyUnescaped + } + } + + // Step 2: skip the colon + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] != ':' { + return MalformedJsonError + } else { + offset++ + } + + // Step 3: find the associated value, then invoke the callback + if value, valueType, off, err := Get(data[offset:]); err != nil { + return err + } else if err := callback(key, value, valueType, offset+off); err != nil { // Invoke the callback here! + return err + } else { + offset += off + } + + // Step 4: skip over the next comma to the following token, or stop if we hit the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + switch data[offset] { + case '}': + return nil // Stop if we hit the close brace + case ',': + offset++ // Ignore the comma + default: + return MalformedObjectError + } + } + + // Skip to the next token after the comma + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + } + } + + return MalformedObjectError // we shouldn't get here; it's expected that we will return via finding the ending brace +} + +// GetUnsafeString returns the value retrieved by `Get`, use creates string without memory allocation by mapping string to slice memory. It does not handle escape symbols. +func GetUnsafeString(data []byte, keys ...string) (val string, err error) { + v, _, _, e := Get(data, keys...) + + if e != nil { + return "", e + } + + return bytesToString(&v), nil +} + +// GetString returns the value retrieved by `Get`, cast to a string if possible, trying to properly handle escape and utf8 symbols +// If key data type do not match, it will return an error. +func GetString(data []byte, keys ...string) (val string, err error) { + v, t, _, e := Get(data, keys...) 
+
+	if e != nil {
+		return "", e
+	}
+
+	if t != String {
+		return "", fmt.Errorf("Value is not a string: %s", string(v))
+	}
+
+	// If no escapes return raw content
+	if bytes.IndexByte(v, '\\') == -1 {
+		return string(v), nil
+	}
+
+	return ParseString(v)
+}
+
+// GetFloat returns the value retrieved by `Get`, cast to a float64 if possible.
+// The offset is the same as in `Get`.
+// If the key's data type does not match, it returns an error.
+func GetFloat(data []byte, keys ...string) (val float64, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return 0, e
+	}
+
+	if t != Number {
+		return 0, fmt.Errorf("Value is not a number: %s", string(v))
+	}
+
+	return ParseFloat(v)
+}
+
+// GetInt returns the value retrieved by `Get`, cast to an int64 if possible.
+// If the key's data type does not match, it returns an error.
+func GetInt(data []byte, keys ...string) (val int64, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return 0, e
+	}
+
+	if t != Number {
+		return 0, fmt.Errorf("Value is not a number: %s", string(v))
+	}
+
+	return ParseInt(v)
+}
+
+// GetBoolean returns the value retrieved by `Get`, cast to a bool if possible.
+// The offset is the same as in `Get`.
+// If the key's data type does not match, it returns an error.
+func GetBoolean(data []byte, keys ...string) (val bool, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return false, e
+	}
+
+	if t != Boolean {
+		return false, fmt.Errorf("Value is not a boolean: %s", string(v))
+	}
+
+	return ParseBoolean(v)
+}
+
+// ParseBoolean parses a Boolean ValueType into a Go bool (not particularly useful, but here for completeness)
+func ParseBoolean(b []byte) (bool, error) {
+	switch {
+	case bytes.Equal(b, trueLiteral):
+		return true, nil
+	case bytes.Equal(b, falseLiteral):
+		return false, nil
+	default:
+		return false, MalformedValueError
+	}
+}
+
+// ParseString parses a String ValueType into a Go string (the main parsing work is unescaping the JSON string)
+func ParseString(b []byte) (string, error) {
+	var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings
+	if bU, err := Unescape(b, stackbuf[:]); err != nil {
+		return "", MalformedValueError
+	} else {
+		return string(bU), nil
+	}
+}
+
+// ParseFloat parses a Number ValueType into a Go float64
+func ParseFloat(b []byte) (float64, error) {
+	if v, err := parseFloat(&b); err != nil {
+		return 0, MalformedValueError
+	} else {
+		return v, nil
+	}
+}
+
+// ParseInt parses a Number ValueType into a Go int64
+func ParseInt(b []byte) (int64, error) {
+	if v, ok, overflow := parseInt(b); !ok {
+		if overflow {
+			return 0, OverflowIntegerError
+		}
+		return 0, MalformedValueError
+	} else {
+		return v, nil
+	}
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go
index a709dbe..2a8f4a3 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go
@@ -30,7 +30,7 @@ type CloudConfigSpec struct {
 	// RunnerInstallTemplate can be used to override the default runner install template.
 	// If used, the caller is responsible for the correctness of the template as well as the
 	// suitability of the template for the target OS.
-	RunnerInstallTemplate []byte `json:"runner_install_template"`
+	RunnerInstallTemplate []byte `json:"runner_install_template,omitempty" jsonschema:"title=default runner install template,description=This option can be used to override the default runner install template. If used, the caller is responsible for the correctness of the template as well as the suitability of the template for the target OS. Use the extra_context extra spec if your template has variables in it that need to be expanded."`
 	// PreInstallScripts is a map of pre-install scripts that will be run before the
 	// runner install script. These will run as root and can be used to prep a generic image
 	// before we attempt to install the runner. The key of the map is the name of the script
@@ -44,9 +44,9 @@ type CloudConfigSpec struct {
 	// so it's up to the user what they upload here.
 	// Caution needs to be exercised when using this feature, as the total size of userdata is limited
 	// on most providers.
-	PreInstallScripts map[string][]byte `json:"pre_install_scripts"`
+	PreInstallScripts map[string][]byte `json:"pre_install_scripts,omitempty" jsonschema:"title=pre-install scripts,description=Map of pre-install scripts that will be run before the runner install script. These will run as root and can be used to prep a generic image before we attempt to install the runner. The key of the map is the name of the script as it will be written to disk. The value is a byte array with the contents of the script."`
 	// ExtraContext is a map of extra context that will be passed to the runner install template.
-	ExtraContext map[string]string `json:"extra_context"`
+	ExtraContext map[string]string `json:"extra_context,omitempty" jsonschema:"title=map of extra context,description=Extra context that will be passed to the runner_install_template."`
 }
 
 func sortMapKeys(m map[string][]byte) []string {
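These `jsonschema` struct tags feed the schema generator that this patch adds in `provider/spec.go`. As a rough sketch of how they surface (assuming the vendored `invopop/jsonschema` API included below; the variable names are illustrative, not taken from the patch):

```go
// Sketch: reflect CloudConfigSpec into a JSON schema. The titles and
// descriptions come straight from the jsonschema tags above.
reflector := jsonschema.Reflector{}
schema := reflector.Reflect(&cloudconfig.CloudConfigSpec{})

out, err := json.MarshalIndent(schema, "", "  ")
if err != nil {
	// handle the error
}
fmt.Println(string(out))
```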
diff --git a/vendor/github.com/invopop/jsonschema/.gitignore b/vendor/github.com/invopop/jsonschema/.gitignore
new file mode 100644
index 0000000..8ef0e14
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+.idea/
diff --git a/vendor/github.com/invopop/jsonschema/.golangci.yml b/vendor/github.com/invopop/jsonschema/.golangci.yml
new file mode 100644
index 0000000..3dac8a3
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/.golangci.yml
@@ -0,0 +1,70 @@
+run:
+  tests: true
+  max-same-issues: 50
+  skip-dirs:
+    - resources
+    - old
+  skip-files:
+    - cmd/protopkg/main.go
+
+output:
+  print-issued-lines: false
+
+linters:
+  enable:
+    - gocyclo
+    - gocritic
+    - goconst
+    - dupl
+    - unconvert
+    - goimports
+    - unused
+    - vetshadow
+    - nakedret
+    - errcheck
+    - revive
+    - ineffassign
+    - goconst
+    - vet
+    - unparam
+    - gofmt
+
+linters-settings:
+  vet:
+    check-shadowing: true
+    use-installed-packages: true
+  dupl:
+    threshold: 100
+  goconst:
+    min-len: 8
+    min-occurrences: 3
+  gocyclo:
+    min-complexity: 20
+  gocritic:
+    disabled-checks:
+      - ifElseChain
+  gofmt:
+    rewrite-rules:
+      - pattern: 'interface{}'
+        replacement: 'any'
+      - pattern: 'a[b:len(a)]'
+        replacement: 'a[b:]'
+
+issues:
+  max-per-linter: 0
+  max-same: 0
+  exclude-use-default: false
+  exclude:
+    # Captured by errcheck.
+    - "^(G104|G204):"
+    # Very commonly not checked.
+    - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*Print(f|ln|)|os\.(Un)?Setenv). is not checked'
+    # Weird error only seen on Kochiku...
+ - "internal error: no range for" + - 'exported method `.*\.(MarshalJSON|UnmarshalJSON|URN|Payload|GoString|Close|Provides|Requires|ExcludeFromHash|MarshalText|UnmarshalText|Description|Check|Poll|Severity)` should have comment or be unexported' + - "composite literal uses unkeyed fields" + - 'declaration of "err" shadows declaration' + - "by other packages, and that stutters" + - "Potential file inclusion via variable" + - "at least one file in a package should have a package comment" + - "bad syntax for struct tag pair" diff --git a/vendor/github.com/invopop/jsonschema/COPYING b/vendor/github.com/invopop/jsonschema/COPYING new file mode 100644 index 0000000..2993ec0 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/invopop/jsonschema/README.md b/vendor/github.com/invopop/jsonschema/README.md new file mode 100644 index 0000000..1a68a09 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/README.md @@ -0,0 +1,373 @@ +# Go JSON Schema Reflection + +[![Lint](https://github.com/invopop/jsonschema/actions/workflows/lint.yaml/badge.svg)](https://github.com/invopop/jsonschema/actions/workflows/lint.yaml) +[![Test Go](https://github.com/invopop/jsonschema/actions/workflows/test.yaml/badge.svg)](https://github.com/invopop/jsonschema/actions/workflows/test.yaml) +[![Go Report Card](https://goreportcard.com/badge/github.com/invopop/jsonschema)](https://goreportcard.com/report/github.com/invopop/jsonschema) +[![GoDoc](https://godoc.org/github.com/invopop/jsonschema?status.svg)](https://godoc.org/github.com/invopop/jsonschema) +![Latest Tag](https://img.shields.io/github/v/tag/invopop/jsonschema) + +This package can be used to generate [JSON Schemas](http://json-schema.org/latest/json-schema-validation.html) from Go types through reflection. + +- Supports arbitrarily complex types, including `interface{}`, maps, slices, etc. +- Supports json-schema features such as minLength, maxLength, pattern, format, etc. +- Supports simple string and numeric enums. +- Supports custom property fields via the `jsonschema_extras` struct tag. + +This repository is a fork of the original [jsonschema](https://github.com/alecthomas/jsonschema) by [@alecthomas](https://github.com/alecthomas). 
At [Invopop](https://invopop.com) we use jsonschema as a cornerstone in our [GOBL library](https://github.com/invopop/gobl), and wanted to be able to continue building and adding features without taking up Alec's time. There have been a few significant changes that probably mean this version is not compatible with Alec's:
+
+- The original was stuck on the draft-04 version of JSON Schema; we've now moved to the latest JSON Schema Draft 2020-12.
+- Schema IDs are added automatically from the current Go package's URL in order to be unique, and can be disabled with the `Anonymous` option.
+- Support for the `FullyQualifyTypeName` option has been removed. If you have conflicts, you should use multiple schema files with different IDs, set the `DoNotReference` option to true to hide definitions completely, or add your own naming strategy using the `Namer` property.
+- Support for `yaml` tags and related options has been dropped for the sake of simplification. There were a [few inconsistencies](https://github.com/invopop/jsonschema/pull/21) around this that have now been fixed.
+
+## Versions
+
+This project is still under the v0 scheme; as per Go convention, breaking changes are likely. Please pin go modules to version tags or branches, and reach out if you think something can be improved.
+
+Go version >= 1.18 is required as generics are now being used.
+
+## Example
+
+The following Go type:
+
+```go
+type TestUser struct {
+	ID        int                    `json:"id"`
+	Name      string                 `json:"name" jsonschema:"title=the name,description=The name of a friend,example=joe,example=lucy,default=alex"`
+	Friends   []int                  `json:"friends,omitempty" jsonschema_description:"The list of IDs, omitted when empty"`
+	Tags      map[string]interface{} `json:"tags,omitempty" jsonschema_extras:"a=b,foo=bar,foo=bar1"`
+	BirthDate time.Time              `json:"birth_date,omitempty" jsonschema:"oneof_required=date"`
+	YearOfBirth string               `json:"year_of_birth,omitempty" jsonschema:"oneof_required=year"`
+	Metadata  interface{}            `json:"metadata,omitempty" jsonschema:"oneof_type=string;array"`
+	FavColor  string                 `json:"fav_color,omitempty" jsonschema:"enum=red,enum=green,enum=blue"`
+}
+```
+
+Results in the following JSON Schema:
+
+```go
+jsonschema.Reflect(&TestUser{})
+```
+
+```json
+{
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "$id": "https://github.com/invopop/jsonschema_test/sample-user",
+  "$ref": "#/$defs/SampleUser",
+  "$defs": {
+    "SampleUser": {
+      "oneOf": [
+        {
+          "required": ["birth_date"],
+          "title": "date"
+        },
+        {
+          "required": ["year_of_birth"],
+          "title": "year"
+        }
+      ],
+      "properties": {
+        "id": {
+          "type": "integer"
+        },
+        "name": {
+          "type": "string",
+          "title": "the name",
+          "description": "The name of a friend",
+          "default": "alex",
+          "examples": ["joe", "lucy"]
+        },
+        "friends": {
+          "items": {
+            "type": "integer"
+          },
+          "type": "array",
+          "description": "The list of IDs, omitted when empty"
+        },
+        "tags": {
+          "type": "object",
+          "a": "b",
+          "foo": ["bar", "bar1"]
+        },
+        "birth_date": {
+          "type": "string",
+          "format": "date-time"
+        },
+        "year_of_birth": {
+          "type": "string"
+        },
+        "metadata": {
+          "oneOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "array"
+            }
+          ]
+        },
+        "fav_color": {
+          "type": "string",
+          "enum": ["red", "green", "blue"]
+        }
+      },
+      "additionalProperties": false,
+      "type": "object",
+      "required": ["id", "name"]
+    }
+  }
+}
+```
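+
+To actually emit that document, the `*Schema` returned by `Reflect` can be passed straight to the standard JSON encoder (a small sketch; assumes `encoding/json` and `fmt` are imported alongside this package):
+
+```go
+// Reflect the type, then marshal the resulting *Schema to JSON.
+s := jsonschema.Reflect(&TestUser{})
+data, err := json.MarshalIndent(s, "", "  ")
+if err != nil {
+	// handle the error
+}
+fmt.Println(string(data))
+```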
+
+## YAML
+
+Support for `yaml` tags has now been removed. If you feel very strongly about this, we've opened a discussion to hear your comments: https://github.com/invopop/jsonschema/discussions/28
+
+The recommended approach if you need to deal with YAML data is to first convert to JSON. The [invopop/yaml](https://github.com/invopop/yaml) library will make this trivial.
+
+## Configurable behaviour
+
+The behaviour of the schema generator can be altered with parameters when a `jsonschema.Reflector`
+instance is created.
+
+### ExpandedStruct
+
+If set to `true`, the top-level struct will not reference itself in the definitions, but the type passed must be a struct type.
+
+e.g.
+
+```go
+type GrandfatherType struct {
+	FamilyName string `json:"family_name" jsonschema:"required"`
+}
+
+type SomeBaseType struct {
+	SomeBaseProperty int `json:"some_base_property"`
+	// The jsonschema required tag is nonsensical for private and ignored properties.
+	// Their presence here tests that the fields *will not* be required in the output
+	// schema, even if they are tagged required.
+	somePrivateBaseProperty            string `json:"i_am_private" jsonschema:"required"`
+	SomeIgnoredBaseProperty            string `json:"-" jsonschema:"required"`
+	SomeSchemaIgnoredProperty          string `jsonschema:"-,required"`
+	SomeUntaggedBaseProperty           bool   `jsonschema:"required"`
+	someUnexportedUntaggedBaseProperty bool
+	Grandfather                        GrandfatherType `json:"grand"`
+}
+```
+
+will output:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "required": ["some_base_property", "grand", "SomeUntaggedBaseProperty"],
+  "properties": {
+    "SomeUntaggedBaseProperty": {
+      "type": "boolean"
+    },
+    "grand": {
+      "$schema": "http://json-schema.org/draft/2020-12/schema",
+      "$ref": "#/definitions/GrandfatherType"
+    },
+    "some_base_property": {
+      "type": "integer"
+    }
+  },
+  "type": "object",
+  "$defs": {
+    "GrandfatherType": {
+      "required": ["family_name"],
+      "properties": {
+        "family_name": {
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "type": "object"
+    }
+  }
+}
+```
+
+### Using Go Comments
+
+Writing a good schema with descriptions inside tags can become cumbersome and tedious, especially if you already have some Go comments around your types and field definitions. If you'd like to take advantage of these existing comments, you can use the `AddGoComments(base, path string)` method that forms part of the reflector to parse your go files and automatically generate a dictionary mapping Go import paths, types, and fields to individual comments. These will then be used automatically as description fields, and can be overridden with a manual definition if needed.
+
+Take a simplified example of a `User` struct which, for the sake of simplicity, we assume is defined inside this package:
+
+```go
+package main
+
+// User is used as a base to provide tests for comments.
+type User struct {
+	// Unique sequential identifier.
+	ID int `json:"id" jsonschema:"required"`
+	// Name of the user
+	Name string `json:"name"`
+}
+```
+
+To get the comments provided into your JSON schema, use a regular `Reflector` and add the go code using an import module URL and path.
Fully qualified go module paths cannot be determined reliably by the `go/parser` library, so we need to introduce this manually: + +```go +r := new(Reflector) +if err := r.AddGoComments("github.com/invopop/jsonschema", "./"); err != nil { + // deal with error +} +s := r.Reflect(&User{}) +// output +``` + +Expect the results to be similar to: + +```json +{ + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$ref": "#/$defs/User", + "$defs": { + "User": { + "required": ["id"], + "properties": { + "id": { + "type": "integer", + "description": "Unique sequential identifier." + }, + "name": { + "type": "string", + "description": "Name of the user" + } + }, + "additionalProperties": false, + "type": "object", + "description": "User is used as a base to provide tests for comments." + } + } +} +``` + +### Custom Key Naming + +In some situations, the keys actually used to write files are different from Go structs'. + +This is often the case when writing a configuration file to YAML or JSON from a Go struct, or when returning a JSON response for a Web API: APIs typically use snake_case, while Go uses PascalCase. + +You can pass a `func(string) string` function to `Reflector`'s `KeyNamer` option to map Go field names to JSON key names and reflect the aforementioned transformations, without having to specify `json:"..."` on every struct field. + +For example, consider the following struct + +```go +type User struct { + GivenName string + PasswordSalted []byte `json:"salted_password"` +} +``` + +We can transform field names to snake_case in the generated JSON schema: + +```go +r := new(jsonschema.Reflector) +r.KeyNamer = strcase.SnakeCase // from package github.com/stoewer/go-strcase + +r.Reflect(&User{}) +``` + +Will yield + +```diff + { + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$ref": "#/$defs/User", + "$defs": { + "User": { + "properties": { +- "GivenName": { ++ "given_name": { + "type": "string" + }, + "salted_password": { + "type": "string", + "contentEncoding": "base64" + } + }, + "additionalProperties": false, + "type": "object", +- "required": ["GivenName", "salted_password"] ++ "required": ["given_name", "salted_password"] + } + } + } +``` + +As you can see, if a field name has a `json:""` tag set, the `key` argument to `KeyNamer` will have the value of that tag. + +### Custom Type Definitions + +Sometimes it can be useful to have custom JSON Marshal and Unmarshal methods in your structs that automatically convert for example a string into an object. + +This library will recognize and attempt to call four different methods that help you adjust schemas to your specific needs: + +- `JSONSchema() *Schema` - will prevent auto-generation of the schema so that you can provide your own definition. +- `JSONSchemaExtend(schema *jsonschema.Schema)` - will be called _after_ the schema has been generated, allowing you to add or manipulate the fields easily. +- `JSONSchemaAlias() any` - is called when reflecting the type of object and allows for an alternative to be used instead. +- `JSONSchemaProperty(prop string) any` - will be called for every property inside a struct giving you the chance to provide an alternative object to convert into a schema. + +Note that all of these methods **must** be defined on a non-pointer object for them to be called. 
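+
+For instance, `JSONSchemaExtend` lets you tweak a generated schema without replacing it (a brief sketch; the `Envelope` type here is purely illustrative):
+
+```go
+type Envelope struct {
+	Payload string `json:"payload"`
+}
+
+// JSONSchemaExtend is called after the schema for Envelope has been
+// generated, so fields can be adjusted in place.
+func (Envelope) JSONSchemaExtend(s *jsonschema.Schema) {
+	s.Title = "Envelope"
+	s.Description = "A wrapper for raw payloads"
+}
+```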
+
+Take the following simplified example of a `CompactDate` that only includes the Year and Month:
+
+```go
+type CompactDate struct {
+	Year  int
+	Month int
+}
+
+func (d *CompactDate) UnmarshalJSON(data []byte) error {
+	if len(data) != 9 {
+		return errors.New("invalid compact date length")
+	}
+	var err error
+	d.Year, err = strconv.Atoi(string(data[1:5]))
+	if err != nil {
+		return err
+	}
+	d.Month, err = strconv.Atoi(string(data[6:8]))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *CompactDate) MarshalJSON() ([]byte, error) {
+	buf := new(bytes.Buffer)
+	buf.WriteByte('"')
+	buf.WriteString(fmt.Sprintf("%d-%02d", d.Year, d.Month))
+	buf.WriteByte('"')
+	return buf.Bytes(), nil
+}
+
+func (CompactDate) JSONSchema() *Schema {
+	return &Schema{
+		Type:        "string",
+		Title:       "Compact Date",
+		Description: "Short date that only includes year and month",
+		Pattern:     "^[0-9]{4}-[0-1][0-9]$",
+	}
+}
+```
+
+The resulting schema generated for this struct would look like:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "$ref": "#/$defs/CompactDate",
+  "$defs": {
+    "CompactDate": {
+      "pattern": "^[0-9]{4}-[0-1][0-9]$",
+      "type": "string",
+      "title": "Compact Date",
+      "description": "Short date that only includes year and month"
+    }
+  }
+}
+```
diff --git a/vendor/github.com/invopop/jsonschema/comment_extractor.go b/vendor/github.com/invopop/jsonschema/comment_extractor.go
new file mode 100644
index 0000000..e157837
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/comment_extractor.go
@@ -0,0 +1,93 @@
+package jsonschema
+
+import (
+	"fmt"
+	"io/fs"
+	gopath "path"
+	"path/filepath"
+	"strings"
+
+	"go/ast"
+	"go/doc"
+	"go/parser"
+	"go/token"
+)
+
+// ExtractGoComments will read all the go files contained in the provided path,
+// including sub-directories, in order to generate a dictionary of comments
+// associated with Types and Fields. The results will be added to the `commentMap`
+// provided in the parameters and expected to be used for Schema "description" fields.
+//
+// The `go/parser` library is used to extract all the comments and unfortunately doesn't
+// have a built-in way to determine the fully qualified name of a package. The `base` parameter,
+// the URL used to import that package, is thus required to be able to match reflected types.
+//
+// When parsing type comments, we use the `go/doc`'s Synopsis method to extract the first phrase
+// only. Field comments, which tend to be much shorter, will include everything.
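+//
+// Most callers will not use this function directly; the Reflector's
+// AddGoComments helper calls it and stores the results in the Reflector's
+// CommentMap.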
+func ExtractGoComments(base, path string, commentMap map[string]string) error { + fset := token.NewFileSet() + dict := make(map[string][]*ast.Package) + err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + d, err := parser.ParseDir(fset, path, nil, parser.ParseComments) + if err != nil { + return err + } + for _, v := range d { + // paths may have multiple packages, like for tests + k := gopath.Join(base, path) + dict[k] = append(dict[k], v) + } + } + return nil + }) + if err != nil { + return err + } + + for pkg, p := range dict { + for _, f := range p { + gtxt := "" + typ := "" + ast.Inspect(f, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.TypeSpec: + typ = x.Name.String() + if !ast.IsExported(typ) { + typ = "" + } else { + txt := x.Doc.Text() + if txt == "" && gtxt != "" { + txt = gtxt + gtxt = "" + } + txt = doc.Synopsis(txt) + commentMap[fmt.Sprintf("%s.%s", pkg, typ)] = strings.TrimSpace(txt) + } + case *ast.Field: + txt := x.Doc.Text() + if txt == "" { + txt = x.Comment.Text() + } + if typ != "" && txt != "" { + for _, n := range x.Names { + if ast.IsExported(n.String()) { + k := fmt.Sprintf("%s.%s.%s", pkg, typ, n) + commentMap[k] = strings.TrimSpace(txt) + } + } + } + case *ast.GenDecl: + // remember for the next type + gtxt = x.Doc.Text() + } + return true + }) + } + } + + return nil +} diff --git a/vendor/github.com/invopop/jsonschema/id.go b/vendor/github.com/invopop/jsonschema/id.go new file mode 100644 index 0000000..73fafb3 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/id.go @@ -0,0 +1,76 @@ +package jsonschema + +import ( + "errors" + "fmt" + "net/url" + "strings" +) + +// ID represents a Schema ID type which should always be a URI. +// See draft-bhutton-json-schema-00 section 8.2.1 +type ID string + +// EmptyID is used to explicitly define an ID with no value. +const EmptyID ID = "" + +// Validate is used to check if the ID looks like a proper schema. +// This is done by parsing the ID as a URL and checking it has all the +// relevant parts. +func (id ID) Validate() error { + u, err := url.Parse(id.String()) + if err != nil { + return fmt.Errorf("invalid URL: %w", err) + } + if u.Hostname() == "" { + return errors.New("missing hostname") + } + if !strings.Contains(u.Hostname(), ".") { + return errors.New("hostname does not look valid") + } + if u.Path == "" { + return errors.New("path is expected") + } + if u.Scheme != "https" && u.Scheme != "http" { + return errors.New("unexpected schema") + } + return nil +} + +// Anchor sets the anchor part of the schema URI. +func (id ID) Anchor(name string) ID { + b := id.Base() + return ID(b.String() + "#" + name) +} + +// Def adds or replaces a definition identifier. +func (id ID) Def(name string) ID { + b := id.Base() + return ID(b.String() + "#/$defs/" + name) +} + +// Add appends the provided path to the id, and removes any +// anchor data that might be there. 
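+// For example, ID("https://example.com/schemas").Add("user") yields
+// "https://example.com/schemas/user".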
+func (id ID) Add(path string) ID {
+	b := id.Base()
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return ID(b.String() + path)
+}
+
+// Base removes any anchor information from the schema
+func (id ID) Base() ID {
+	s := id.String()
+	i := strings.LastIndex(s, "#")
+	if i != -1 {
+		s = s[0:i]
+	}
+	s = strings.TrimRight(s, "/")
+	return ID(s)
+}
+
+// String provides the string version of the ID
+func (id ID) String() string {
+	return string(id)
+}
diff --git a/vendor/github.com/invopop/jsonschema/reflect.go b/vendor/github.com/invopop/jsonschema/reflect.go
new file mode 100644
index 0000000..3249c8c
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/reflect.go
@@ -0,0 +1,1150 @@
+// Package jsonschema uses reflection to generate JSON Schemas from Go types [1].
+//
+// If json tags are present on struct fields, they will be used to infer
+// property names and if a property is required (omitempty is present).
+//
+// [1] http://json-schema.org/latest/json-schema-validation.html
+package jsonschema
+
+import (
+	"bytes"
+	"encoding/json"
+	"net"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// customSchemaImpl is used to detect if the type provides its own
+// custom Schema Type definition to use instead. Very useful for situations
+// where there are custom JSON Marshal and Unmarshal methods.
+type customSchemaImpl interface {
+	JSONSchema() *Schema
+}
+
+// extendSchemaImpl provides a method to be run after the schema has been
+// generated. This will let you modify the schema afterwards.
+type extendSchemaImpl interface {
+	JSONSchemaExtend(*Schema)
+}
+
+// If the object to be reflected defines a `JSONSchemaAlias` method, its type will
+// be used instead of the original type.
+type aliasSchemaImpl interface {
+	JSONSchemaAlias() any
+}
+
+// If an object to be reflected defines a `JSONSchemaProperty` method,
+// it will be called for each property to determine if another object
+// should be used for the contents.
+type propertyAliasSchemaImpl interface {
+	JSONSchemaProperty(prop string) any
+}
+
+var customAliasSchema = reflect.TypeOf((*aliasSchemaImpl)(nil)).Elem()
+var customPropertyAliasSchema = reflect.TypeOf((*propertyAliasSchemaImpl)(nil)).Elem()
+
+var customType = reflect.TypeOf((*customSchemaImpl)(nil)).Elem()
+var extendType = reflect.TypeOf((*extendSchemaImpl)(nil)).Elem()
+
+// customSchemaGetFieldDocString is used to detect if the type provides
+// its own doc strings for fields.
+type customSchemaGetFieldDocString interface {
+	GetFieldDocString(fieldName string) string
+}
+
+type customGetFieldDocString func(fieldName string) string
+
+var customStructGetFieldDocString = reflect.TypeOf((*customSchemaGetFieldDocString)(nil)).Elem()
+
+// Reflect reflects to Schema from a value using the default Reflector
+func Reflect(v any) *Schema {
+	return ReflectFromType(reflect.TypeOf(v))
+}
+
+// ReflectFromType generates root schema using the default Reflector
+func ReflectFromType(t reflect.Type) *Schema {
+	r := &Reflector{}
+	return r.ReflectFromType(t)
+}
+
+// A Reflector reflects values into a Schema.
+type Reflector struct {
+	// BaseSchemaID defines the URI that will be used as a base to determine Schema
+	// IDs for models. For example, a base Schema ID of `https://invopop.com/schemas`
+	// when defined with a struct called `User{}`, will result in a schema with an
+	// ID set to `https://invopop.com/schemas/user`.
+	//
+	// If no `BaseSchemaID` is provided, we'll take the type's complete package path
+	// and use that as a base instead. Set `Anonymous` to true if you do not want to
+	// include a schema ID.
+ BaseSchemaID ID + + // Anonymous when true will hide the auto-generated Schema ID and provide what is + // known as an "anonymous schema". As a rule, this is not recommended. + Anonymous bool + + // AssignAnchor when true will use the original struct's name as an anchor inside + // every definition, including the root schema. These can be useful for having a + // reference to the original struct's name in CamelCase instead of the snake-case used + // by default for URI compatibility. + // + // Anchors do not appear to be widely used out in the wild, so at this time the + // anchors themselves will not be used inside generated schema. + AssignAnchor bool + + // AllowAdditionalProperties will cause the Reflector to generate a schema + // without additionalProperties set to 'false' for all struct types. This means + // the presence of additional keys in JSON objects will not cause validation + // to fail. Note said additional keys will simply be dropped when the + // validated JSON is unmarshaled. + AllowAdditionalProperties bool + + // RequiredFromJSONSchemaTags will cause the Reflector to generate a schema + // that requires any key tagged with `jsonschema:required`, overriding the + // default of requiring any key *not* tagged with `json:,omitempty`. + RequiredFromJSONSchemaTags bool + + // Do not reference definitions. This will remove the top-level $defs map and + // instead cause the entire structure of types to be output in one tree. The + // list of type definitions (`$defs`) will not be included. + DoNotReference bool + + // ExpandedStruct when true will include the reflected type's definition in the + // root as opposed to a definition with a reference. + ExpandedStruct bool + + // FieldNameTag will change the tag used to get field names. json tags are used by default. + FieldNameTag string + + // IgnoredTypes defines a slice of types that should be ignored in the schema, + // switching to just allowing additional properties instead. + IgnoredTypes []any + + // Lookup allows a function to be defined that will provide a custom mapping of + // types to Schema IDs. This allows existing schema documents to be referenced + // by their ID instead of being embedded into the current schema definitions. + // Reflected types will never be pointers, only underlying elements. + Lookup func(reflect.Type) ID + + // Mapper is a function that can be used to map custom Go types to jsonschema schemas. + Mapper func(reflect.Type) *Schema + + // Namer allows customizing of type names. The default is to use the type's name + // provided by the reflect package. + Namer func(reflect.Type) string + + // KeyNamer allows customizing of key names. + // The default is to use the key's name as is, or the json tag if present. + // If a json tag is present, KeyNamer will receive the tag's name as an argument, not the original key name. + KeyNamer func(string) string + + // AdditionalFields allows adding structfields for a given type + AdditionalFields func(reflect.Type) []reflect.StructField + + // CommentMap is a dictionary of fully qualified go types and fields to comment + // strings that will be used if a description has not already been provided in + // the tags. Types and fields are added to the package path using "." as a + // separator. 
+	//
+	// Type descriptions should be defined like:
+	//
+	//   map[string]string{"github.com/invopop/jsonschema.Reflector": "A Reflector reflects values into a Schema."}
+	//
+	// And Fields defined as:
+	//
+	//   map[string]string{"github.com/invopop/jsonschema.Reflector.DoNotReference": "Do not reference definitions."}
+	//
+	// See also: AddGoComments
+	CommentMap map[string]string
+}
+
+// Reflect reflects to Schema from a value.
+func (r *Reflector) Reflect(v any) *Schema {
+	return r.ReflectFromType(reflect.TypeOf(v))
+}
+
+// ReflectFromType generates the root schema for the given type.
+func (r *Reflector) ReflectFromType(t reflect.Type) *Schema {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem() // re-assign from pointer
+	}
+
+	name := r.typeName(t)
+
+	s := new(Schema)
+	definitions := Definitions{}
+	s.Definitions = definitions
+	bs := r.reflectTypeToSchemaWithID(definitions, t)
+	if r.ExpandedStruct {
+		*s = *definitions[name]
+		delete(definitions, name)
+	} else {
+		*s = *bs
+	}
+
+	// Attempt to set the schema ID
+	if !r.Anonymous && s.ID == EmptyID {
+		baseSchemaID := r.BaseSchemaID
+		if baseSchemaID == EmptyID {
+			id := ID("https://" + t.PkgPath())
+			if err := id.Validate(); err == nil {
+				// it's okay to silently ignore URL errors
+				baseSchemaID = id
+			}
+		}
+		if baseSchemaID != EmptyID {
+			s.ID = baseSchemaID.Add(ToSnakeCase(name))
+		}
+	}
+
+	s.Version = Version
+	if !r.DoNotReference {
+		s.Definitions = definitions
+	}
+
+	return s
+}
+
+// Available Go defined types for JSON Schema Validation.
+// RFC draft-wright-json-schema-validation-00, section 7.3
+var (
+	timeType = reflect.TypeOf(time.Time{}) // date-time RFC section 7.3.1
+	ipType   = reflect.TypeOf(net.IP{})    // ipv4 and ipv6 RFC section 7.3.4, 7.3.5
+	uriType  = reflect.TypeOf(url.URL{})   // uri RFC section 7.3.6
+)
+
+// Byte slices will be encoded as base64
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+// Except for json.RawMessage
+var rawMessageType = reflect.TypeOf(json.RawMessage{})
+
+// Go code generated from protobuf enum types should fulfil this interface.
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+var protoEnumType = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+// SetBaseSchemaID is a helper used to set the reflector's base
+// schema ID from a string, as opposed to an ID instance.
+func (r *Reflector) SetBaseSchemaID(id string) {
+	r.BaseSchemaID = ID(id)
+}
+
+func (r *Reflector) refOrReflectTypeToSchema(definitions Definitions, t reflect.Type) *Schema {
+	id := r.lookupID(t)
+	if id != EmptyID {
+		return &Schema{
+			Ref: id.String(),
+		}
+	}
+
+	// Already added to definitions?
+	if def := r.refDefinition(definitions, t); def != nil {
+		return def
+	}
+
+	return r.reflectTypeToSchemaWithID(definitions, t)
+}
+
+func (r *Reflector) reflectTypeToSchemaWithID(defs Definitions, t reflect.Type) *Schema {
+	s := r.reflectTypeToSchema(defs, t)
+	if s != nil {
+		if r.Lookup != nil {
+			id := r.Lookup(t)
+			if id != EmptyID {
+				s.ID = id
+			}
+		}
+	}
+	return s
+}
+
+func (r *Reflector) reflectTypeToSchema(definitions Definitions, t reflect.Type) *Schema {
+	// only try to reflect non-pointers
+	if t.Kind() == reflect.Ptr {
+		return r.refOrReflectTypeToSchema(definitions, t.Elem())
+	}
+
+	// Check if there is an alias method that provides an object
+	// that we should use instead of this one.
+ if t.Implements(customAliasSchema) { + v := reflect.New(t) + o := v.Interface().(aliasSchemaImpl) + t = reflect.TypeOf(o.JSONSchemaAlias()) + return r.refOrReflectTypeToSchema(definitions, t) + } + + // Do any pre-definitions exist? + if r.Mapper != nil { + if t := r.Mapper(t); t != nil { + return t + } + } + if rt := r.reflectCustomSchema(definitions, t); rt != nil { + return rt + } + + // Prepare a base to which details can be added + st := new(Schema) + + // jsonpb will marshal protobuf enum options as either strings or integers. + // It will unmarshal either. + if t.Implements(protoEnumType) { + st.OneOf = []*Schema{ + {Type: "string"}, + {Type: "integer"}, + } + return st + } + + // Defined format types for JSON Schema Validation + // RFC draft-wright-json-schema-validation-00, section 7.3 + // TODO email RFC section 7.3.2, hostname RFC section 7.3.3, uriref RFC section 7.3.7 + if t == ipType { + // TODO differentiate ipv4 and ipv6 RFC section 7.3.4, 7.3.5 + st.Type = "string" + st.Format = "ipv4" + return st + } + + switch t.Kind() { + case reflect.Struct: + r.reflectStruct(definitions, t, st) + + case reflect.Slice, reflect.Array: + r.reflectSliceOrArray(definitions, t, st) + + case reflect.Map: + r.reflectMap(definitions, t, st) + + case reflect.Interface: + // empty + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + st.Type = "integer" + + case reflect.Float32, reflect.Float64: + st.Type = "number" + + case reflect.Bool: + st.Type = "boolean" + + case reflect.String: + st.Type = "string" + + default: + panic("unsupported type " + t.String()) + } + + r.reflectSchemaExtend(definitions, t, st) + + // Always try to reference the definition which may have just been created + if def := r.refDefinition(definitions, t); def != nil { + return def + } + + return st +} + +func (r *Reflector) reflectCustomSchema(definitions Definitions, t reflect.Type) *Schema { + if t.Kind() == reflect.Ptr { + return r.reflectCustomSchema(definitions, t.Elem()) + } + + if t.Implements(customType) { + v := reflect.New(t) + o := v.Interface().(customSchemaImpl) + st := o.JSONSchema() + r.addDefinition(definitions, t, st) + if ref := r.refDefinition(definitions, t); ref != nil { + return ref + } + return st + } + + return nil +} + +func (r *Reflector) reflectSchemaExtend(definitions Definitions, t reflect.Type, s *Schema) *Schema { + if t.Implements(extendType) { + v := reflect.New(t) + o := v.Interface().(extendSchemaImpl) + o.JSONSchemaExtend(s) + if ref := r.refDefinition(definitions, t); ref != nil { + return ref + } + } + + return s +} + +func (r *Reflector) reflectSliceOrArray(definitions Definitions, t reflect.Type, st *Schema) { + if t == rawMessageType { + return + } + + r.addDefinition(definitions, t, st) + + if st.Description == "" { + st.Description = r.lookupComment(t, "") + } + + if t.Kind() == reflect.Array { + l := uint64(t.Len()) + st.MinItems = &l + st.MaxItems = &l + } + if t.Kind() == reflect.Slice && t.Elem() == byteSliceType.Elem() { + st.Type = "string" + // NOTE: ContentMediaType is not set here + st.ContentEncoding = "base64" + } else { + st.Type = "array" + st.Items = r.refOrReflectTypeToSchema(definitions, t.Elem()) + } +} + +func (r *Reflector) reflectMap(definitions Definitions, t reflect.Type, st *Schema) { + r.addDefinition(definitions, t, st) + + st.Type = "object" + if st.Description == "" { + st.Description = r.lookupComment(t, "") + } + + switch t.Key().Kind() { + 
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		st.PatternProperties = map[string]*Schema{
+			"^[0-9]+$": r.refOrReflectTypeToSchema(definitions, t.Elem()),
+		}
+		st.AdditionalProperties = FalseSchema
+		return
+	}
+	if t.Elem().Kind() != reflect.Interface {
+		st.AdditionalProperties = r.refOrReflectTypeToSchema(definitions, t.Elem())
+	}
+}
+
+// Reflects a struct to a JSON Schema type.
+func (r *Reflector) reflectStruct(definitions Definitions, t reflect.Type, s *Schema) {
+	// Handle special types
+	switch t {
+	case timeType: // date-time RFC section 7.3.1
+		s.Type = "string"
+		s.Format = "date-time"
+		return
+	case uriType: // uri RFC section 7.3.6
+		s.Type = "string"
+		s.Format = "uri"
+		return
+	}
+
+	r.addDefinition(definitions, t, s)
+	s.Type = "object"
+	s.Properties = NewProperties()
+	s.Description = r.lookupComment(t, "")
+	if r.AssignAnchor {
+		s.Anchor = t.Name()
+	}
+	if !r.AllowAdditionalProperties && s.AdditionalProperties == nil {
+		s.AdditionalProperties = FalseSchema
+	}
+
+	ignored := false
+	for _, it := range r.IgnoredTypes {
+		if reflect.TypeOf(it) == t {
+			ignored = true
+			break
+		}
+	}
+	if !ignored {
+		r.reflectStructFields(s, definitions, t)
+	}
+}
+
+func (r *Reflector) reflectStructFields(st *Schema, definitions Definitions, t reflect.Type) {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() != reflect.Struct {
+		return
+	}
+
+	var getFieldDocString customGetFieldDocString
+	if t.Implements(customStructGetFieldDocString) {
+		v := reflect.New(t)
+		o := v.Interface().(customSchemaGetFieldDocString)
+		getFieldDocString = o.GetFieldDocString
+	}
+
+	customPropertyMethod := func(string) any {
+		return nil
+	}
+	if t.Implements(customPropertyAliasSchema) {
+		v := reflect.New(t)
+		o := v.Interface().(propertyAliasSchemaImpl)
+		customPropertyMethod = o.JSONSchemaProperty
+	}
+
+	handleField := func(f reflect.StructField) {
+		name, shouldEmbed, required, nullable := r.reflectFieldName(f)
+		// if anonymous and exported type should be processed recursively
+		// current type should inherit properties of anonymous one
+		if name == "" {
+			if shouldEmbed {
+				r.reflectStructFields(st, definitions, f.Type)
+			}
+			return
+		}
+
+		// If a JSONSchemaProperty(prop string) method is defined, attempt to use
+		// the provided object's type instead of the field's type.
+ var property *Schema + if alias := customPropertyMethod(name); alias != nil { + property = r.refOrReflectTypeToSchema(definitions, reflect.TypeOf(alias)) + } else { + property = r.refOrReflectTypeToSchema(definitions, f.Type) + } + + property.structKeywordsFromTags(f, st, name) + if property.Description == "" { + property.Description = r.lookupComment(t, f.Name) + } + if getFieldDocString != nil { + property.Description = getFieldDocString(f.Name) + } + + if nullable { + property = &Schema{ + OneOf: []*Schema{ + property, + { + Type: "null", + }, + }, + } + } + + st.Properties.Set(name, property) + if required { + st.Required = appendUniqueString(st.Required, name) + } + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + handleField(f) + } + if r.AdditionalFields != nil { + if af := r.AdditionalFields(t); af != nil { + for _, sf := range af { + handleField(sf) + } + } + } +} + +func appendUniqueString(base []string, value string) []string { + for _, v := range base { + if v == value { + return base + } + } + return append(base, value) +} + +func (r *Reflector) lookupComment(t reflect.Type, name string) string { + if r.CommentMap == nil { + return "" + } + + n := fullyQualifiedTypeName(t) + if name != "" { + n = n + "." + name + } + + return r.CommentMap[n] +} + +// addDefinition will append the provided schema. If needed, an ID and anchor will also be added. +func (r *Reflector) addDefinition(definitions Definitions, t reflect.Type, s *Schema) { + name := r.typeName(t) + if name == "" { + return + } + definitions[name] = s +} + +// refDefinition will provide a schema with a reference to an existing definition. +func (r *Reflector) refDefinition(definitions Definitions, t reflect.Type) *Schema { + if r.DoNotReference { + return nil + } + name := r.typeName(t) + if name == "" { + return nil + } + if _, ok := definitions[name]; !ok { + return nil + } + return &Schema{ + Ref: "#/$defs/" + name, + } +} + +func (r *Reflector) lookupID(t reflect.Type) ID { + if r.Lookup != nil { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return r.Lookup(t) + + } + return EmptyID +} + +func (t *Schema) structKeywordsFromTags(f reflect.StructField, parent *Schema, propertyName string) { + t.Description = f.Tag.Get("jsonschema_description") + + tags := splitOnUnescapedCommas(f.Tag.Get("jsonschema")) + tags = t.genericKeywords(tags, parent, propertyName) + + switch t.Type { + case "string": + t.stringKeywords(tags) + case "number": + t.numericalKeywords(tags) + case "integer": + t.numericalKeywords(tags) + case "array": + t.arrayKeywords(tags) + case "boolean": + t.booleanKeywords(tags) + } + extras := strings.Split(f.Tag.Get("jsonschema_extras"), ",") + t.extraKeywords(extras) +} + +// read struct tags for generic keywords +func (t *Schema) genericKeywords(tags []string, parent *Schema, propertyName string) []string { //nolint:gocyclo + unprocessed := make([]string, 0, len(tags)) + for _, tag := range tags { + nameValue := strings.SplitN(tag, "=", 2) + if len(nameValue) == 2 { + name, val := nameValue[0], nameValue[1] + switch name { + case "title": + t.Title = val + case "description": + t.Description = val + case "type": + t.Type = val + case "anchor": + t.Anchor = val + case "oneof_required": + var typeFound *Schema + for i := range parent.OneOf { + if parent.OneOf[i].Title == nameValue[1] { + typeFound = parent.OneOf[i] + } + } + if typeFound == nil { + typeFound = &Schema{ + Title: nameValue[1], + Required: []string{}, + } + parent.OneOf = append(parent.OneOf, typeFound) + } + 
typeFound.Required = append(typeFound.Required, propertyName) + case "anyof_required": + var typeFound *Schema + for i := range parent.AnyOf { + if parent.AnyOf[i].Title == nameValue[1] { + typeFound = parent.AnyOf[i] + } + } + if typeFound == nil { + typeFound = &Schema{ + Title: nameValue[1], + Required: []string{}, + } + parent.AnyOf = append(parent.AnyOf, typeFound) + } + typeFound.Required = append(typeFound.Required, propertyName) + case "oneof_ref": + subSchema := t + if t.Items != nil { + subSchema = t.Items + } + if subSchema.OneOf == nil { + subSchema.OneOf = make([]*Schema, 0, 1) + } + subSchema.Ref = "" + refs := strings.Split(nameValue[1], ";") + for _, r := range refs { + subSchema.OneOf = append(subSchema.OneOf, &Schema{ + Ref: r, + }) + } + case "oneof_type": + if t.OneOf == nil { + t.OneOf = make([]*Schema, 0, 1) + } + t.Type = "" + types := strings.Split(nameValue[1], ";") + for _, ty := range types { + t.OneOf = append(t.OneOf, &Schema{ + Type: ty, + }) + } + case "anyof_ref": + subSchema := t + if t.Items != nil { + subSchema = t.Items + } + if subSchema.AnyOf == nil { + subSchema.AnyOf = make([]*Schema, 0, 1) + } + subSchema.Ref = "" + refs := strings.Split(nameValue[1], ";") + for _, r := range refs { + subSchema.AnyOf = append(subSchema.AnyOf, &Schema{ + Ref: r, + }) + } + case "anyof_type": + if t.AnyOf == nil { + t.AnyOf = make([]*Schema, 0, 1) + } + t.Type = "" + types := strings.Split(nameValue[1], ";") + for _, ty := range types { + t.AnyOf = append(t.AnyOf, &Schema{ + Type: ty, + }) + } + default: + unprocessed = append(unprocessed, tag) + } + } + } + return unprocessed +} + +// read struct tags for boolean type keywords +func (t *Schema) booleanKeywords(tags []string) { + for _, tag := range tags { + nameValue := strings.Split(tag, "=") + if len(nameValue) != 2 { + continue + } + name, val := nameValue[0], nameValue[1] + if name == "default" { + if val == "true" { + t.Default = true + } else if val == "false" { + t.Default = false + } + } + } +} + +// read struct tags for string type keywords +func (t *Schema) stringKeywords(tags []string) { + for _, tag := range tags { + nameValue := strings.SplitN(tag, "=", 2) + if len(nameValue) == 2 { + name, val := nameValue[0], nameValue[1] + switch name { + case "minLength": + t.MinLength = parseUint(val) + case "maxLength": + t.MaxLength = parseUint(val) + case "pattern": + t.Pattern = val + case "format": + switch val { + case "date-time", "email", "hostname", "ipv4", "ipv6", "uri", "uuid": + t.Format = val + } + case "readOnly": + i, _ := strconv.ParseBool(val) + t.ReadOnly = i + case "writeOnly": + i, _ := strconv.ParseBool(val) + t.WriteOnly = i + case "default": + t.Default = val + case "example": + t.Examples = append(t.Examples, val) + case "enum": + t.Enum = append(t.Enum, val) + } + } + } +} + +// read struct tags for numerical type keywords +func (t *Schema) numericalKeywords(tags []string) { + for _, tag := range tags { + nameValue := strings.Split(tag, "=") + if len(nameValue) == 2 { + name, val := nameValue[0], nameValue[1] + switch name { + case "multipleOf": + t.MultipleOf, _ = toJSONNumber(val) + case "minimum": + t.Minimum, _ = toJSONNumber(val) + case "maximum": + t.Maximum, _ = toJSONNumber(val) + case "exclusiveMaximum": + t.ExclusiveMaximum, _ = toJSONNumber(val) + case "exclusiveMinimum": + t.ExclusiveMinimum, _ = toJSONNumber(val) + case "default": + if num, ok := toJSONNumber(val); ok { + t.Default = num + } + case "example": + if num, ok := toJSONNumber(val); ok { + t.Examples = 
append(t.Examples, num)
+				}
+			case "enum":
+				if num, ok := toJSONNumber(val); ok {
+					t.Enum = append(t.Enum, num)
+				}
+			}
+		}
+	}
+}
+
+// read struct tags for object type keywords
+// func (t *Type) objectKeywords(tags []string) {
+// 	for _, tag := range tags{
+// 		nameValue := strings.Split(tag, "=")
+// 		name, val := nameValue[0], nameValue[1]
+// 		switch name{
+// 			case "dependencies":
+// 				t.Dependencies = val
+// 				break;
+// 			case "patternProperties":
+// 				t.PatternProperties = val
+// 				break;
+// 		}
+// 	}
+// }
+
+// read struct tags for array type keywords
+func (t *Schema) arrayKeywords(tags []string) {
+	var defaultValues []any
+
+	unprocessed := make([]string, 0, len(tags))
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) == 2 {
+			name, val := nameValue[0], nameValue[1]
+			switch name {
+			case "minItems":
+				t.MinItems = parseUint(val)
+			case "maxItems":
+				t.MaxItems = parseUint(val)
+			case "uniqueItems":
+				t.UniqueItems = true
+			case "default":
+				defaultValues = append(defaultValues, val)
+			case "format":
+				t.Items.Format = val
+			case "pattern":
+				t.Items.Pattern = val
+			default:
+				unprocessed = append(unprocessed, tag) // left for further processing by underlying type
+			}
+		}
+	}
+	if len(defaultValues) > 0 {
+		t.Default = defaultValues
+	}
+
+	if len(unprocessed) == 0 {
+		// we don't have anything else to process
+		return
+	}
+
+	switch t.Items.Type {
+	case "string":
+		t.Items.stringKeywords(unprocessed)
+	case "number":
+		t.Items.numericalKeywords(unprocessed)
+	case "integer":
+		t.Items.numericalKeywords(unprocessed)
+	case "array":
+		// explicitly don't support traversal for the [][]..., as it's unclear where the array tags belong
+	case "boolean":
+		t.Items.booleanKeywords(unprocessed)
+	}
+}
+
+func (t *Schema) extraKeywords(tags []string) {
+	for _, tag := range tags {
+		nameValue := strings.SplitN(tag, "=", 2)
+		if len(nameValue) == 2 {
+			t.setExtra(nameValue[0], nameValue[1])
+		}
+	}
+}
+
+func (t *Schema) setExtra(key, val string) {
+	if t.Extras == nil {
+		t.Extras = map[string]any{}
+	}
+	if existingVal, ok := t.Extras[key]; ok {
+		switch existingVal := existingVal.(type) {
+		case string:
+			t.Extras[key] = []string{existingVal, val}
+		case []string:
+			t.Extras[key] = append(existingVal, val)
+		case int:
+			t.Extras[key], _ = strconv.Atoi(val)
+		case bool:
+			t.Extras[key] = (val == "true" || val == "t")
+		}
+	} else {
+		switch key {
+		case "minimum":
+			t.Extras[key], _ = strconv.Atoi(val)
+		default:
+			var x any
+			if val == "true" {
+				x = true
+			} else if val == "false" {
+				x = false
+			} else {
+				x = val
+			}
+			t.Extras[key] = x
+		}
+	}
+}
+
+func requiredFromJSONTags(tags []string, val *bool) {
+	if ignoredByJSONTags(tags) {
+		return
+	}
+
+	for _, tag := range tags[1:] {
+		if tag == "omitempty" {
+			*val = false
+			return
+		}
+	}
+	*val = true
+}
+
+func requiredFromJSONSchemaTags(tags []string, val *bool) {
+	if ignoredByJSONSchemaTags(tags) {
+		return
+	}
+	for _, tag := range tags {
+		if tag == "required" {
+			*val = true
+		}
+	}
+}
+
+func nullableFromJSONSchemaTags(tags []string) bool {
+	if ignoredByJSONSchemaTags(tags) {
+		return false
+	}
+	for _, tag := range tags {
+		if tag == "nullable" {
+			return true
+		}
+	}
+	return false
+}
+
+func ignoredByJSONTags(tags []string) bool {
+	return tags[0] == "-"
+}
+
+func ignoredByJSONSchemaTags(tags []string) bool {
+	return tags[0] == "-"
+}
+
+// toJSONNumber converts a string to a json.Number.
+// It'll also return whether the number is valid.
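+// For example, toJSONNumber("1.5") returns (json.Number("1.5"), true),
+// while toJSONNumber("abc") returns an empty json.Number and false.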
+func toJSONNumber(s string) (json.Number, bool) { + num := json.Number(s) + if _, err := num.Int64(); err == nil { + return num, true + } + if _, err := num.Float64(); err == nil { + return num, true + } + return json.Number(""), false +} + +func parseUint(num string) *uint64 { + val, err := strconv.ParseUint(num, 10, 64) + if err != nil { + return nil + } + return &val +} + +func (r *Reflector) fieldNameTag() string { + if r.FieldNameTag != "" { + return r.FieldNameTag + } + return "json" +} + +func (r *Reflector) reflectFieldName(f reflect.StructField) (string, bool, bool, bool) { + jsonTagString := f.Tag.Get(r.fieldNameTag()) + jsonTags := strings.Split(jsonTagString, ",") + + if ignoredByJSONTags(jsonTags) { + return "", false, false, false + } + + schemaTags := strings.Split(f.Tag.Get("jsonschema"), ",") + if ignoredByJSONSchemaTags(schemaTags) { + return "", false, false, false + } + + var required bool + if !r.RequiredFromJSONSchemaTags { + requiredFromJSONTags(jsonTags, &required) + } + requiredFromJSONSchemaTags(schemaTags, &required) + + nullable := nullableFromJSONSchemaTags(schemaTags) + + if f.Anonymous && jsonTags[0] == "" { + // As per JSON Marshal rules, anonymous structs are inherited + if f.Type.Kind() == reflect.Struct { + return "", true, false, false + } + + // As per JSON Marshal rules, anonymous pointer to structs are inherited + if f.Type.Kind() == reflect.Ptr && f.Type.Elem().Kind() == reflect.Struct { + return "", true, false, false + } + } + + // Try to determine the name from the different combos + name := f.Name + if jsonTags[0] != "" { + name = jsonTags[0] + } + if !f.Anonymous && f.PkgPath != "" { + // field not anonymous and not export has no export name + name = "" + } else if r.KeyNamer != nil { + name = r.KeyNamer(name) + } + + return name, false, required, nullable +} + +// UnmarshalJSON is used to parse a schema object or boolean. +func (t *Schema) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte("true")) { + *t = *TrueSchema + return nil + } else if bytes.Equal(data, []byte("false")) { + *t = *FalseSchema + return nil + } + type SchemaAlt Schema + aux := &struct { + *SchemaAlt + }{ + SchemaAlt: (*SchemaAlt)(t), + } + return json.Unmarshal(data, aux) +} + +// MarshalJSON is used to serialize a schema object or boolean. +func (t *Schema) MarshalJSON() ([]byte, error) { + if t.boolean != nil { + if *t.boolean { + return []byte("true"), nil + } + return []byte("false"), nil + } + if reflect.DeepEqual(&Schema{}, t) { + // Don't bother returning empty schemas + return []byte("true"), nil + } + type SchemaAlt Schema + b, err := json.Marshal((*SchemaAlt)(t)) + if err != nil { + return nil, err + } + if t.Extras == nil || len(t.Extras) == 0 { + return b, nil + } + m, err := json.Marshal(t.Extras) + if err != nil { + return nil, err + } + if len(b) == 2 { + return m, nil + } + b[len(b)-1] = ',' + return append(b, m[1:]...), nil +} + +func (r *Reflector) typeName(t reflect.Type) string { + if r.Namer != nil { + if name := r.Namer(t); name != "" { + return name + } + } + return t.Name() +} + +// Split on commas that are not preceded by `\`. 
+// This way, we prevent splitting regexes +func splitOnUnescapedCommas(tagString string) []string { + ret := make([]string, 0) + separated := strings.Split(tagString, ",") + ret = append(ret, separated[0]) + i := 0 + for _, nextTag := range separated[1:] { + if len(ret[i]) == 0 { + ret = append(ret, nextTag) + i++ + continue + } + + if ret[i][len(ret[i])-1] == '\\' { + ret[i] = ret[i][:len(ret[i])-1] + "," + nextTag + } else { + ret = append(ret, nextTag) + i++ + } + } + + return ret +} + +func fullyQualifiedTypeName(t reflect.Type) string { + return t.PkgPath() + "." + t.Name() +} + +// AddGoComments will update the reflectors comment map with all the comments +// found in the provided source directories. See the #ExtractGoComments method +// for more details. +func (r *Reflector) AddGoComments(base, path string) error { + if r.CommentMap == nil { + r.CommentMap = make(map[string]string) + } + return ExtractGoComments(base, path, r.CommentMap) +} diff --git a/vendor/github.com/invopop/jsonschema/schema.go b/vendor/github.com/invopop/jsonschema/schema.go new file mode 100644 index 0000000..2d914b8 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/schema.go @@ -0,0 +1,94 @@ +package jsonschema + +import ( + "encoding/json" + + orderedmap "github.com/wk8/go-ordered-map/v2" +) + +// Version is the JSON Schema version. +var Version = "https://json-schema.org/draft/2020-12/schema" + +// Schema represents a JSON Schema object type. +// RFC draft-bhutton-json-schema-00 section 4.3 +type Schema struct { + // RFC draft-bhutton-json-schema-00 + Version string `json:"$schema,omitempty"` // section 8.1.1 + ID ID `json:"$id,omitempty"` // section 8.2.1 + Anchor string `json:"$anchor,omitempty"` // section 8.2.2 + Ref string `json:"$ref,omitempty"` // section 8.2.3.1 + DynamicRef string `json:"$dynamicRef,omitempty"` // section 8.2.3.2 + Definitions Definitions `json:"$defs,omitempty"` // section 8.2.4 + Comments string `json:"$comment,omitempty"` // section 8.3 + // RFC draft-bhutton-json-schema-00 section 10.2.1 (Sub-schemas with logic) + AllOf []*Schema `json:"allOf,omitempty"` // section 10.2.1.1 + AnyOf []*Schema `json:"anyOf,omitempty"` // section 10.2.1.2 + OneOf []*Schema `json:"oneOf,omitempty"` // section 10.2.1.3 + Not *Schema `json:"not,omitempty"` // section 10.2.1.4 + // RFC draft-bhutton-json-schema-00 section 10.2.2 (Apply sub-schemas conditionally) + If *Schema `json:"if,omitempty"` // section 10.2.2.1 + Then *Schema `json:"then,omitempty"` // section 10.2.2.2 + Else *Schema `json:"else,omitempty"` // section 10.2.2.3 + DependentSchemas map[string]*Schema `json:"dependentSchemas,omitempty"` // section 10.2.2.4 + // RFC draft-bhutton-json-schema-00 section 10.3.1 (arrays) + PrefixItems []*Schema `json:"prefixItems,omitempty"` // section 10.3.1.1 + Items *Schema `json:"items,omitempty"` // section 10.3.1.2 (replaces additionalItems) + Contains *Schema `json:"contains,omitempty"` // section 10.3.1.3 + // RFC draft-bhutton-json-schema-00 section 10.3.2 (sub-schemas) + Properties *orderedmap.OrderedMap[string, *Schema] `json:"properties,omitempty"` // section 10.3.2.1 + PatternProperties map[string]*Schema `json:"patternProperties,omitempty"` // section 10.3.2.2 + AdditionalProperties *Schema `json:"additionalProperties,omitempty"` // section 10.3.2.3 + PropertyNames *Schema `json:"propertyNames,omitempty"` // section 10.3.2.4 + // RFC draft-bhutton-json-schema-validation-00, section 6 + Type string `json:"type,omitempty"` // section 6.1.1 + Enum []any `json:"enum,omitempty"` // 
section 6.1.2 + Const any `json:"const,omitempty"` // section 6.1.3 + MultipleOf json.Number `json:"multipleOf,omitempty"` // section 6.2.1 + Maximum json.Number `json:"maximum,omitempty"` // section 6.2.2 + ExclusiveMaximum json.Number `json:"exclusiveMaximum,omitempty"` // section 6.2.3 + Minimum json.Number `json:"minimum,omitempty"` // section 6.2.4 + ExclusiveMinimum json.Number `json:"exclusiveMinimum,omitempty"` // section 6.2.5 + MaxLength *uint64 `json:"maxLength,omitempty"` // section 6.3.1 + MinLength *uint64 `json:"minLength,omitempty"` // section 6.3.2 + Pattern string `json:"pattern,omitempty"` // section 6.3.3 + MaxItems *uint64 `json:"maxItems,omitempty"` // section 6.4.1 + MinItems *uint64 `json:"minItems,omitempty"` // section 6.4.2 + UniqueItems bool `json:"uniqueItems,omitempty"` // section 6.4.3 + MaxContains *uint64 `json:"maxContains,omitempty"` // section 6.4.4 + MinContains *uint64 `json:"minContains,omitempty"` // section 6.4.5 + MaxProperties *uint64 `json:"maxProperties,omitempty"` // section 6.5.1 + MinProperties *uint64 `json:"minProperties,omitempty"` // section 6.5.2 + Required []string `json:"required,omitempty"` // section 6.5.3 + DependentRequired map[string][]string `json:"dependentRequired,omitempty"` // section 6.5.4 + // RFC draft-bhutton-json-schema-validation-00, section 7 + Format string `json:"format,omitempty"` + // RFC draft-bhutton-json-schema-validation-00, section 8 + ContentEncoding string `json:"contentEncoding,omitempty"` // section 8.3 + ContentMediaType string `json:"contentMediaType,omitempty"` // section 8.4 + ContentSchema *Schema `json:"contentSchema,omitempty"` // section 8.5 + // RFC draft-bhutton-json-schema-validation-00, section 9 + Title string `json:"title,omitempty"` // section 9.1 + Description string `json:"description,omitempty"` // section 9.1 + Default any `json:"default,omitempty"` // section 9.2 + Deprecated bool `json:"deprecated,omitempty"` // section 9.3 + ReadOnly bool `json:"readOnly,omitempty"` // section 9.4 + WriteOnly bool `json:"writeOnly,omitempty"` // section 9.4 + Examples []any `json:"examples,omitempty"` // section 9.5 + + Extras map[string]any `json:"-"` + + // Special boolean representation of the Schema - section 4.3.2 + boolean *bool +} + +var ( + // TrueSchema defines a schema with a true value + TrueSchema = &Schema{boolean: &[]bool{true}[0]} + // FalseSchema defines a schema with a false value + FalseSchema = &Schema{boolean: &[]bool{false}[0]} +) + +// Definitions hold schema definitions. +// http://json-schema.org/latest/json-schema-validation.html#rfc.section.5.26 +// RFC draft-wright-json-schema-validation-00, section 5.26 +type Definitions map[string]*Schema diff --git a/vendor/github.com/invopop/jsonschema/utils.go b/vendor/github.com/invopop/jsonschema/utils.go new file mode 100644 index 0000000..ed8edf7 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/utils.go @@ -0,0 +1,26 @@ +package jsonschema + +import ( + "regexp" + "strings" + + orderedmap "github.com/wk8/go-ordered-map/v2" +) + +var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") + +// ToSnakeCase converts the provided string into snake case using dashes. +// This is useful for Schema IDs and definitions to be coherent with +// common JSON Schema examples. 
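+// For example, "FooBar" becomes "foo-bar".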
+func ToSnakeCase(str string) string { + snake := matchFirstCap.ReplaceAllString(str, "${1}-${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}-${2}") + return strings.ToLower(snake) +} + +// NewProperties is a helper method to instantiate a new properties ordered +// map. +func NewProperties() *orderedmap.OrderedMap[string, *Schema] { + return orderedmap.New[string, *Schema]() +} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 0000000..fbff658 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 0000000..598a54a --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,278 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "net" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size >= config.PooledSize { + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. 
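+// Rather than growing a single slice, a Buffer starts a fresh chunk when
+// the current one fills up, keeping previously written chunks intact.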
+type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) < s { + b.ensureSpaceSlow(s) + } +} + +func (b *Buffer) ensureSpaceSlow(s int) { + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + b.EnsureSpace(1) + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendBytesSlow(data) + } +} + +func (b *Buffer) appendBytesSlow(data []byte) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendString appends a string to buffer. +func (b *Buffer) AppendString(data string) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendStringSlow(data) + } +} + +func (b *Buffer) appendStringSlow(data string) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + bufs := net.Buffers(b.bufs) + if len(b.Buf) > 0 { + bufs = append(bufs, b.Buf) + } + n, err := bufs.WriteTo(w) + + for _, buf := range b.bufs { + putBuf(buf) + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return int(n), err +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enough, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? 
+		if r.offset+x == len(buf) {
+			// On to the next buffer.
+			r.offset = 0
+			r.bufs = r.bufs[1:]
+
+			// We can release this buffer.
+			putBuf(buf)
+		} else {
+			r.offset += x
+		}
+
+		if n == len(p) {
+			break
+		}
+	}
+	// No buffers left or nothing read?
+	if len(r.bufs) == 0 {
+		err = io.EOF
+	}
+	return
+}
+
+func (r *readCloser) Close() error {
+	// Release all remaining buffers.
+	for _, buf := range r.bufs {
+		putBuf(buf)
+	}
+	// In case Close gets called multiple times.
+	r.bufs = nil
+
+	return nil
+}
+
+// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+func (b *Buffer) ReadCloser() io.ReadCloser {
+	ret := &readCloser{0, append(b.bufs, b.Buf)}
+
+	b.bufs = nil
+	b.toPool = nil
+	b.Buf = nil
+
+	return ret
+}
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 0000000..2c5b201
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,405 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+	"io"
+	"strconv"
+	"unicode/utf8"
+
+	"github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
+// the Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+	NilMapAsEmpty   Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+	NilSliceAsEmpty                   // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
+type Writer struct {
+	Flags Flags
+
+	Error        error
+	Buffer       buffer.Buffer
+	NoEscapeHTML bool
+}
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+	return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to the given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+	return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
+// as argument that it will try to reuse.
+func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.BuildBytes(reuse...), nil
+}
+
+// ReadCloser returns an io.ReadCloser that can be used to read the data.
+// ReadCloser also resets the buffer.
+func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.ReadCloser(), nil
+}
+
+// RawByte appends a raw byte to the buffer.
+func (w *Writer) RawByte(c byte) {
+	w.Buffer.AppendByte(c)
+}
+
+// RawString appends a raw string to the buffer.
+func (w *Writer) RawString(s string) {
+	w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+// calling with results of MarshalJSON-like functions.
+func (w *Writer) Raw(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	case len(data) > 0:
+		w.Buffer.AppendBytes(data)
+	default:
+		w.RawString("null")
+	}
+}
+
+// RawText encloses raw binary data in quotes and appends it to the buffer.
+// Useful for calling with results of MarshalText-like functions.
+func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + w.base64(data) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) 
{ + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + + for i := 0; i < 128; i++ { + table[i] = true + } + + for _, v := range falseValues { + table[v] = false + } + + return table +} + +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
+ + p := 0 // last non-escape symbol + + escapeTable := &htmlEscapeTable + if w.NoEscapeHTML { + escapeTable = &htmlNoEscapeTable + } + + for i := 0; i < len(s); { + c := s[i] + + if c < utf8.RuneSelf { + if escapeTable[c] { + // single-width character, no escaping is required + i++ + continue + } + + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} + +const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" +const padChar = '=' + +func (w *Writer) base64(in []byte) { + + if len(in) == 0 { + return + } + + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) + + si := 0 + n := (len(in) / 3) * 3 + + for si < n { + // Convert 3x 8bit source bytes into 4 bytes + val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) + + si += 3 + } + + remain := len(in) - si + if remain == 0 { + return + } + + // Add the remaining small block + val := uint(in[si+0]) << 16 + if remain == 2 { + val |= uint(in[si+1]) << 8 + } + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) + + switch remain { + case 2: + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) + case 1: + w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) + } +} diff --git a/vendor/github.com/wk8/go-ordered-map/v2/.gitignore b/vendor/github.com/wk8/go-ordered-map/v2/.gitignore new file mode 100644 index 0000000..57872d0 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/.gitignore @@ -0,0 +1 @@ +/vendor/ diff --git a/vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml b/vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml new file mode 100644 index 0000000..2417df1 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml @@ -0,0 +1,80 @@ +run: + tests: false + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - bodyclose + - containedctx + - contextcheck + - decorder + - depguard + - dogsled + - dupl + - durationcheck + - errcheck + - errchkjson + # FIXME: commented out as it crashes with 1.18 for now + # - errname + - errorlint + - exportloopref + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - goconst + - gocritic + - gocyclo + - godox + - gofmt + - gofumpt + - goheader + - goimports + - gomnd + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + - govet + - grouper + - ifshort + - importas + - ineffassign + - lll + - maintidx + - makezero + - misspell + - nakedret + - nilerr + - nilnil + - noctx + - nolintlint + - 
paralleltest + - prealloc + - predeclared + - promlinter + # FIXME: doesn't support 1.18 yet + # - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - tagliatelle + - tenv + - testpackage + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - varnamelen + - wastedassign + - whitespace diff --git a/vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md b/vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md new file mode 100644 index 0000000..f27126f --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md @@ -0,0 +1,38 @@ +# Changelog + +[comment]: # (Changes since last release go here) + +## 2.1.8 - Jun 27th 2023 + +* Added support for YAML serialization/deserialization + +## 2.1.7 - Apr 13th 2023 + +* Renamed test_utils.go to utils_test.go + +## 2.1.6 - Feb 15th 2023 + +* Added `GetAndMoveToBack()` and `GetAndMoveToFront()` methods + +## 2.1.5 - Dec 13th 2022 + +* Added `Value()` method + +## 2.1.4 - Dec 12th 2022 + +* Fixed a bug with UTF-8 special characters in JSON keys + +## 2.1.3 - Dec 11th 2022 + +* Added support for JSON marshalling/unmarshalling of wrapper of primitive types + +## 2.1.2 - Dec 10th 2022 +* Allowing to pass options to `New`, to give a capacity hint, or initial data +* Allowing to deserialize nested ordered maps from JSON without having to explicitly instantiate them +* Added the `AddPairs` method + +## 2.1.1 - Dec 9th 2022 +* Fixing a bug with JSON marshalling + +## 2.1.0 - Dec 7th 2022 +* Added support for JSON serialization/deserialization diff --git a/vendor/github.com/wk8/go-ordered-map/v2/LICENSE b/vendor/github.com/wk8/go-ordered-map/v2/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/wk8/go-ordered-map/v2/Makefile b/vendor/github.com/wk8/go-ordered-map/v2/Makefile new file mode 100644 index 0000000..6e0e18a --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/Makefile @@ -0,0 +1,32 @@ +.DEFAULT_GOAL := all + +.PHONY: all +all: test_with_fuzz lint + +# the TEST_FLAGS env var can be set to eg run only specific tests +TEST_COMMAND = go test -v -count=1 -race -cover $(TEST_FLAGS) + +.PHONY: test +test: + $(TEST_COMMAND) + +.PHONY: bench +bench: + go test -bench=. 
+ +FUZZ_TIME ?= 10s + +# see https://github.com/golang/go/issues/46312 +# and https://stackoverflow.com/a/72673487/4867444 +# if we end up having more fuzz tests +.PHONY: test_with_fuzz +test_with_fuzz: + $(TEST_COMMAND) -fuzz=FuzzRoundTripJSON -fuzztime=$(FUZZ_TIME) + $(TEST_COMMAND) -fuzz=FuzzRoundTripYAML -fuzztime=$(FUZZ_TIME) + +.PHONY: fuzz +fuzz: test_with_fuzz + +.PHONY: lint +lint: + golangci-lint run diff --git a/vendor/github.com/wk8/go-ordered-map/v2/README.md b/vendor/github.com/wk8/go-ordered-map/v2/README.md new file mode 100644 index 0000000..b028944 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/README.md @@ -0,0 +1,154 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/wk8/go-ordered-map/v2.svg)](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2) +[![Build Status](https://circleci.com/gh/wk8/go-ordered-map.svg?style=svg)](https://app.circleci.com/pipelines/github/wk8/go-ordered-map) + +# Golang Ordered Maps + +Same as regular maps, but they also remember the order in which keys were inserted, akin to [Python's `collections.OrderedDict`s](https://docs.python.org/3.7/library/collections.html#ordereddict-objects). + +It offers the following features: +* optimal runtime performance (all operations are constant time) +* optimal memory usage (only one copy of values, no unnecessary memory allocation) +* allows iterating from either the newest or the oldest key, without copying memory; you can `break` out of the iteration early, and it runs in time linear to the number of keys iterated over rather than the total length of the ordered map +* supports any generic types for both keys and values. If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) that takes and returns generic `interface{}`s instead of using generics +* idiomatic API, akin to that of [`container/list`](https://golang.org/pkg/container/list) +* support for JSON and YAML marshalling + +## Documentation + +[The full documentation is available on pkg.go.dev](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2). + +## Installation +```bash +go get -u github.com/wk8/go-ordered-map/v2 +``` + +Or use your favorite golang vendoring tool! + +## Supported go versions + +Go >= 1.18 is required to use version >= 2 of this library, as it uses generics. + +If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) instead.
+ +## Example / usage + +```go +package main + +import ( + "fmt" + + "github.com/wk8/go-ordered-map/v2" +) + +func main() { + om := orderedmap.New[string, string]() + + om.Set("foo", "bar") + om.Set("bar", "baz") + om.Set("coucou", "toi") + + fmt.Println(om.Get("foo")) // => "bar", true + fmt.Println(om.Get("i dont exist")) // => "", false + + // iterating pairs from oldest to newest: + for pair := om.Oldest(); pair != nil; pair = pair.Next() { + fmt.Printf("%s => %s\n", pair.Key, pair.Value) + } // prints: + // foo => bar + // bar => baz + // coucou => toi + + // iterating over the 2 newest pairs: + i := 0 + for pair := om.Newest(); pair != nil; pair = pair.Prev() { + fmt.Printf("%s => %s\n", pair.Key, pair.Value) + i++ + if i >= 2 { + break + } + } // prints: + // coucou => toi + // bar => baz +} +``` + +An `OrderedMap`'s keys must implement `comparable`, and its values can be anything, for example: + +```go +type myStruct struct { + payload string +} + +func main() { + om := orderedmap.New[int, *myStruct]() + + om.Set(12, &myStruct{"foo"}) + om.Set(1, &myStruct{"bar"}) + + value, present := om.Get(12) + if !present { + panic("should be there!") + } + fmt.Println(value.payload) // => foo + + for pair := om.Oldest(); pair != nil; pair = pair.Next() { + fmt.Printf("%d => %s\n", pair.Key, pair.Value.payload) + } // prints: + // 12 => foo + // 1 => bar +} +``` + +It's also worth noting that you can provision ordered maps with a capacity hint, just as you would with `make(map[K]V, capacity)`: +```go +om := orderedmap.New[int, *myStruct](28) +``` + +You can also pass in some initial data to store in the map: +```go +om := orderedmap.New[int, string](orderedmap.WithInitialData[int, string]( + orderedmap.Pair[int, string]{ + Key: 12, + Value: "foo", + }, + orderedmap.Pair[int, string]{ + Key: 28, + Value: "bar", + }, +)) +``` + +`OrderedMap`s also support JSON serialization/deserialization, and preserve order: + +```go +// serialization +data, err := json.Marshal(om) +... + +// deserialization +om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect +err := json.Unmarshal(data, &om) +... +``` + +Similarly, they support YAML serialization/deserialization using the yaml.v3 package, which also preserves order: + +```go +// serialization +data, err := yaml.Marshal(om) +... + +// deserialization +om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect +err := yaml.Unmarshal(data, &om) +...
+``` + +## Alternatives + +There are several other ordered map golang implementations out there, but I believe that at the time of writing none of them offer the same functionality as this library; more specifically: +* [iancoleman/orderedmap](https://github.com/iancoleman/orderedmap) only accepts `string` keys, its `Delete` operations are linear +* [cevaris/ordered_map](https://github.com/cevaris/ordered_map) uses a channel for iterations, and leaks goroutines if the iteration is interrupted before fully traversing the map +* [mantyr/iterator](https://github.com/mantyr/iterator) also uses a channel for iterations, and its `Delete` operations are linear +* [samdolan/go-ordered-map](https://github.com/samdolan/go-ordered-map) adds unnecessary locking (users should add their own locking instead if they need it), its `Delete` and `Get` operations are linear, iterations trigger a linear memory allocation diff --git a/vendor/github.com/wk8/go-ordered-map/v2/json.go b/vendor/github.com/wk8/go-ordered-map/v2/json.go new file mode 100644 index 0000000..a545b53 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/json.go @@ -0,0 +1,182 @@ +package orderedmap + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "reflect" + "unicode/utf8" + + "github.com/buger/jsonparser" + "github.com/mailru/easyjson/jwriter" +) + +var ( + _ json.Marshaler = &OrderedMap[int, any]{} + _ json.Unmarshaler = &OrderedMap[int, any]{} +) + +// MarshalJSON implements the json.Marshaler interface. +func (om *OrderedMap[K, V]) MarshalJSON() ([]byte, error) { //nolint:funlen + if om == nil || om.list == nil { + return []byte("null"), nil + } + + writer := jwriter.Writer{} + writer.RawByte('{') + + for pair, firstIteration := om.Oldest(), true; pair != nil; pair = pair.Next() { + if firstIteration { + firstIteration = false + } else { + writer.RawByte(',') + } + + switch key := any(pair.Key).(type) { + case string: + writer.String(key) + case encoding.TextMarshaler: + writer.RawByte('"') + writer.Raw(key.MarshalText()) + writer.RawByte('"') + case int: + writer.IntStr(key) + case int8: + writer.Int8Str(key) + case int16: + writer.Int16Str(key) + case int32: + writer.Int32Str(key) + case int64: + writer.Int64Str(key) + case uint: + writer.UintStr(key) + case uint8: + writer.Uint8Str(key) + case uint16: + writer.Uint16Str(key) + case uint32: + writer.Uint32Str(key) + case uint64: + writer.Uint64Str(key) + default: + + // this switch takes care of wrapper types around primitive types, such as + // type myType string + switch keyValue := reflect.ValueOf(key); keyValue.Type().Kind() { + case reflect.String: + writer.String(keyValue.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + writer.Int64Str(keyValue.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + writer.Uint64Str(keyValue.Uint()) + default: + return nil, fmt.Errorf("unsupported key type: %T", key) + } + } + + writer.RawByte(':') + // the error is checked at the end of the function + writer.Raw(json.Marshal(pair.Value)) //nolint:errchkjson + } + + writer.RawByte('}') + + return dumpWriter(&writer) +} + +func dumpWriter(writer *jwriter.Writer) ([]byte, error) { + if writer.Error != nil { + return nil, writer.Error + } + + var buf bytes.Buffer + buf.Grow(writer.Size()) + if _, err := writer.DumpTo(&buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
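+// It decodes a JSON object into the map, preserving the order in which keys appear +// in the input; values are decoded with encoding/json, so nested ordered maps work too.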
+func (om *OrderedMap[K, V]) UnmarshalJSON(data []byte) error { + if om.list == nil { + om.initialize(0) + } + + return jsonparser.ObjectEach( + data, + func(keyData []byte, valueData []byte, dataType jsonparser.ValueType, offset int) error { + if dataType == jsonparser.String { + // jsonparser removes the enclosing quotes; we need to restore them to make a valid JSON + valueData = data[offset-len(valueData)-2 : offset] + } + + var key K + var value V + + switch typedKey := any(&key).(type) { + case *string: + s, err := decodeUTF8(keyData) + if err != nil { + return err + } + *typedKey = s + case encoding.TextUnmarshaler: + if err := typedKey.UnmarshalText(keyData); err != nil { + return err + } + case *int, *int8, *int16, *int32, *int64, *uint, *uint8, *uint16, *uint32, *uint64: + if err := json.Unmarshal(keyData, typedKey); err != nil { + return err + } + default: + // this switch takes care of wrapper types around primitive types, such as + // type myType string + switch reflect.TypeOf(key).Kind() { + case reflect.String: + s, err := decodeUTF8(keyData) + if err != nil { + return err + } + + convertedKeyData := reflect.ValueOf(s).Convert(reflect.TypeOf(key)) + reflect.ValueOf(&key).Elem().Set(convertedKeyData) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if err := json.Unmarshal(keyData, &key); err != nil { + return err + } + default: + return fmt.Errorf("unsupported key type: %T", key) + } + } + + if err := json.Unmarshal(valueData, &value); err != nil { + return err + } + + om.Set(key, value) + return nil + }) +} + +func decodeUTF8(input []byte) (string, error) { + remaining, offset := input, 0 + runes := make([]rune, 0, len(remaining)) + + for len(remaining) > 0 { + r, size := utf8.DecodeRune(remaining) + if r == utf8.RuneError && size <= 1 { + return "", fmt.Errorf("not a valid UTF-8 string (at position %d): %s", offset, string(input)) + } + + runes = append(runes, r) + remaining = remaining[size:] + offset += size + } + + return string(runes), nil +} diff --git a/vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go b/vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go new file mode 100644 index 0000000..0647141 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go @@ -0,0 +1,296 @@ +// Package orderedmap implements an ordered map, i.e. a map that also keeps track of +// the order in which keys were inserted. +// +// All operations are constant-time. +// +// Github repo: https://github.com/wk8/go-ordered-map +// +package orderedmap + +import ( + "fmt" + + list "github.com/bahlo/generic-list-go" +) + +type Pair[K comparable, V any] struct { + Key K + Value V + + element *list.Element[*Pair[K, V]] +} + +type OrderedMap[K comparable, V any] struct { + pairs map[K]*Pair[K, V] + list *list.List[*Pair[K, V]] +} + +type initConfig[K comparable, V any] struct { + capacity int + initialData []Pair[K, V] +} + +type InitOption[K comparable, V any] func(config *initConfig[K, V]) + +// WithCapacity allows giving a capacity hint for the map, akin to the standard make(map[K]V, capacity). +func WithCapacity[K comparable, V any](capacity int) InitOption[K, V] { + return func(c *initConfig[K, V]) { + c.capacity = capacity + } +} + +// WithInitialData allows passing in initial data for the map. 
+func WithInitialData[K comparable, V any](initialData ...Pair[K, V]) InitOption[K, V] { + return func(c *initConfig[K, V]) { + c.initialData = initialData + if c.capacity < len(initialData) { + c.capacity = len(initialData) + } + } +} + +// New creates a new OrderedMap. +// options can either be one or several InitOption[K, V], or a single integer, +// which is then interpreted as a capacity hint, à la make(map[K]V, capacity). +func New[K comparable, V any](options ...any) *OrderedMap[K, V] { //nolint:varnamelen + orderedMap := &OrderedMap[K, V]{} + + var config initConfig[K, V] + for _, untypedOption := range options { + switch option := untypedOption.(type) { + case int: + if len(options) != 1 { + invalidOption() + } + config.capacity = option + + case InitOption[K, V]: + option(&config) + + default: + invalidOption() + } + } + + orderedMap.initialize(config.capacity) + orderedMap.AddPairs(config.initialData...) + + return orderedMap +} + +const invalidOptionMessage = `when using orderedmap.New[K,V]() with options, either provide one or several InitOption[K, V]; or a single integer which is then interpreted as a capacity hint, à la make(map[K]V, capacity).` //nolint:lll + +func invalidOption() { panic(invalidOptionMessage) } + +func (om *OrderedMap[K, V]) initialize(capacity int) { + om.pairs = make(map[K]*Pair[K, V], capacity) + om.list = list.New[*Pair[K, V]]() +} + +// Get looks for the given key, and returns the value associated with it, +// or V's nil value if not found. The boolean it returns says whether the key is present in the map. +func (om *OrderedMap[K, V]) Get(key K) (val V, present bool) { + if pair, present := om.pairs[key]; present { + return pair.Value, true + } + + return +} + +// Load is an alias for Get, mostly to present an API similar to `sync.Map`'s. +func (om *OrderedMap[K, V]) Load(key K) (V, bool) { + return om.Get(key) +} + +// Value returns the value associated with the given key or the zero value. +func (om *OrderedMap[K, V]) Value(key K) (val V) { + if pair, present := om.pairs[key]; present { + val = pair.Value + } + return +} + +// GetPair looks for the given key, and returns the pair associated with it, +// or nil if not found. The Pair struct can then be used to iterate over the ordered map +// from that point, either forward or backward. +func (om *OrderedMap[K, V]) GetPair(key K) *Pair[K, V] { + return om.pairs[key] +} + +// Set sets the key-value pair, and returns what `Get` would have returned +// on that key prior to the call to `Set`. +func (om *OrderedMap[K, V]) Set(key K, value V) (val V, present bool) { + if pair, present := om.pairs[key]; present { + oldValue := pair.Value + pair.Value = value + return oldValue, true + } + + pair := &Pair[K, V]{ + Key: key, + Value: value, + } + pair.element = om.list.PushBack(pair) + om.pairs[key] = pair + + return +} + +// AddPairs allows setting multiple pairs at a time. It's equivalent to calling +// Set on each pair sequentially. +func (om *OrderedMap[K, V]) AddPairs(pairs ...Pair[K, V]) { + for _, pair := range pairs { + om.Set(pair.Key, pair.Value) + } +} + +// Store is an alias for Set, mostly to present an API similar to `sync.Map`'s. +func (om *OrderedMap[K, V]) Store(key K, value V) (V, bool) { + return om.Set(key, value) +} + +// Delete removes the key-value pair, and returns what `Get` would have returned +// on that key prior to the call to `Delete`. 
+func (om *OrderedMap[K, V]) Delete(key K) (val V, present bool) { + if pair, present := om.pairs[key]; present { + om.list.Remove(pair.element) + delete(om.pairs, key) + return pair.Value, true + } + return +} + +// Len returns the length of the ordered map. +func (om *OrderedMap[K, V]) Len() int { + if om == nil || om.pairs == nil { + return 0 + } + return len(om.pairs) +} + +// Oldest returns a pointer to the oldest pair. It's meant to be used to iterate on the ordered map's +// pairs from the oldest to the newest, e.g.: +// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) } +func (om *OrderedMap[K, V]) Oldest() *Pair[K, V] { + if om == nil || om.list == nil { + return nil + } + return listElementToPair(om.list.Front()) +} + +// Newest returns a pointer to the newest pair. It's meant to be used to iterate on the ordered map's +// pairs from the newest to the oldest, e.g.: +// for pair := orderedMap.Newest(); pair != nil; pair = pair.Prev() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) } +func (om *OrderedMap[K, V]) Newest() *Pair[K, V] { + if om == nil || om.list == nil { + return nil + } + return listElementToPair(om.list.Back()) +} + +// Next returns a pointer to the next pair. +func (p *Pair[K, V]) Next() *Pair[K, V] { + return listElementToPair(p.element.Next()) +} + +// Prev returns a pointer to the previous pair. +func (p *Pair[K, V]) Prev() *Pair[K, V] { + return listElementToPair(p.element.Prev()) +} + +func listElementToPair[K comparable, V any](element *list.Element[*Pair[K, V]]) *Pair[K, V] { + if element == nil { + return nil + } + return element.Value +} + +// KeyNotFoundError may be returned by functions in this package when they're called with keys that are not present +// in the map. +type KeyNotFoundError[K comparable] struct { + MissingKey K +} + +func (e *KeyNotFoundError[K]) Error() string { + return fmt.Sprintf("missing key: %v", e.MissingKey) +} + +// MoveAfter moves the value associated with key to its new position after the one associated with markKey. +// Returns an error iff key or markKey are not present in the map. If an error is returned, +// it will be a KeyNotFoundError. +func (om *OrderedMap[K, V]) MoveAfter(key, markKey K) error { + elements, err := om.getElements(key, markKey) + if err != nil { + return err + } + om.list.MoveAfter(elements[0], elements[1]) + return nil +} + +// MoveBefore moves the value associated with key to its new position before the one associated with markKey. +// Returns an error iff key or markKey are not present in the map. If an error is returned, +// it will be a KeyNotFoundError. +func (om *OrderedMap[K, V]) MoveBefore(key, markKey K) error { + elements, err := om.getElements(key, markKey) + if err != nil { + return err + } + om.list.MoveBefore(elements[0], elements[1]) + return nil +} + +func (om *OrderedMap[K, V]) getElements(keys ...K) ([]*list.Element[*Pair[K, V]], error) { + elements := make([]*list.Element[*Pair[K, V]], len(keys)) + for i, k := range keys { + pair, present := om.pairs[k] + if !present { + return nil, &KeyNotFoundError[K]{k} + } + elements[i] = pair.element + } + return elements, nil +} + +// MoveToBack moves the value associated with key to the back of the ordered map, +// i.e. makes it the newest pair in the map. +// Returns an error iff key is not present in the map. If an error is returned, +// it will be a KeyNotFoundError.
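+// It is equivalent to calling GetAndMoveToBack and discarding the returned value.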
+func (om *OrderedMap[K, V]) MoveToBack(key K) error { + _, err := om.GetAndMoveToBack(key) + return err +} + +// MoveToFront moves the value associated with key to the front of the ordered map, +// i.e. makes it the oldest pair in the map. +// Returns an error iff key is not present in the map. If an error is returned, +// it will be a KeyNotFoundError. +func (om *OrderedMap[K, V]) MoveToFront(key K) error { + _, err := om.GetAndMoveToFront(key) + return err +} + +// GetAndMoveToBack combines Get and MoveToBack in the same call. If an error is returned, +// it will be a KeyNotFoundError. +func (om *OrderedMap[K, V]) GetAndMoveToBack(key K) (val V, err error) { + if pair, present := om.pairs[key]; present { + val = pair.Value + om.list.MoveToBack(pair.element) + } else { + err = &KeyNotFoundError[K]{key} + } + + return +} + +// GetAndMoveToFront combines Get and MoveToFront in the same call. If an error is returned, +// it will be a KeyNotFoundError. +func (om *OrderedMap[K, V]) GetAndMoveToFront(key K) (val V, err error) { + if pair, present := om.pairs[key]; present { + val = pair.Value + om.list.MoveToFront(pair.element) + } else { + err = &KeyNotFoundError[K]{key} + } + + return +} diff --git a/vendor/github.com/wk8/go-ordered-map/v2/yaml.go b/vendor/github.com/wk8/go-ordered-map/v2/yaml.go new file mode 100644 index 0000000..6022471 --- /dev/null +++ b/vendor/github.com/wk8/go-ordered-map/v2/yaml.go @@ -0,0 +1,71 @@ +package orderedmap + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +var ( + _ yaml.Marshaler = &OrderedMap[int, any]{} + _ yaml.Unmarshaler = &OrderedMap[int, any]{} +) + +// MarshalYAML implements the yaml.Marshaler interface. +func (om *OrderedMap[K, V]) MarshalYAML() (interface{}, error) { + if om == nil { + return []byte("null"), nil + } + + node := yaml.Node{ + Kind: yaml.MappingNode, + } + + for pair := om.Oldest(); pair != nil; pair = pair.Next() { + key, value := pair.Key, pair.Value + + keyNode := &yaml.Node{} + + // serialize key to yaml, then deserialize it back into the node + // this is a hack to get the correct tag for the key + if err := keyNode.Encode(key); err != nil { + return nil, err + } + + valueNode := &yaml.Node{} + if err := valueNode.Encode(value); err != nil { + return nil, err + } + + node.Content = append(node.Content, keyNode, valueNode) + } + + return &node, nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
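+// The input node must be a YAML mapping; its key and value nodes are decoded +// pairwise, in document order.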
+func (om *OrderedMap[K, V]) UnmarshalYAML(value *yaml.Node) error { + if value.Kind != yaml.MappingNode { + return fmt.Errorf("pipeline must contain YAML mapping, has %v", value.Kind) + } + + if om.list == nil { + om.initialize(0) + } + + for index := 0; index < len(value.Content); index += 2 { + var key K + var val V + + if err := value.Content[index].Decode(&key); err != nil { + return err + } + if err := value.Content[index+1].Decode(&val); err != nil { + return err + } + + om.Set(key, val) + } + + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 983751c..c5ea9a3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2,7 +2,13 @@ ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal -# github.com/cloudbase/garm-provider-common v0.1.4-0.20240906095211-57315d4ac8ae +# github.com/bahlo/generic-list-go v0.2.0 +## explicit; go 1.18 +github.com/bahlo/generic-list-go +# github.com/buger/jsonparser v1.1.1 +## explicit; go 1.13 +github.com/buger/jsonparser +# github.com/cloudbase/garm-provider-common v0.1.4-0.20240912084949-899c120c80ce ## explicit; go 1.22 github.com/cloudbase/garm-provider-common/cloudconfig github.com/cloudbase/garm-provider-common/defaults @@ -55,6 +61,13 @@ github.com/gophercloud/utils/openstack/clientconfig # github.com/gorilla/handlers v1.5.2 ## explicit; go 1.20 github.com/gorilla/handlers +# github.com/invopop/jsonschema v0.12.0 +## explicit; go 1.18 +github.com/invopop/jsonschema +# github.com/mailru/easyjson v0.7.7 +## explicit; go 1.12 +github.com/mailru/easyjson/buffer +github.com/mailru/easyjson/jwriter # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty @@ -80,6 +93,9 @@ github.com/stretchr/testify/mock # github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 ## explicit; go 1.18 github.com/teris-io/shortid +# github.com/wk8/go-ordered-map/v2 v2.1.8 +## explicit; go 1.18 +github.com/wk8/go-ordered-map/v2 # github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f ## explicit github.com/xeipuuv/gojsonpointer