From e375b318f1e2468bb697bee288df64f6e71b1310 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Thu, 23 May 2024 16:55:14 -0700 Subject: [PATCH] Migrate from api-frontend. --- .github/workflows/ci.yaml | 34 + .github/workflows/ent-ci.yaml | 19 + .github/workflows/logging-presubmit.yml | 13 + .github/workflows/migration-ci.yaml | 37 + .gitignore | 5 + Dockerfile | 28 + README.md | 178 + app.yaml | 14 + cloudbuild.yaml | 20 + clouddeploy.yaml | 29 + common/constants.go | 7 + common/types.go | 15 + config/config.go | 6 + db/tx.go | 95 + db/user.go | 19 + docker-compose.yml | 19 + drip/api.gen.go | 3278 +++++++ drip/codegen.yaml | 7 + drip/generate.go | 2 + ent/ciworkflowresult.go | 276 + ent/ciworkflowresult/ciworkflowresult.go | 192 + ent/ciworkflowresult/where.go | 788 ++ ent/ciworkflowresult_create.go | 1265 +++ ent/ciworkflowresult_delete.go | 88 + ent/ciworkflowresult_query.go | 726 ++ ent/ciworkflowresult_update.go | 869 ++ ent/client.go | 1707 ++++ ent/ent.go | 624 ++ ent/enttest/enttest.go | 84 + ent/generate.go | 3 + ent/gitcommit.go | 221 + ent/gitcommit/gitcommit.go | 156 + ent/gitcommit/where.go | 645 ++ ent/gitcommit_create.go | 1005 +++ ent/gitcommit_delete.go | 88 + ent/gitcommit_query.go | 644 ++ ent/gitcommit_update.go | 650 ++ ent/hook/hook.go | 294 + ent/migrate/migrate.go | 64 + ent/migrate/schema.go | 272 + ent/mutation.go | 7625 +++++++++++++++++ ent/node.go | 253 + ent/node/node.go | 181 + ent/node/where.go | 737 ++ ent/node_create.go | 1097 +++ ent/node_delete.go | 88 + ent/node_query.go | 718 ++ ent/node_update.go | 806 ++ ent/nodeversion.go | 238 + ent/nodeversion/nodeversion.go | 164 + ent/nodeversion/where.go | 443 + ent/nodeversion_create.go | 913 ++ ent/nodeversion_delete.go | 88 + ent/nodeversion_query.go | 726 ++ ent/nodeversion_update.go | 614 ++ ent/personalaccesstoken.go | 190 + .../personalaccesstoken.go | 125 + ent/personalaccesstoken/where.go | 465 + ent/personalaccesstoken_create.go | 786 ++ ent/personalaccesstoken_delete.go | 88 
+ ent/personalaccesstoken_query.go | 643 ++ ent/personalaccesstoken_update.go | 445 + ent/predicate/predicate.go | 34 + ent/publisher.go | 240 + ent/publisher/publisher.go | 205 + ent/publisher/where.go | 710 ++ ent/publisher_create.go | 1079 +++ ent/publisher_delete.go | 88 + ent/publisher_query.go | 794 ++ ent/publisher_update.go | 996 +++ ent/publisherpermission.go | 174 + .../publisherpermission.go | 123 + ent/publisherpermission/where.go | 287 + ent/publisherpermission_create.go | 610 ++ ent/publisherpermission_delete.go | 88 + ent/publisherpermission_query.go | 717 ++ ent/publisherpermission_update.go | 466 + ent/runtime.go | 196 + ent/runtime/runtime.go | 10 + ent/schema/ci_workflow_result.go | 56 + ent/schema/git_commit.go | 60 + ent/schema/mixins/createdBy.go | 24 + ent/schema/node.go | 60 + ent/schema/node_version.go | 55 + ent/schema/personal_access_token.go | 52 + ent/schema/publisher.go | 54 + ent/schema/publisherpermission.go | 48 + ent/schema/storage_file.go | 47 + ent/schema/user.go | 37 + ent/storagefile.go | 173 + ent/storagefile/storagefile.go | 117 + ent/storagefile/where.go | 531 ++ ent/storagefile_create.go | 887 ++ ent/storagefile_delete.go | 88 + ent/storagefile_query.go | 564 ++ ent/storagefile_update.go | 474 + ent/tx.go | 234 + ent/user.go | 188 + ent/user/user.go | 133 + ent/user/where.go | 384 + ent/user_create.go | 857 ++ ent/user_delete.go | 88 + ent/user_query.go | 644 ++ ent/user_update.go | 547 ++ gateways/slack/slack.go | 59 + gateways/storage/files.go | 162 + go.mod | 122 + go.sum | 444 + .../registry_integration_test.go | 671 ++ integration-tests/test_util.go | 111 + logging/logging.go | 61 + main.go | 52 + mapper/context.go | 20 + mapper/node.go | 109 + mapper/node_test.go | 152 + mapper/node_version.go | 80 + mapper/personal_access_token.go | 26 + mapper/publisher.go | 120 + mock/gateways/mock_slack_service.go | 14 + mock/gateways/mock_storage_service.go | 33 + my_pg_hba.conf | 2 + openapi.yml | 1489 ++++ run-service-prod.yaml 
| 29 + run-service-staging.yaml | 31 + server/handlers/openapi_handler.go | 18 + server/implementation/api.implementation.go | 25 + server/implementation/cicd.go | 167 + server/implementation/registry.go | 906 ++ server/implementation/user.go | 32 + server/middleware/error_logger.go | 24 + server/middleware/firebase_auth.go | 120 + server/middleware/firebase_auth_test.go | 69 + server/middleware/metric_middleware.go | 184 + server/middleware/service_account_auth.go | 60 + server/middleware/tracing_middleware.go | 46 + server/server.go | 112 + services/comfy_ci/comfy_ci_svc.go | 164 + services/registry/registry_svc.go | 509 ++ skaffold.yaml | 15 + supabase/.gitignore | 4 + supabase/config.toml | 159 + supabase/seed.sql | 0 tools/tools.go | 8 + 143 files changed, 51547 insertions(+) create mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/ent-ci.yaml create mode 100644 .github/workflows/logging-presubmit.yml create mode 100644 .github/workflows/migration-ci.yaml create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 README.md create mode 100644 app.yaml create mode 100644 cloudbuild.yaml create mode 100644 clouddeploy.yaml create mode 100644 common/constants.go create mode 100644 common/types.go create mode 100644 config/config.go create mode 100644 db/tx.go create mode 100644 db/user.go create mode 100644 docker-compose.yml create mode 100644 drip/api.gen.go create mode 100644 drip/codegen.yaml create mode 100644 drip/generate.go create mode 100644 ent/ciworkflowresult.go create mode 100644 ent/ciworkflowresult/ciworkflowresult.go create mode 100644 ent/ciworkflowresult/where.go create mode 100644 ent/ciworkflowresult_create.go create mode 100644 ent/ciworkflowresult_delete.go create mode 100644 ent/ciworkflowresult_query.go create mode 100644 ent/ciworkflowresult_update.go create mode 100644 ent/client.go create mode 100644 ent/ent.go create mode 100644 ent/enttest/enttest.go create mode 100644 ent/generate.go create 
mode 100644 ent/gitcommit.go create mode 100644 ent/gitcommit/gitcommit.go create mode 100644 ent/gitcommit/where.go create mode 100644 ent/gitcommit_create.go create mode 100644 ent/gitcommit_delete.go create mode 100644 ent/gitcommit_query.go create mode 100644 ent/gitcommit_update.go create mode 100644 ent/hook/hook.go create mode 100644 ent/migrate/migrate.go create mode 100644 ent/migrate/schema.go create mode 100644 ent/mutation.go create mode 100644 ent/node.go create mode 100644 ent/node/node.go create mode 100644 ent/node/where.go create mode 100644 ent/node_create.go create mode 100644 ent/node_delete.go create mode 100644 ent/node_query.go create mode 100644 ent/node_update.go create mode 100644 ent/nodeversion.go create mode 100644 ent/nodeversion/nodeversion.go create mode 100644 ent/nodeversion/where.go create mode 100644 ent/nodeversion_create.go create mode 100644 ent/nodeversion_delete.go create mode 100644 ent/nodeversion_query.go create mode 100644 ent/nodeversion_update.go create mode 100644 ent/personalaccesstoken.go create mode 100644 ent/personalaccesstoken/personalaccesstoken.go create mode 100644 ent/personalaccesstoken/where.go create mode 100644 ent/personalaccesstoken_create.go create mode 100644 ent/personalaccesstoken_delete.go create mode 100644 ent/personalaccesstoken_query.go create mode 100644 ent/personalaccesstoken_update.go create mode 100644 ent/predicate/predicate.go create mode 100644 ent/publisher.go create mode 100644 ent/publisher/publisher.go create mode 100644 ent/publisher/where.go create mode 100644 ent/publisher_create.go create mode 100644 ent/publisher_delete.go create mode 100644 ent/publisher_query.go create mode 100644 ent/publisher_update.go create mode 100644 ent/publisherpermission.go create mode 100644 ent/publisherpermission/publisherpermission.go create mode 100644 ent/publisherpermission/where.go create mode 100644 ent/publisherpermission_create.go create mode 100644 ent/publisherpermission_delete.go 
create mode 100644 ent/publisherpermission_query.go create mode 100644 ent/publisherpermission_update.go create mode 100644 ent/runtime.go create mode 100644 ent/runtime/runtime.go create mode 100644 ent/schema/ci_workflow_result.go create mode 100644 ent/schema/git_commit.go create mode 100644 ent/schema/mixins/createdBy.go create mode 100644 ent/schema/node.go create mode 100644 ent/schema/node_version.go create mode 100644 ent/schema/personal_access_token.go create mode 100644 ent/schema/publisher.go create mode 100644 ent/schema/publisherpermission.go create mode 100644 ent/schema/storage_file.go create mode 100644 ent/schema/user.go create mode 100644 ent/storagefile.go create mode 100644 ent/storagefile/storagefile.go create mode 100644 ent/storagefile/where.go create mode 100644 ent/storagefile_create.go create mode 100644 ent/storagefile_delete.go create mode 100644 ent/storagefile_query.go create mode 100644 ent/storagefile_update.go create mode 100644 ent/tx.go create mode 100644 ent/user.go create mode 100644 ent/user/user.go create mode 100644 ent/user/where.go create mode 100644 ent/user_create.go create mode 100644 ent/user_delete.go create mode 100644 ent/user_query.go create mode 100644 ent/user_update.go create mode 100644 gateways/slack/slack.go create mode 100644 gateways/storage/files.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 integration-tests/registry_integration_test.go create mode 100644 integration-tests/test_util.go create mode 100644 logging/logging.go create mode 100644 main.go create mode 100644 mapper/context.go create mode 100644 mapper/node.go create mode 100644 mapper/node_test.go create mode 100644 mapper/node_version.go create mode 100644 mapper/personal_access_token.go create mode 100644 mapper/publisher.go create mode 100644 mock/gateways/mock_slack_service.go create mode 100644 mock/gateways/mock_storage_service.go create mode 100644 my_pg_hba.conf create mode 100644 openapi.yml create mode 100644 
run-service-prod.yaml create mode 100644 run-service-staging.yaml create mode 100644 server/handlers/openapi_handler.go create mode 100644 server/implementation/api.implementation.go create mode 100644 server/implementation/cicd.go create mode 100644 server/implementation/registry.go create mode 100644 server/implementation/user.go create mode 100644 server/middleware/error_logger.go create mode 100644 server/middleware/firebase_auth.go create mode 100644 server/middleware/firebase_auth_test.go create mode 100644 server/middleware/metric_middleware.go create mode 100644 server/middleware/service_account_auth.go create mode 100644 server/middleware/tracing_middleware.go create mode 100644 server/server.go create mode 100644 services/comfy_ci/comfy_ci_svc.go create mode 100644 services/registry/registry_svc.go create mode 100644 skaffold.yaml create mode 100644 supabase/.gitignore create mode 100644 supabase/config.toml create mode 100644 supabase/seed.sql create mode 100644 tools/tools.go diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000..a2002fe --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,34 @@ +name: Continuous Integration + +on: + push: + branches: + - main + pull_request: + branches: [ main ] + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: 1.21 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + + - name: Get dependencies + run: go mod download + + - name: Build + run: go build -v ./... + + - name: Run unit tests + run: go test $(go list ./... 
| grep -v /integration) -cover -race -v + + - name: Run integration tests + run: go test ./integration-tests diff --git a/.github/workflows/ent-ci.yaml b/.github/workflows/ent-ci.yaml new file mode 100644 index 0000000..15cad84 --- /dev/null +++ b/.github/workflows/ent-ci.yaml @@ -0,0 +1,19 @@ +name: EntCI +on: + push: + # Run whenever code is changed in the master. + branches: + - main + # Run on PRs where something changed under the `ent/` directory. + pull_request: + paths: + - 'ent/*' +jobs: + ent: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3.0.1 + - uses: actions/setup-go@v3 + with: + go-version-file: 'go.mod' + - uses: ent/contrib/ci@master \ No newline at end of file diff --git a/.github/workflows/logging-presubmit.yml b/.github/workflows/logging-presubmit.yml new file mode 100644 index 0000000..2ae114c --- /dev/null +++ b/.github/workflows/logging-presubmit.yml @@ -0,0 +1,13 @@ +name: semgrep +on: + push: + branches: + - "*" +jobs: + semgrep-ci: + runs-on: ubuntu-20.04 + container: + image: semgrep/semgrep + steps: + - uses: actions/checkout@v3 + - run: semgrep ci --config .semgrep.yml --exclude='main.go' --exclude='server/server.go' --exclude='logging/*' -- \ No newline at end of file diff --git a/.github/workflows/migration-ci.yaml b/.github/workflows/migration-ci.yaml new file mode 100644 index 0000000..e3e44b1 --- /dev/null +++ b/.github/workflows/migration-ci.yaml @@ -0,0 +1,37 @@ +name: Atlas CI +on: + # Run whenever code is changed in the main branch, + # change this to your root branch. + # push: + # branches: + # - main + # Run on PRs where something changed under the `ent/migrate/migrations/` directory. + pull_request: + paths: + - 'ent/migrate/migrations/*' +jobs: + lint: + services: + # Spin up a postgres:10 container to be used as the dev-database for analysis. 
+ postgres: + image: postgres:10 + env: + POSTGRES_DB: test + POSTGRES_PASSWORD: pass + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3.0.1 + with: + fetch-depth: 0 # Mandatory unless "latest" is set below. + - uses: ariga/atlas-action@v0 + with: + dir: ent/migrate/migrations + dir-format: golang-migrate # Or: atlas, goose, dbmate + dev-url: postgres://postgres:pass@localhost:5432/test?sslmode=disable \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..89ef497 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +tmp/ +.vscode/* +.vscode/settings.json +.idea/* +.DS_Store diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..41fce43 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +# Start from the official Go image to build the binary. +# Use the same version of Go as your project (1.21.5). +FROM golang:1.21.5 AS builder + +# Set the working directory inside the container. +WORKDIR /go/src/app + +# Copy the Go Modules manifests and download modules to leverage Docker cache. +COPY go.mod go.sum ./ +RUN go mod download + +# Copy the rest of the source code. +COPY . . + +# Build the Go app. +# -o /go/bin/app: Compile the binary to /go/bin/app. +# You might need to adjust the build command depending on your project's structure. +RUN CGO_ENABLED=0 GOOS=linux go build -v -o /go/bin/app + +# Use a small base image to create a minimal final image. +FROM alpine:latest +RUN apk --no-cache add ca-certificates + +# Copy the pre-built binary file from the previous stage. +COPY --from=builder /go/bin/app /go/bin/app + +# Run the binary. 
+ENTRYPOINT ["/go/bin/app"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..9bdf674 --- /dev/null +++ b/README.md @@ -0,0 +1,178 @@ +# registry-backend + +The first service to receive API requests + +## Local Dev + +### Golang + +https://go.dev/doc/install + +### Supabase + +Install [Supabase Cli](https://supabase.com/docs/guides/cli/getting-started) + +`brew install supabase/tap/supabase` + +`supabase start` + +Open [Supabase Studio](http://127.0.0.1:54323/project/default) locally. + +### Start API Server + +`docker compose up` + +This commands starts the server with Air that listens to changes. It connects to the Supabase running locally. + +### Set up local ADC credentials + +These are needed for authenticating Firebase JWT token auth + calling other GCP APIs. + +When testing login with registry, use this: +`gcloud config set project dreamboothy-dev` + +When testing workspace / VM creation, use this: +`gcloud config set project dreamboothy` + +`gcloud auth application-default login` + +If you are testing creating a node, you need to impersonate a service account because it requires signing cloud storage urls. + +`gcloud auth application-default login --impersonate-service-account 357148958219-compute@developer.gserviceaccount.com` + +TODO(robinhuang): Create a service account suitable for dev. + +# Code Generation + +Make sure you install the golang packages locally. + +`go get` + +## Schema Change + +Update the files in `ent/schema`. + +### Regenerate code + +This should search all directories and run go generate. This will run all the commands in the `generate.go` files in the repository. + +`go generate ./...` + +Or manually run: + +`go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert --feature sql/lock ./ent/schema` + +## API Spec Change (openapi.yml) + +### Regenerate code + +This should search all directories and run go generate. This will run all the commands in the `generate.go` files in the repository. 
+ +`go generate ./...` + +Or manually run: + +`export PATH="$PATH:$HOME/bin:$HOME/go/bin"` + +https://github.com/deepmap/oapi-codegen/issues/795 + +`oapi-codegen --config drip/codegen.yaml openapi.yml` + +## TroubleShooting / Common Errors + +Here are some common errors and how to resolve them. + +### Firebase Token Errors + +Usually in localdev, we use dreamboothy-dev Firebase project for authentication. This conflicts with our machine creation logic because all of those machine images are in dreamboothy. TODO(robinhuang): Figure out a solution for this. Either we replicate things in dreamboothy-dev, or we pass project information separately when creating machine images. + +### Creating VM instance error: + +**Example:** + +``` +{ + "severity": "ERROR", + "error": "error creating instance: Post \"https://compute.googleapis.com/compute/v1/projects/dreamboothy/zones/us-central1-a/instances\": oauth2: \"invalid_grant\" \"reauth related error (invalid_rapt)\" \"https://support.google.com/a/answer/9368756\"", + "time": "2024-02-26T01:32:27Z", + "message": "Error creating instance:" +} + +{ + "severity": "ERROR", + "error": "failed to get session using author id 'nz0vAxfqWLSrqPcUhspyuOEp03z2': error creating instance: Post \"https://compute.googleapis.com/compute/v1/projects/dreamboothy/zones/us-central1-a/instances\": oauth2: \"invalid_grant\" \"reauth related error (invalid_rapt)\" \"https://support.google.com/a/answer/9368756\"", + "time": "2024-02-26T01:32:27Z", + "message": "Error occurred Path: /workflows/:id, Method: GET\n" +} +``` + +**Resolution:** + +You would likely need to run `gcloud auth application-default login` again and +restart your docker containers/services to pick up the new credentials. + +### Calling CreateSession endpoint + +Use the postman collection to call the CreateSession endpoint. You should be able to import changes with `openapi.yml` +file. +You should use this as a request body since there are list of supported GPU type. 
+ +```json +{ + "gpu-type": "nvidia-tesla-t4" +} +``` + +### Bypass Authentication Error + +In order to bypass authentication error, you can add make the following changes in `firebase_auth.go` file. + +```go +package drip_middleware + +func FirebaseMiddleware(entClient *ent.Client) echo.MiddlewareFunc { + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(ctx echo.Context) error { + userDetails := &UserDetails{ + ID: "test-james-token-id", + Email: "test-james-email@gmail.com", + Name: "James", + } + + authdCtx := context.WithValue(ctx.Request().Context(), UserContextKey, userDetails) + ctx.SetRequest(ctx.Request().WithContext(authdCtx)) + newUserError := db.UpsertUser(ctx.Request().Context(), entClient, userDetails.ID, userDetails.Email, userDetails.Name) + if newUserError != nil { + log.Ctx(ctx).Info().Ctx(ctx.Request().Context()).Err(newUserError).Msg("error User upserted successfully.") + } + return next(ctx) + } + } +} + +``` + +### Machine Image Not Found + +We use a custom machine image to create VM instances. That machine image is specified in `docker-compose.yml` file. + +```yaml +MACHINE_IMAGE: "comfy-cloud-template-3" +``` + +If you are getting an error that the machine image is not found, you can create a new machine image by following the +steps below: + +**TODO**: explore steps to create machine image with comfy setup. + +For the purpose of just testing endpoints, you don't really need to worry about Comfy specific machine image. +You can simply create a new VM on the GCP console and use that VM's image to create a new machine image. +And then update the `docker-compose.yml` file with the new machine image name. + +## Clean Up Resources + +You can use this script to cleanup resources for specific user. 
+ +```shell +`docker compose -f scripts/cleanup/docker-compose.cleanup.yml run --rm cleanup -u ` +``` diff --git a/app.yaml b/app.yaml new file mode 100644 index 0000000..ac0b6b3 --- /dev/null +++ b/app.yaml @@ -0,0 +1,14 @@ +runtime: go121 +env: standard + +instance_class: F1 # Default instance class. Consider changing it based on your needs + +handlers: + - url: /.* + script: auto + secure: always # Optional: Redirects HTTP to HTTPS. + +automatic_scaling: + min_idle_instances: automatic # Default is automatic (spins down completely) + max_idle_instances: automatic # Default is automatic + min_instances: 0 # Default is 0 diff --git a/cloudbuild.yaml b/cloudbuild.yaml new file mode 100644 index 0000000..34e379c --- /dev/null +++ b/cloudbuild.yaml @@ -0,0 +1,20 @@ +steps: + # build the container image + - name: "gcr.io/cloud-builders/docker" + args: ["build", "-t", "us-central1-docker.pkg.dev/dreamboothy/registry-backend/registry-backend-image:$SHORT_SHA", "."] + # push container image + - name: "gcr.io/cloud-builders/docker" + args: ["push", "us-central1-docker.pkg.dev/dreamboothy/registry-backend/registry-backend-image:$SHORT_SHA"] + # Publish the release + - name: 'gcr.io/google.com/cloudsdktool/cloud-sdk:458.0.1' + entrypoint: 'bash' + args: + - '-c' + - > + gcloud deploy releases create release-registry-backend-$SHORT_SHA + --project=dreamboothy + --region=us-central1 + --delivery-pipeline=comfy-backend-api-pipeline + --images=registry-backend-image-substitute=us-central1-docker.pkg.dev/dreamboothy/registry-backend/registry-backend-image:$SHORT_SHA +options: + machineType: 'E2_HIGHCPU_8' \ No newline at end of file diff --git a/clouddeploy.yaml b/clouddeploy.yaml new file mode 100644 index 0000000..7e2117e --- /dev/null +++ b/clouddeploy.yaml @@ -0,0 +1,29 @@ +apiVersion: deploy.cloud.google.com/v1 +kind: DeliveryPipeline +metadata: + name: comfy-backend-api-pipeline +description: main application pipeline +serialPipeline: + stages: + - targetId: 
staging-comfy-backend + profiles: [staging] + - targetId: prod-comfy-backend + profiles: [prod] +--- + +apiVersion: deploy.cloud.google.com/v1 +kind: Target +metadata: + name: staging-comfy-backend +description: Cloud Run development service +run: + location: projects/dreamboothy/locations/us-central1 +--- + +apiVersion: deploy.cloud.google.com/v1 +kind: Target +metadata: + name: prod-comfy-backend +description: Cloud Run production service +run: + location: projects/dreamboothy/locations/us-central1 \ No newline at end of file diff --git a/common/constants.go b/common/constants.go new file mode 100644 index 0000000..5d8504f --- /dev/null +++ b/common/constants.go @@ -0,0 +1,7 @@ +package common // or any other appropriate package name + +// TraceIDKey is the key used to store/retrieve the Trace ID from context +const TraceIDKey = "trace-id" + +// SpanIDKey is the key used to store/retrieve the Span ID from context +const SpanIDKey = "span-id" diff --git a/common/types.go b/common/types.go new file mode 100644 index 0000000..ffd64d1 --- /dev/null +++ b/common/types.go @@ -0,0 +1,15 @@ +// File: common/types.go + +package common + +type InviteTokenStatus string + +const ( + StatusUsed InviteTokenStatus = "Used" + StatusAvailable InviteTokenStatus = "Available" + StatusExpired InviteTokenStatus = "Expired" +) + +func (InviteTokenStatus) Values() (statuses []string) { + return []string{string(StatusUsed), string(StatusAvailable), string(StatusExpired)} +} diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..3508977 --- /dev/null +++ b/config/config.go @@ -0,0 +1,6 @@ +package config + +type Config struct { + ProjectID string + DripEnv string +} diff --git a/db/tx.go b/db/tx.go new file mode 100644 index 0000000..cb1cc19 --- /dev/null +++ b/db/tx.go @@ -0,0 +1,95 @@ +package db + +import ( + "context" + "fmt" + "registry-backend/ent" + + "github.com/rs/zerolog/log" +) + +// WithTxResult wraps the given function with a transaction. 
+// If the function returns an error, the transaction is rolled back. +func WithTxResult[T any](ctx context.Context, client *ent.Client, fn func(tx *ent.Tx) (T, error)) (T, error) { + var zero T + + // Start a new transaction + log.Ctx(ctx).Info().Msg("Starting transaction") + tx, err := client.Tx(ctx) + if err != nil { + return zero, err + } + + // Flag to keep track of transaction finalization + transactionCompleted := false + + defer func() { + if transactionCompleted { + return + } + + log.Ctx(ctx).Info().Msg("Transaction not completed, attempting to rollback") + if v := recover(); v != nil { + // Attempt to rollback on panic + err := tx.Rollback() // Ignore rollback error here as panic takes precedence + log.Ctx(ctx).Info().Msgf("Rollback failed: %v", err) + panic(v) + } + }() + + // Execute the function within the transaction + log.Ctx(ctx).Info().Msg("Executing function within transaction") + result, err := fn(tx) + if err != nil { + // Rollback transaction on error + log.Ctx(ctx).Info().Msgf("Rolling back transaction on error: %v", err) + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%w: rolling back transaction: %v", err, rerr) + } + return zero, err + } + + // Commit the transaction + log.Ctx(ctx).Info().Msg("Committing transaction") + if err := tx.Commit(); err != nil { + log.Ctx(ctx).Info().Msgf("Error committing transaction: %v", err) + return zero, fmt.Errorf("committing transaction: %w", err) + } + + // Mark the transaction as completed to prevent deferred rollback + log.Ctx(ctx).Info().Msg("Transaction completed successfully") + transactionCompleted = true + return result, nil +} + +func WithTx(ctx context.Context, client *ent.Client, fn func(tx *ent.Tx) error) error { + tx, err := client.Tx(ctx) + if err != nil { + return err + } + defer func() { + if v := recover(); v != nil { + tx.Rollback() + panic(v) + } + }() + if err := fn(tx); err != nil { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%w: rolling back 
transaction: %v", err, rerr) + } + return err + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("committing transaction: %w", err) + } + return nil +} + +// rollback calls to tx.Rollback and wraps the given error +// with the rollback error if occurred. +func rollback(tx *ent.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%w: %v", err, rerr) + } + return err +} diff --git a/db/user.go b/db/user.go new file mode 100644 index 0000000..87546b5 --- /dev/null +++ b/db/user.go @@ -0,0 +1,19 @@ +package db + +import ( + "context" + "registry-backend/ent" + "registry-backend/ent/user" +) + +// UpsertUser creates or updates a user in the database +func UpsertUser(ctx context.Context, client *ent.Client, firebaseUID string, email string, name string) error { + return client.User.Create(). + SetID(firebaseUID). + SetEmail(email). + SetName(name). + OnConflictColumns(user.FieldID). + UpdateEmail(). + UpdateName(). + Exec(ctx) +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..58d76c0 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,19 @@ +version: "3.8" + +services: + registry-backend: + image: cosmtrek/air + working_dir: /app + ports: + - "8080:8080" + volumes: + - ./:/app # Ensure this matches the working_dir + - $HOME/.config/gcloud/application_default_credentials.json:/tmp/keys/application_default_credentials.json + environment: + DRIP_ENV: localdev + DB_CONNECTION_STRING: "user=postgres password=postgres host=host.docker.internal port=54322 dbname=postgres" + GOOGLE_APPLICATION_CREDENTIALS: /tmp/keys/application_default_credentials.json # This will be set in prod by GCP. + GOOGLE_CLOUD_PROJECT: "dreamboothy-dev" # This will be set in prod by GCP. 
+  PROJECT_ID: "dreamboothy-dev"
+  CORS_ORIGIN: "http://localhost:3000"
+  LOG_LEVEL: info # Set the log level here
diff --git a/drip/api.gen.go b/drip/api.gen.go
new file mode 100644
index 0000000..eaa9c8d
--- /dev/null
+++ b/drip/api.gen.go
@@ -0,0 +1,3278 @@
+// Package drip provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/deepmap/oapi-codegen/v2 version v2.1.0 DO NOT EDIT.
+package drip
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/getkin/kin-openapi/openapi3"
+	"github.com/labstack/echo/v4"
+	"github.com/oapi-codegen/runtime"
+	strictecho "github.com/oapi-codegen/runtime/strictmiddleware/echo"
+	openapi_types "github.com/oapi-codegen/runtime/types"
+)
+
+const (
+	BearerAuthScopes = "BearerAuth.Scopes"
+)
+
+// ActionJobResult defines model for ActionJobResult.
+type ActionJobResult struct {
+	// ActionRunId Identifier of the run this result belongs to
+	ActionRunId *string `json:"action_run_id,omitempty"`
+
+	// CommitHash The hash of the commit
+	CommitHash *string `json:"commit_hash,omitempty"`
+
+	// CommitId The ID of the commit
+	CommitId *string `json:"commit_id,omitempty"`
+
+	// CommitMessage The message of the commit
+	CommitMessage *string `json:"commit_message,omitempty"`
+
+	// CommitTime The Unix timestamp when the commit was made
+	CommitTime *int64 `json:"commit_time,omitempty"`
+
+	// EndTime The end time of the job as a Unix timestamp.
+	EndTime *int64 `json:"end_time,omitempty"`
+
+	// GitRepo The repository name
+	GitRepo *string `json:"git_repo,omitempty"`
+
+	// GpuType GPU type used
+	GpuType *string `json:"gpu_type,omitempty"`
+
+	// Id Unique identifier for the job result
+	Id *openapi_types.UUID `json:"id,omitempty"`
+
+	// OperatingSystem Operating system used
+	OperatingSystem *string `json:"operating_system,omitempty"`
+
+	// PytorchVersion PyTorch version used
+	PytorchVersion *string `json:"pytorch_version,omitempty"`
+
+	// StartTime The start time of the job as a Unix timestamp.
+	StartTime   *int64       `json:"start_time,omitempty"`
+	StorageFile *StorageFile `json:"storage_file,omitempty"`
+
+	// WorkflowName Name of the workflow
+	WorkflowName *string `json:"workflow_name,omitempty"`
+}
+
+// Error defines model for Error.
+type Error struct {
+	// Details Optional detailed information about the error or hints for resolving it.
+	Details *[]string `json:"details,omitempty"`
+
+	// Message A clear and concise description of the error.
+	Message *string `json:"message,omitempty"`
+}
+
+// ErrorResponse defines model for ErrorResponse.
+type ErrorResponse struct {
+	Error   string `json:"error"`
+	Message string `json:"message"`
+}
+
+// Node defines model for Node.
+type Node struct {
+	Author      *string `json:"author,omitempty"`
+	Description *string `json:"description,omitempty"`
+
+	// Downloads The number of downloads of the node.
+	Downloads *int `json:"downloads,omitempty"`
+
+	// Icon URL to the node's icon.
+	Icon *string `json:"icon,omitempty"`
+
+	// Id The unique identifier of the node.
+	Id            *string      `json:"id,omitempty"`
+	LatestVersion *NodeVersion `json:"latest_version,omitempty"`
+
+	// License The path to the LICENSE file in the node's repository.
+	License *string `json:"license,omitempty"`
+
+	// Name The display name of the node.
+	Name      *string    `json:"name,omitempty"`
+	Publisher *Publisher `json:"publisher,omitempty"`
+
+	// Rating The average rating of the node.
+	Rating *float32 `json:"rating,omitempty"`
+
+	// Repository URL to the node's repository.
+	Repository *string   `json:"repository,omitempty"`
+	Tags       *[]string `json:"tags,omitempty"`
+}
+
+// NodeVersion defines model for NodeVersion.
+type NodeVersion struct {
+	// Changelog Summary of changes made in this version
+	Changelog *string `json:"changelog,omitempty"`
+
+	// CreatedAt The date and time the version was created.
+	CreatedAt *time.Time `json:"createdAt,omitempty"`
+
+	// Dependencies A list of pip dependencies required by the node.
+	Dependencies *[]string `json:"dependencies,omitempty"`
+
+	// Deprecated Indicates if this version is deprecated.
+	Deprecated *bool `json:"deprecated,omitempty"`
+
+	// DownloadUrl [Output Only] URL to download this version of the node
+	DownloadUrl *string `json:"downloadUrl,omitempty"`
+	Id          *string `json:"id,omitempty"`
+
+	// Version The version identifier, following semantic versioning. Must be unique for the node.
+	Version *string `json:"version,omitempty"`
+}
+
+// NodeVersionUpdateRequest defines model for NodeVersionUpdateRequest.
+type NodeVersionUpdateRequest struct {
+	// Changelog The changelog describing the version changes.
+	Changelog *string `json:"changelog,omitempty"`
+
+	// Deprecated Whether the version is deprecated.
+	Deprecated *bool `json:"deprecated,omitempty"`
+}
+
+// PersonalAccessToken defines model for PersonalAccessToken.
+type PersonalAccessToken struct {
+	// CreatedAt [Output Only] The date and time the token was created.
+	CreatedAt *time.Time `json:"createdAt,omitempty"`
+
+	// Description Optional. A more detailed description of the token's intended use.
+	Description *string `json:"description,omitempty"`
+
+	// Id Unique identifier for the personal access token (NOTE(review): upstream OpenAPI description says "GitCommit" — copy-paste error; fix in the spec and regenerate)
+	Id *openapi_types.UUID `json:"id,omitempty"`
+
+	// Name Required. The name of the token. Can be a simple description.
+	Name *string `json:"name,omitempty"`
+
+	// Token [Output Only]. The personal access token. Only returned during creation.
+	Token *string `json:"token,omitempty"`
+}
+
+// Publisher defines model for Publisher.
+type Publisher struct {
+	// CreatedAt The date and time the publisher was created.
+	CreatedAt   *time.Time `json:"createdAt,omitempty"`
+	Description *string    `json:"description,omitempty"`
+
+	// Id The unique identifier for the publisher. It's akin to a username. Should be lowercase.
+	Id *string `json:"id,omitempty"`
+
+	// Logo URL to the publisher's logo.
+	Logo *string `json:"logo,omitempty"`
+
+	// Members A list of members in the publisher.
+	Members        *[]PublisherMember `json:"members,omitempty"`
+	Name           *string            `json:"name,omitempty"`
+	SourceCodeRepo *string            `json:"source_code_repo,omitempty"`
+	Support        *string            `json:"support,omitempty"`
+	Website        *string            `json:"website,omitempty"`
+}
+
+// PublisherMember defines model for PublisherMember.
+type PublisherMember struct {
+	// Id The unique identifier for the publisher member.
+	Id *string `json:"id,omitempty"`
+
+	// Role The role of the user in the publisher.
+	Role *string        `json:"role,omitempty"`
+	User *PublisherUser `json:"user,omitempty"`
+}
+
+// PublisherUser defines model for PublisherUser.
+type PublisherUser struct {
+	// Email The email address for this user.
+	Email *string `json:"email,omitempty"`
+
+	// Id The unique id for this user.
+	Id *string `json:"id,omitempty"`
+
+	// Name The name for this user.
+	Name *string `json:"name,omitempty"`
+}
+
+// StorageFile defines model for StorageFile.
+type StorageFile struct {
+	// FilePath Path to the file in storage
+	FilePath *string `json:"file_path,omitempty"`
+
+	// Id Unique identifier for the storage file
+	Id *openapi_types.UUID `json:"id,omitempty"`
+
+	// PublicUrl Public URL
+	PublicUrl *string `json:"public_url,omitempty"`
+}
+
+// User defines model for User.
+type User struct {
+	// Email The email address for this user.
+	Email *string `json:"email,omitempty"`
+
+	// Id The unique id for this user.
+	Id *string `json:"id,omitempty"`
+
+	// IsAdmin Indicates if the user has admin privileges.
+	IsAdmin *bool `json:"isAdmin,omitempty"`
+
+	// IsApproved Indicates if the user is approved.
+	IsApproved *bool `json:"isApproved,omitempty"`
+
+	// Name The name for this user.
+	Name *string `json:"name,omitempty"`
+}
+
+// GetBranchParams defines parameters for GetBranch.
+type GetBranchParams struct {
+	// RepoName The repo to filter by.
+	RepoName string `form:"repo_name" json:"repo_name"`
+}
+
+// GetGitcommitParams defines parameters for GetGitcommit.
+type GetGitcommitParams struct {
+	// CommitId The ID of the commit to fetch data for.
+	CommitId *string `form:"commitId,omitempty" json:"commitId,omitempty"`
+
+	// OperatingSystem The operating system to filter the CI data by.
+	OperatingSystem *string `form:"operatingSystem,omitempty" json:"operatingSystem,omitempty"`
+
+	// WorkflowName The name of the workflow to filter the CI data by.
+	WorkflowName *string `form:"workflowName,omitempty" json:"workflowName,omitempty"`
+
+	// Branch The branch of the gitcommit to filter the CI data by.
+	Branch *string `form:"branch,omitempty" json:"branch,omitempty"`
+
+	// Page The page number to retrieve.
+	Page *int `form:"page,omitempty" json:"page,omitempty"`
+
+	// PageSize The number of items to include per page.
+	PageSize *int `form:"pageSize,omitempty" json:"pageSize,omitempty"`
+
+	// RepoName The repo to filter by.
+	RepoName *string `form:"repoName,omitempty" json:"repoName,omitempty"`
+}
+
+// ListAllNodesParams defines parameters for ListAllNodes.
+type ListAllNodesParams struct {
+	// Page Page number of the nodes list
+	Page *int `form:"page,omitempty" json:"page,omitempty"`
+
+	// Limit Number of nodes to return per page
+	Limit *int `form:"limit,omitempty" json:"limit,omitempty"`
+}
+
+// InstallNodeParams defines parameters for InstallNode.
+type InstallNodeParams struct {
+	// Version Specific version of the node to retrieve. If omitted, the latest version is returned.
+	Version *string `form:"version,omitempty" json:"version,omitempty"`
+}
+
+// ValidatePublisherParams defines parameters for ValidatePublisher.
+type ValidatePublisherParams struct {
+	// Username The publisher username to validate.
+	Username string `form:"username" json:"username"`
+}
+
+// PublishNodeVersionJSONBody defines parameters for PublishNodeVersion.
+type PublishNodeVersionJSONBody struct {
+	Node                Node        `json:"node"`
+	NodeVersion         NodeVersion `json:"node_version"`
+	PersonalAccessToken string      `json:"personal_access_token"`
+}
+
+// PostUploadArtifactJSONBody defines parameters for PostUploadArtifact.
+type PostUploadArtifactJSONBody struct {
+	BranchName string `json:"branch_name"`
+
+	// BucketName The name of the bucket where the output files are stored
+	BucketName *string `json:"bucket_name,omitempty"`
+
+	// ComfyLogsGcsPath The path to ComfyUI logs. eg. gs://bucket-name/logs
+	ComfyLogsGcsPath *string `json:"comfy_logs_gcs_path,omitempty"`
+	CommitHash       string  `json:"commit_hash"`
+
+	// CommitMessage The commit message
+	CommitMessage string `json:"commit_message"`
+
+	// CommitTime The time of the commit in the format of "YYYY-MM-DDTHH:MM:SSZ" (2016-10-10T00:00:00Z)
+	CommitTime string `json:"commit_time"`
+
+	// CudaVersion Cuda version.
+	CudaVersion *string `json:"cuda_version,omitempty"`
+
+	// EndTime The end time of the job as a Unix timestamp.
+	EndTime int64 `json:"end_time"`
+
+	// JobId Unique identifier for the job
+	JobId string `json:"job_id"`
+
+	// Os Operating system used in the run
+	Os string `json:"os"`
+
+	// OutputFilesGcsPaths A comma separated string that contains GCS path(s) to output files. eg. gs://bucket-name/output, gs://bucket-name/output2
+	OutputFilesGcsPaths *string `json:"output_files_gcs_paths,omitempty"`
+
+	// Repo Repository name
+	Repo string `json:"repo"`
+
+	// RunId Unique identifier for the run
+	RunId string `json:"run_id"`
+
+	// StartTime The start time of the job as a Unix timestamp.
+	StartTime int64 `json:"start_time"`
+
+	// WorkflowName The name of the workflow
+	WorkflowName string `json:"workflow_name"`
+}
+
+// CreatePublisherJSONRequestBody defines body for CreatePublisher for application/json ContentType.
+type CreatePublisherJSONRequestBody = Publisher
+
+// UpdatePublisherJSONRequestBody defines body for UpdatePublisher for application/json ContentType.
+type UpdatePublisherJSONRequestBody = Publisher
+
+// CreateNodeJSONRequestBody defines body for CreateNode for application/json ContentType.
+type CreateNodeJSONRequestBody = Node
+
+// UpdateNodeJSONRequestBody defines body for UpdateNode for application/json ContentType.
+type UpdateNodeJSONRequestBody = Node
+
+// PublishNodeVersionJSONRequestBody defines body for PublishNodeVersion for application/json ContentType.
+type PublishNodeVersionJSONRequestBody PublishNodeVersionJSONBody
+
+// UpdateNodeVersionJSONRequestBody defines body for UpdateNodeVersion for application/json ContentType.
+type UpdateNodeVersionJSONRequestBody = NodeVersionUpdateRequest
+
+// CreatePersonalAccessTokenJSONRequestBody defines body for CreatePersonalAccessToken for application/json ContentType.
+type CreatePersonalAccessTokenJSONRequestBody = PersonalAccessToken
+
+// PostUploadArtifactJSONRequestBody defines body for PostUploadArtifact for application/json ContentType.
+type PostUploadArtifactJSONRequestBody PostUploadArtifactJSONBody
+
+// ServerInterface represents all server handlers.
+type ServerInterface interface {
+	// Retrieve all distinct branches for a given repo
+	// (GET /branch)
+	GetBranch(ctx echo.Context, params GetBranchParams) error
+	// Retrieve CI data for a given commit
+	// (GET /gitcommit)
+	GetGitcommit(ctx echo.Context, params GetGitcommitParams) error
+	// Retrieves a list of nodes
+	// (GET /nodes)
+	ListAllNodes(ctx echo.Context, params ListAllNodesParams) error
+	// Retrieve a specific node by ID
+	// (GET /nodes/{nodeId})
+	GetNode(ctx echo.Context, nodeId string) error
+	// Returns a node version to be installed.
+	// (GET /nodes/{nodeId}/install)
+	InstallNode(ctx echo.Context, nodeId string, params InstallNodeParams) error
+	// List all versions of a node
+	// (GET /nodes/{nodeId}/versions)
+	ListNodeVersions(ctx echo.Context, nodeId string) error
+	// Retrieve a specific version of a node
+	// (GET /nodes/{nodeId}/versions/{versionId})
+	GetNodeVersion(ctx echo.Context, nodeId string, versionId string) error
+	// Retrieve all publishers
+	// (GET /publishers)
+	ListPublishers(ctx echo.Context) error
+	// Create a new publisher
+	// (POST /publishers)
+	CreatePublisher(ctx echo.Context) error
+	// Validate if a publisher username is available
+	// (GET /publishers/validate)
+	ValidatePublisher(ctx echo.Context, params ValidatePublisherParams) error
+	// Delete a publisher
+	// (DELETE /publishers/{publisherId})
+	DeletePublisher(ctx echo.Context, publisherId string) error
+	// Retrieve a publisher by ID
+	// (GET /publishers/{publisherId})
+	GetPublisher(ctx echo.Context, publisherId string) error
+	// Update a publisher
+	// (PUT /publishers/{publisherId})
+	UpdatePublisher(ctx echo.Context, publisherId string) error
+	// Retrieve all nodes
+	// (GET /publishers/{publisherId}/nodes)
+	ListNodesForPublisher(ctx echo.Context, publisherId string) error
+	// Create a new custom node
+	// (POST /publishers/{publisherId}/nodes)
+	CreateNode(ctx echo.Context, publisherId string) error
+	// Delete a specific node
+	// (DELETE /publishers/{publisherId}/nodes/{nodeId})
+	DeleteNode(ctx echo.Context, publisherId string, nodeId string) error
+	// Update a specific node
+	// (PUT /publishers/{publisherId}/nodes/{nodeId})
+	UpdateNode(ctx echo.Context, publisherId string, nodeId string) error
+	// Retrieve permissions the user has for a given publisher
+	// (GET /publishers/{publisherId}/nodes/{nodeId}/permissions)
+	GetPermissionOnPublisherNodes(ctx echo.Context, publisherId string, nodeId string) error
+	// Publish a new version of a node
+	// (POST /publishers/{publisherId}/nodes/{nodeId}/versions)
+	PublishNodeVersion(ctx echo.Context, publisherId string, nodeId string) error
+	// Unpublish (delete) a specific version of a node
+	// (DELETE /publishers/{publisherId}/nodes/{nodeId}/versions/{versionId})
+	DeleteNodeVersion(ctx echo.Context, publisherId string, nodeId string, versionId string) error
+	// Update changelog and deprecation status of a node version
+	// (PUT /publishers/{publisherId}/nodes/{nodeId}/versions/{versionId})
+	UpdateNodeVersion(ctx echo.Context, publisherId string, nodeId string, versionId string) error
+	// Retrieve permissions the user has for a given publisher
+	// (GET /publishers/{publisherId}/permissions)
+	GetPermissionOnPublisher(ctx echo.Context, publisherId string) error
+	// Retrieve all personal access tokens for a publisher
+	// (GET /publishers/{publisherId}/tokens)
+	ListPersonalAccessTokens(ctx echo.Context, publisherId string) error
+	// Create a new personal access token
+	// (POST /publishers/{publisherId}/tokens)
+	CreatePersonalAccessToken(ctx echo.Context, publisherId string) error
+	// Delete a specific personal access token
+	// (DELETE /publishers/{publisherId}/tokens/{tokenId})
+	DeletePersonalAccessToken(ctx echo.Context, publisherId string, tokenId string) error
+	// Receive artifacts (output files) from the ComfyUI GitHub Action
+	// (POST /upload-artifact)
+	PostUploadArtifact(ctx echo.Context) error
+	// Get information about the calling user.
+	// (GET /users)
+	GetUser(ctx echo.Context) error
+	// Retrieve all publishers for a given user
+	// (GET /users/publishers/)
+	ListPublishersForUser(ctx echo.Context) error
+}
+
+// ServerInterfaceWrapper converts echo contexts to parameters.
+type ServerInterfaceWrapper struct {
+	Handler ServerInterface
+}
+
+// GetBranch converts echo context to params.
+func (w *ServerInterfaceWrapper) GetBranch(ctx echo.Context) error {
+	var err error
+
+	// Parameter object where we will unmarshal all parameters from the context
+	var params GetBranchParams
+	// ------------- Required query parameter "repo_name" -------------
+
+	err = runtime.BindQueryParameter("form", true, true, "repo_name", ctx.QueryParams(), &params.RepoName)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter repo_name: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetBranch(ctx, params)
+	return err
+}
+
+// GetGitcommit converts echo context to params.
+func (w *ServerInterfaceWrapper) GetGitcommit(ctx echo.Context) error {
+	var err error
+
+	// Parameter object where we will unmarshal all parameters from the context
+	var params GetGitcommitParams
+	// ------------- Optional query parameter "commitId" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "commitId", ctx.QueryParams(), &params.CommitId)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter commitId: %s", err))
+	}
+
+	// ------------- Optional query parameter "operatingSystem" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "operatingSystem", ctx.QueryParams(), &params.OperatingSystem)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter operatingSystem: %s", err))
+	}
+
+	// ------------- Optional query parameter "workflowName" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "workflowName", ctx.QueryParams(), &params.WorkflowName)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter workflowName: %s", err))
+	}
+
+	// ------------- Optional query parameter "branch" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "branch", ctx.QueryParams(), &params.Branch)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter branch: %s", err))
+	}
+
+	// ------------- Optional query parameter "page" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "page", ctx.QueryParams(), &params.Page)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter page: %s", err))
+	}
+
+	// ------------- Optional query parameter "pageSize" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "pageSize", ctx.QueryParams(), &params.PageSize)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter pageSize: %s", err))
+	}
+
+	// ------------- Optional query parameter "repoName" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "repoName", ctx.QueryParams(), &params.RepoName)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter repoName: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetGitcommit(ctx, params)
+	return err
+}
+
+// ListAllNodes converts echo context to params.
+func (w *ServerInterfaceWrapper) ListAllNodes(ctx echo.Context) error {
+	var err error
+
+	// Parameter object where we will unmarshal all parameters from the context
+	var params ListAllNodesParams
+	// ------------- Optional query parameter "page" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "page", ctx.QueryParams(), &params.Page)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter page: %s", err))
+	}
+
+	// ------------- Optional query parameter "limit" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "limit", ctx.QueryParams(), &params.Limit)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter limit: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.ListAllNodes(ctx, params)
+	return err
+}
+
+// GetNode converts echo context to params.
+func (w *ServerInterfaceWrapper) GetNode(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetNode(ctx, nodeId)
+	return err
+}
+
+// InstallNode converts echo context to params.
+func (w *ServerInterfaceWrapper) InstallNode(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// Parameter object where we will unmarshal all parameters from the context
+	var params InstallNodeParams
+	// ------------- Optional query parameter "version" -------------
+
+	err = runtime.BindQueryParameter("form", true, false, "version", ctx.QueryParams(), &params.Version)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter version: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.InstallNode(ctx, nodeId, params)
+	return err
+}
+
+// ListNodeVersions converts echo context to params.
+func (w *ServerInterfaceWrapper) ListNodeVersions(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.ListNodeVersions(ctx, nodeId)
+	return err
+}
+
+// GetNodeVersion converts echo context to params.
+func (w *ServerInterfaceWrapper) GetNodeVersion(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// ------------- Path parameter "versionId" -------------
+	var versionId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "versionId", ctx.Param("versionId"), &versionId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter versionId: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetNodeVersion(ctx, nodeId, versionId)
+	return err
+}
+
+// ListPublishers converts echo context to params.
+func (w *ServerInterfaceWrapper) ListPublishers(ctx echo.Context) error {
+	var err error
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.ListPublishers(ctx)
+	return err
+}
+
+// CreatePublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) CreatePublisher(ctx echo.Context) error {
+	var err error
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.CreatePublisher(ctx)
+	return err
+}
+
+// ValidatePublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) ValidatePublisher(ctx echo.Context) error {
+	var err error
+
+	// Parameter object where we will unmarshal all parameters from the context
+	var params ValidatePublisherParams
+	// ------------- Required query parameter "username" -------------
+
+	err = runtime.BindQueryParameter("form", true, true, "username", ctx.QueryParams(), &params.Username)
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter username: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.ValidatePublisher(ctx, params)
+	return err
+}
+
+// DeletePublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) DeletePublisher(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.DeletePublisher(ctx, publisherId)
+	return err
+}
+
+// GetPublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) GetPublisher(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetPublisher(ctx, publisherId)
+	return err
+}
+
+// UpdatePublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) UpdatePublisher(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.UpdatePublisher(ctx, publisherId)
+	return err
+}
+
+// ListNodesForPublisher converts echo context to params.
+func (w *ServerInterfaceWrapper) ListNodesForPublisher(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.ListNodesForPublisher(ctx, publisherId)
+	return err
+}
+
+// CreateNode converts echo context to params.
+func (w *ServerInterfaceWrapper) CreateNode(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.CreateNode(ctx, publisherId)
+	return err
+}
+
+// DeleteNode converts echo context to params.
+func (w *ServerInterfaceWrapper) DeleteNode(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.DeleteNode(ctx, publisherId, nodeId)
+	return err
+}
+
+// UpdateNode converts echo context to params.
+func (w *ServerInterfaceWrapper) UpdateNode(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.UpdateNode(ctx, publisherId, nodeId)
+	return err
+}
+
+// GetPermissionOnPublisherNodes converts echo context to params.
+func (w *ServerInterfaceWrapper) GetPermissionOnPublisherNodes(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.GetPermissionOnPublisherNodes(ctx, publisherId, nodeId)
+	return err
+}
+
+// PublishNodeVersion converts echo context to params.
+func (w *ServerInterfaceWrapper) PublishNodeVersion(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.PublishNodeVersion(ctx, publisherId, nodeId)
+	return err
+}
+
+// DeleteNodeVersion converts echo context to params.
+func (w *ServerInterfaceWrapper) DeleteNodeVersion(ctx echo.Context) error {
+	var err error
+	// ------------- Path parameter "publisherId" -------------
+	var publisherId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err))
+	}
+
+	// ------------- Path parameter "nodeId" -------------
+	var nodeId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err))
+	}
+
+	// ------------- Path parameter "versionId" -------------
+	var versionId string
+
+	err = runtime.BindStyledParameterWithOptions("simple", "versionId", ctx.Param("versionId"), &versionId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter versionId: %s", err))
+	}
+
+	ctx.Set(BearerAuthScopes, []string{})
+
+	// Invoke the callback with all the unmarshaled arguments
+	err = w.Handler.DeleteNodeVersion(ctx, publisherId, nodeId, versionId)
+	return err
+}
+
+// UpdateNodeVersion converts echo context to params.
+func (w *ServerInterfaceWrapper) UpdateNodeVersion(ctx echo.Context) error { + var err error + // ------------- Path parameter "publisherId" ------------- + var publisherId string + + err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err)) + } + + // ------------- Path parameter "nodeId" ------------- + var nodeId string + + err = runtime.BindStyledParameterWithOptions("simple", "nodeId", ctx.Param("nodeId"), &nodeId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter nodeId: %s", err)) + } + + // ------------- Path parameter "versionId" ------------- + var versionId string + + err = runtime.BindStyledParameterWithOptions("simple", "versionId", ctx.Param("versionId"), &versionId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter versionId: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.UpdateNodeVersion(ctx, publisherId, nodeId, versionId) + return err +} + +// GetPermissionOnPublisher converts echo context to params. 
+func (w *ServerInterfaceWrapper) GetPermissionOnPublisher(ctx echo.Context) error { + var err error + // ------------- Path parameter "publisherId" ------------- + var publisherId string + + err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.GetPermissionOnPublisher(ctx, publisherId) + return err +} + +// ListPersonalAccessTokens converts echo context to params. +func (w *ServerInterfaceWrapper) ListPersonalAccessTokens(ctx echo.Context) error { + var err error + // ------------- Path parameter "publisherId" ------------- + var publisherId string + + err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.ListPersonalAccessTokens(ctx, publisherId) + return err +} + +// CreatePersonalAccessToken converts echo context to params. 
+func (w *ServerInterfaceWrapper) CreatePersonalAccessToken(ctx echo.Context) error { + var err error + // ------------- Path parameter "publisherId" ------------- + var publisherId string + + err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.CreatePersonalAccessToken(ctx, publisherId) + return err +} + +// DeletePersonalAccessToken converts echo context to params. +func (w *ServerInterfaceWrapper) DeletePersonalAccessToken(ctx echo.Context) error { + var err error + // ------------- Path parameter "publisherId" ------------- + var publisherId string + + err = runtime.BindStyledParameterWithOptions("simple", "publisherId", ctx.Param("publisherId"), &publisherId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter publisherId: %s", err)) + } + + // ------------- Path parameter "tokenId" ------------- + var tokenId string + + err = runtime.BindStyledParameterWithOptions("simple", "tokenId", ctx.Param("tokenId"), &tokenId, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tokenId: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.DeletePersonalAccessToken(ctx, publisherId, tokenId) + return err +} + +// PostUploadArtifact 
converts echo context to params. +func (w *ServerInterfaceWrapper) PostUploadArtifact(ctx echo.Context) error { + var err error + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.PostUploadArtifact(ctx) + return err +} + +// GetUser converts echo context to params. +func (w *ServerInterfaceWrapper) GetUser(ctx echo.Context) error { + var err error + + ctx.Set(BearerAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.GetUser(ctx) + return err +} + +// ListPublishersForUser converts echo context to params. +func (w *ServerInterfaceWrapper) ListPublishersForUser(ctx echo.Context) error { + var err error + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.ListPublishersForUser(ctx) + return err +} + +// This is a simple interface which specifies echo.Route addition functions which +// are present on both echo.Echo and echo.Group, since we want to allow using +// either of them for path registration +type EchoRouter interface { + CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route + TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route +} + +// RegisterHandlers adds each server route to the EchoRouter. 
+func RegisterHandlers(router EchoRouter, si ServerInterface) { + RegisterHandlersWithBaseURL(router, si, "") +} + +// Registers handlers, and prepends BaseURL to the paths, so that the paths +// can be served under a prefix. +func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string) { + + wrapper := ServerInterfaceWrapper{ + Handler: si, + } + + router.GET(baseURL+"/branch", wrapper.GetBranch) + router.GET(baseURL+"/gitcommit", wrapper.GetGitcommit) + router.GET(baseURL+"/nodes", wrapper.ListAllNodes) + router.GET(baseURL+"/nodes/:nodeId", wrapper.GetNode) + router.GET(baseURL+"/nodes/:nodeId/install", wrapper.InstallNode) + router.GET(baseURL+"/nodes/:nodeId/versions", wrapper.ListNodeVersions) + router.GET(baseURL+"/nodes/:nodeId/versions/:versionId", wrapper.GetNodeVersion) + router.GET(baseURL+"/publishers", wrapper.ListPublishers) + router.POST(baseURL+"/publishers", wrapper.CreatePublisher) + router.GET(baseURL+"/publishers/validate", wrapper.ValidatePublisher) + router.DELETE(baseURL+"/publishers/:publisherId", wrapper.DeletePublisher) + router.GET(baseURL+"/publishers/:publisherId", wrapper.GetPublisher) + router.PUT(baseURL+"/publishers/:publisherId", wrapper.UpdatePublisher) + router.GET(baseURL+"/publishers/:publisherId/nodes", wrapper.ListNodesForPublisher) + router.POST(baseURL+"/publishers/:publisherId/nodes", wrapper.CreateNode) + router.DELETE(baseURL+"/publishers/:publisherId/nodes/:nodeId", wrapper.DeleteNode) + router.PUT(baseURL+"/publishers/:publisherId/nodes/:nodeId", wrapper.UpdateNode) + router.GET(baseURL+"/publishers/:publisherId/nodes/:nodeId/permissions", wrapper.GetPermissionOnPublisherNodes) + router.POST(baseURL+"/publishers/:publisherId/nodes/:nodeId/versions", wrapper.PublishNodeVersion) + router.DELETE(baseURL+"/publishers/:publisherId/nodes/:nodeId/versions/:versionId", wrapper.DeleteNodeVersion) + router.PUT(baseURL+"/publishers/:publisherId/nodes/:nodeId/versions/:versionId", 
wrapper.UpdateNodeVersion) + router.GET(baseURL+"/publishers/:publisherId/permissions", wrapper.GetPermissionOnPublisher) + router.GET(baseURL+"/publishers/:publisherId/tokens", wrapper.ListPersonalAccessTokens) + router.POST(baseURL+"/publishers/:publisherId/tokens", wrapper.CreatePersonalAccessToken) + router.DELETE(baseURL+"/publishers/:publisherId/tokens/:tokenId", wrapper.DeletePersonalAccessToken) + router.POST(baseURL+"/upload-artifact", wrapper.PostUploadArtifact) + router.GET(baseURL+"/users", wrapper.GetUser) + router.GET(baseURL+"/users/publishers/", wrapper.ListPublishersForUser) + +} + +type GetBranchRequestObject struct { + Params GetBranchParams +} + +type GetBranchResponseObject interface { + VisitGetBranchResponse(w http.ResponseWriter) error +} + +type GetBranch200JSONResponse struct { + Branches *[]string `json:"branches,omitempty"` +} + +func (response GetBranch200JSONResponse) VisitGetBranchResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetBranch404Response struct { +} + +func (response GetBranch404Response) VisitGetBranchResponse(w http.ResponseWriter) error { + w.WriteHeader(404) + return nil +} + +type GetBranch500Response struct { +} + +func (response GetBranch500Response) VisitGetBranchResponse(w http.ResponseWriter) error { + w.WriteHeader(500) + return nil +} + +type GetGitcommitRequestObject struct { + Params GetGitcommitParams +} + +type GetGitcommitResponseObject interface { + VisitGetGitcommitResponse(w http.ResponseWriter) error +} + +type GetGitcommit200JSONResponse struct { + JobResults *[]ActionJobResult `json:"jobResults,omitempty"` + TotalNumberOfPages *int `json:"totalNumberOfPages,omitempty"` +} + +func (response GetGitcommit200JSONResponse) VisitGetGitcommitResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return 
json.NewEncoder(w).Encode(response) +} + +type GetGitcommit404Response struct { +} + +func (response GetGitcommit404Response) VisitGetGitcommitResponse(w http.ResponseWriter) error { + w.WriteHeader(404) + return nil +} + +type GetGitcommit500Response struct { +} + +func (response GetGitcommit500Response) VisitGetGitcommitResponse(w http.ResponseWriter) error { + w.WriteHeader(500) + return nil +} + +type ListAllNodesRequestObject struct { + Params ListAllNodesParams +} + +type ListAllNodesResponseObject interface { + VisitListAllNodesResponse(w http.ResponseWriter) error +} + +type ListAllNodes200JSONResponse struct { + // Limit Maximum number of nodes per page + Limit *int `json:"limit,omitempty"` + Nodes *[]Node `json:"nodes,omitempty"` + + // Page Current page number + Page *int `json:"page,omitempty"` + + // Total Total number of nodes available + Total *int `json:"total,omitempty"` + + // TotalPages Total number of pages available + TotalPages *int `json:"totalPages,omitempty"` +} + +func (response ListAllNodes200JSONResponse) VisitListAllNodesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListAllNodes400Response struct { +} + +func (response ListAllNodes400Response) VisitListAllNodesResponse(w http.ResponseWriter) error { + w.WriteHeader(400) + return nil +} + +type ListAllNodes404Response struct { +} + +func (response ListAllNodes404Response) VisitListAllNodesResponse(w http.ResponseWriter) error { + w.WriteHeader(404) + return nil +} + +type ListAllNodes500JSONResponse ErrorResponse + +func (response ListAllNodes500JSONResponse) VisitListAllNodesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetNodeRequestObject struct { + NodeId string `json:"nodeId"` +} + +type GetNodeResponseObject interface { + 
VisitGetNodeResponse(w http.ResponseWriter) error +} + +type GetNode200JSONResponse Node + +func (response GetNode200JSONResponse) VisitGetNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetNode403JSONResponse ErrorResponse + +func (response GetNode403JSONResponse) VisitGetNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type GetNode404JSONResponse ErrorResponse + +func (response GetNode404JSONResponse) VisitGetNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetNode500JSONResponse ErrorResponse + +func (response GetNode500JSONResponse) VisitGetNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type InstallNodeRequestObject struct { + NodeId string `json:"nodeId"` + Params InstallNodeParams +} + +type InstallNodeResponseObject interface { + VisitInstallNodeResponse(w http.ResponseWriter) error +} + +type InstallNode200JSONResponse NodeVersion + +func (response InstallNode200JSONResponse) VisitInstallNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type InstallNode400JSONResponse ErrorResponse + +func (response InstallNode400JSONResponse) VisitInstallNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type InstallNode403JSONResponse ErrorResponse + +func (response InstallNode403JSONResponse) VisitInstallNodeResponse(w http.ResponseWriter) 
error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type InstallNode404JSONResponse ErrorResponse + +func (response InstallNode404JSONResponse) VisitInstallNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type InstallNode500JSONResponse ErrorResponse + +func (response InstallNode500JSONResponse) VisitInstallNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodeVersionsRequestObject struct { + NodeId string `json:"nodeId"` +} + +type ListNodeVersionsResponseObject interface { + VisitListNodeVersionsResponse(w http.ResponseWriter) error +} + +type ListNodeVersions200JSONResponse []NodeVersion + +func (response ListNodeVersions200JSONResponse) VisitListNodeVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodeVersions404JSONResponse Error + +func (response ListNodeVersions404JSONResponse) VisitListNodeVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodeVersions500JSONResponse ErrorResponse + +func (response ListNodeVersions500JSONResponse) VisitListNodeVersionsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetNodeVersionRequestObject struct { + NodeId string `json:"nodeId"` + VersionId string `json:"versionId"` +} + +type GetNodeVersionResponseObject interface { + VisitGetNodeVersionResponse(w http.ResponseWriter) error +} + +type 
GetNodeVersion200JSONResponse NodeVersion + +func (response GetNodeVersion200JSONResponse) VisitGetNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetNodeVersion404JSONResponse Error + +func (response GetNodeVersion404JSONResponse) VisitGetNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetNodeVersion500JSONResponse ErrorResponse + +func (response GetNodeVersion500JSONResponse) VisitGetNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ListPublishersRequestObject struct { +} + +type ListPublishersResponseObject interface { + VisitListPublishersResponse(w http.ResponseWriter) error +} + +type ListPublishers200JSONResponse []Publisher + +func (response ListPublishers200JSONResponse) VisitListPublishersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListPublishers400JSONResponse ErrorResponse + +func (response ListPublishers400JSONResponse) VisitListPublishersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type ListPublishers500JSONResponse ErrorResponse + +func (response ListPublishers500JSONResponse) VisitListPublishersResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePublisherRequestObject struct { + Body *CreatePublisherJSONRequestBody +} + +type CreatePublisherResponseObject interface { + 
VisitCreatePublisherResponse(w http.ResponseWriter) error +} + +type CreatePublisher201JSONResponse Publisher + +func (response CreatePublisher201JSONResponse) VisitCreatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(201) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePublisher400JSONResponse ErrorResponse + +func (response CreatePublisher400JSONResponse) VisitCreatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePublisher401Response struct { +} + +func (response CreatePublisher401Response) VisitCreatePublisherResponse(w http.ResponseWriter) error { + w.WriteHeader(401) + return nil +} + +type CreatePublisher403JSONResponse ErrorResponse + +func (response CreatePublisher403JSONResponse) VisitCreatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePublisher500JSONResponse ErrorResponse + +func (response CreatePublisher500JSONResponse) VisitCreatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ValidatePublisherRequestObject struct { + Params ValidatePublisherParams +} + +type ValidatePublisherResponseObject interface { + VisitValidatePublisherResponse(w http.ResponseWriter) error +} + +type ValidatePublisher200JSONResponse struct { + // IsAvailable True if the username is available, false otherwise. 
+ IsAvailable *bool `json:"isAvailable,omitempty"` +} + +func (response ValidatePublisher200JSONResponse) VisitValidatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ValidatePublisher400JSONResponse ErrorResponse + +func (response ValidatePublisher400JSONResponse) VisitValidatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type ValidatePublisher500JSONResponse ErrorResponse + +func (response ValidatePublisher500JSONResponse) VisitValidatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type DeletePublisherRequestObject struct { + PublisherId string `json:"publisherId"` +} + +type DeletePublisherResponseObject interface { + VisitDeletePublisherResponse(w http.ResponseWriter) error +} + +type DeletePublisher204Response struct { +} + +func (response DeletePublisher204Response) VisitDeletePublisherResponse(w http.ResponseWriter) error { + w.WriteHeader(204) + return nil +} + +type DeletePublisher404JSONResponse ErrorResponse + +func (response DeletePublisher404JSONResponse) VisitDeletePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type DeletePublisher500JSONResponse ErrorResponse + +func (response DeletePublisher500JSONResponse) VisitDeletePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetPublisherRequestObject struct { + PublisherId string `json:"publisherId"` +} + +type GetPublisherResponseObject interface { + 
VisitGetPublisherResponse(w http.ResponseWriter) error +} + +type GetPublisher200JSONResponse Publisher + +func (response GetPublisher200JSONResponse) VisitGetPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetPublisher404JSONResponse ErrorResponse + +func (response GetPublisher404JSONResponse) VisitGetPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetPublisher500JSONResponse ErrorResponse + +func (response GetPublisher500JSONResponse) VisitGetPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type UpdatePublisherRequestObject struct { + PublisherId string `json:"publisherId"` + Body *UpdatePublisherJSONRequestBody +} + +type UpdatePublisherResponseObject interface { + VisitUpdatePublisherResponse(w http.ResponseWriter) error +} + +type UpdatePublisher200JSONResponse Publisher + +func (response UpdatePublisher200JSONResponse) VisitUpdatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type UpdatePublisher400JSONResponse ErrorResponse + +func (response UpdatePublisher400JSONResponse) VisitUpdatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type UpdatePublisher401Response struct { +} + +func (response UpdatePublisher401Response) VisitUpdatePublisherResponse(w http.ResponseWriter) error { + w.WriteHeader(401) + return nil +} + +type UpdatePublisher404JSONResponse ErrorResponse + +func (response UpdatePublisher404JSONResponse) 
VisitUpdatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UpdatePublisher500JSONResponse ErrorResponse + +func (response UpdatePublisher500JSONResponse) VisitUpdatePublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodesForPublisherRequestObject struct { + PublisherId string `json:"publisherId"` +} + +type ListNodesForPublisherResponseObject interface { + VisitListNodesForPublisherResponse(w http.ResponseWriter) error +} + +type ListNodesForPublisher200JSONResponse []Node + +func (response ListNodesForPublisher200JSONResponse) VisitListNodesForPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodesForPublisher400JSONResponse ErrorResponse + +func (response ListNodesForPublisher400JSONResponse) VisitListNodesForPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type ListNodesForPublisher500JSONResponse ErrorResponse + +func (response ListNodesForPublisher500JSONResponse) VisitListNodesForPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type CreateNodeRequestObject struct { + PublisherId string `json:"publisherId"` + Body *CreateNodeJSONRequestBody +} + +type CreateNodeResponseObject interface { + VisitCreateNodeResponse(w http.ResponseWriter) error +} + +type CreateNode201JSONResponse Node + +func (response CreateNode201JSONResponse) VisitCreateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", 
"application/json") + w.WriteHeader(201) + + return json.NewEncoder(w).Encode(response) +} + +type CreateNode400JSONResponse ErrorResponse + +func (response CreateNode400JSONResponse) VisitCreateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type CreateNode401Response struct { +} + +func (response CreateNode401Response) VisitCreateNodeResponse(w http.ResponseWriter) error { + w.WriteHeader(401) + return nil +} + +type CreateNode500JSONResponse ErrorResponse + +func (response CreateNode500JSONResponse) VisitCreateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteNodeRequestObject struct { + PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` +} + +type DeleteNodeResponseObject interface { + VisitDeleteNodeResponse(w http.ResponseWriter) error +} + +type DeleteNode204Response struct { +} + +func (response DeleteNode204Response) VisitDeleteNodeResponse(w http.ResponseWriter) error { + w.WriteHeader(204) + return nil +} + +type DeleteNode403JSONResponse ErrorResponse + +func (response DeleteNode403JSONResponse) VisitDeleteNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteNode404JSONResponse ErrorResponse + +func (response DeleteNode404JSONResponse) VisitDeleteNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteNode500JSONResponse ErrorResponse + +func (response DeleteNode500JSONResponse) VisitDeleteNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return 
json.NewEncoder(w).Encode(response) +} + +type UpdateNodeRequestObject struct { + PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` + Body *UpdateNodeJSONRequestBody +} + +type UpdateNodeResponseObject interface { + VisitUpdateNodeResponse(w http.ResponseWriter) error +} + +type UpdateNode200JSONResponse Node + +func (response UpdateNode200JSONResponse) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNode400JSONResponse ErrorResponse + +func (response UpdateNode400JSONResponse) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNode401Response struct { +} + +func (response UpdateNode401Response) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.WriteHeader(401) + return nil +} + +type UpdateNode403JSONResponse ErrorResponse + +func (response UpdateNode403JSONResponse) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNode404JSONResponse ErrorResponse + +func (response UpdateNode404JSONResponse) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNode500JSONResponse ErrorResponse + +func (response UpdateNode500JSONResponse) VisitUpdateNodeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisherNodesRequestObject struct { + PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` +} + +type 
GetPermissionOnPublisherNodesResponseObject interface { + VisitGetPermissionOnPublisherNodesResponse(w http.ResponseWriter) error +} + +type GetPermissionOnPublisherNodes200JSONResponse struct { + CanEdit *bool `json:"canEdit,omitempty"` +} + +func (response GetPermissionOnPublisherNodes200JSONResponse) VisitGetPermissionOnPublisherNodesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisherNodes400JSONResponse ErrorResponse + +func (response GetPermissionOnPublisherNodes400JSONResponse) VisitGetPermissionOnPublisherNodesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisherNodes500JSONResponse ErrorResponse + +func (response GetPermissionOnPublisherNodes500JSONResponse) VisitGetPermissionOnPublisherNodesResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type PublishNodeVersionRequestObject struct { + PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` + Body *PublishNodeVersionJSONRequestBody +} + +type PublishNodeVersionResponseObject interface { + VisitPublishNodeVersionResponse(w http.ResponseWriter) error +} + +type PublishNodeVersion201JSONResponse struct { + NodeVersion *NodeVersion `json:"node_version,omitempty"` + + // SignedUrl The signed URL to upload the node version token. 
+ SignedUrl *string `json:"signedUrl,omitempty"` +} + +func (response PublishNodeVersion201JSONResponse) VisitPublishNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(201) + + return json.NewEncoder(w).Encode(response) +} + +type PublishNodeVersion400JSONResponse ErrorResponse + +func (response PublishNodeVersion400JSONResponse) VisitPublishNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type PublishNodeVersion403JSONResponse ErrorResponse + +func (response PublishNodeVersion403JSONResponse) VisitPublishNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type PublishNodeVersion500JSONResponse ErrorResponse + +func (response PublishNodeVersion500JSONResponse) VisitPublishNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type DeleteNodeVersionRequestObject struct { + PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` + VersionId string `json:"versionId"` +} + +type DeleteNodeVersionResponseObject interface { + VisitDeleteNodeVersionResponse(w http.ResponseWriter) error +} + +type DeleteNodeVersion204Response struct { +} + +func (response DeleteNodeVersion204Response) VisitDeleteNodeVersionResponse(w http.ResponseWriter) error { + w.WriteHeader(204) + return nil +} + +type DeleteNodeVersion404JSONResponse Error + +func (response DeleteNodeVersion404JSONResponse) VisitDeleteNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNodeVersionRequestObject struct { + 
PublisherId string `json:"publisherId"` + NodeId string `json:"nodeId"` + VersionId string `json:"versionId"` + Body *UpdateNodeVersionJSONRequestBody +} + +type UpdateNodeVersionResponseObject interface { + VisitUpdateNodeVersionResponse(w http.ResponseWriter) error +} + +type UpdateNodeVersion200JSONResponse NodeVersion + +func (response UpdateNodeVersion200JSONResponse) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNodeVersion400JSONResponse ErrorResponse + +func (response UpdateNodeVersion400JSONResponse) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNodeVersion401Response struct { +} + +func (response UpdateNodeVersion401Response) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.WriteHeader(401) + return nil +} + +type UpdateNodeVersion403JSONResponse ErrorResponse + +func (response UpdateNodeVersion403JSONResponse) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNodeVersion404JSONResponse ErrorResponse + +func (response UpdateNodeVersion404JSONResponse) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UpdateNodeVersion500JSONResponse ErrorResponse + +func (response UpdateNodeVersion500JSONResponse) VisitUpdateNodeVersionResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisherRequestObject struct { + PublisherId 
string `json:"publisherId"` +} + +type GetPermissionOnPublisherResponseObject interface { + VisitGetPermissionOnPublisherResponse(w http.ResponseWriter) error +} + +type GetPermissionOnPublisher200JSONResponse struct { + CanEdit *bool `json:"canEdit,omitempty"` +} + +func (response GetPermissionOnPublisher200JSONResponse) VisitGetPermissionOnPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisher400JSONResponse ErrorResponse + +func (response GetPermissionOnPublisher400JSONResponse) VisitGetPermissionOnPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type GetPermissionOnPublisher500JSONResponse ErrorResponse + +func (response GetPermissionOnPublisher500JSONResponse) VisitGetPermissionOnPublisherResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type ListPersonalAccessTokensRequestObject struct { + PublisherId string `json:"publisherId"` +} + +type ListPersonalAccessTokensResponseObject interface { + VisitListPersonalAccessTokensResponse(w http.ResponseWriter) error +} + +type ListPersonalAccessTokens200JSONResponse []PersonalAccessToken + +func (response ListPersonalAccessTokens200JSONResponse) VisitListPersonalAccessTokensResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListPersonalAccessTokens403JSONResponse ErrorResponse + +func (response ListPersonalAccessTokens403JSONResponse) VisitListPersonalAccessTokensResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return 
json.NewEncoder(w).Encode(response) +} + +type ListPersonalAccessTokens404JSONResponse ErrorResponse + +func (response ListPersonalAccessTokens404JSONResponse) VisitListPersonalAccessTokensResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type ListPersonalAccessTokens500JSONResponse ErrorResponse + +func (response ListPersonalAccessTokens500JSONResponse) VisitListPersonalAccessTokensResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePersonalAccessTokenRequestObject struct { + PublisherId string `json:"publisherId"` + Body *CreatePersonalAccessTokenJSONRequestBody +} + +type CreatePersonalAccessTokenResponseObject interface { + VisitCreatePersonalAccessTokenResponse(w http.ResponseWriter) error +} + +type CreatePersonalAccessToken201JSONResponse struct { + // Token The newly created personal access token. 
+ Token *string `json:"token,omitempty"` +} + +func (response CreatePersonalAccessToken201JSONResponse) VisitCreatePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(201) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePersonalAccessToken400JSONResponse ErrorResponse + +func (response CreatePersonalAccessToken400JSONResponse) VisitCreatePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePersonalAccessToken403JSONResponse ErrorResponse + +func (response CreatePersonalAccessToken403JSONResponse) VisitCreatePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type CreatePersonalAccessToken500JSONResponse ErrorResponse + +func (response CreatePersonalAccessToken500JSONResponse) VisitCreatePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type DeletePersonalAccessTokenRequestObject struct { + PublisherId string `json:"publisherId"` + TokenId string `json:"tokenId"` +} + +type DeletePersonalAccessTokenResponseObject interface { + VisitDeletePersonalAccessTokenResponse(w http.ResponseWriter) error +} + +type DeletePersonalAccessToken204Response struct { +} + +func (response DeletePersonalAccessToken204Response) VisitDeletePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.WriteHeader(204) + return nil +} + +type DeletePersonalAccessToken403JSONResponse ErrorResponse + +func (response DeletePersonalAccessToken403JSONResponse) VisitDeletePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + 
w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type DeletePersonalAccessToken404JSONResponse ErrorResponse + +func (response DeletePersonalAccessToken404JSONResponse) VisitDeletePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type DeletePersonalAccessToken500JSONResponse ErrorResponse + +func (response DeletePersonalAccessToken500JSONResponse) VisitDeletePersonalAccessTokenResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type PostUploadArtifactRequestObject struct { + Body *PostUploadArtifactJSONRequestBody +} + +type PostUploadArtifactResponseObject interface { + VisitPostUploadArtifactResponse(w http.ResponseWriter) error +} + +type PostUploadArtifact200JSONResponse struct { + Message *string `json:"message,omitempty"` +} + +func (response PostUploadArtifact200JSONResponse) VisitPostUploadArtifactResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type PostUploadArtifact400Response struct { +} + +func (response PostUploadArtifact400Response) VisitPostUploadArtifactResponse(w http.ResponseWriter) error { + w.WriteHeader(400) + return nil +} + +type PostUploadArtifact500Response struct { +} + +func (response PostUploadArtifact500Response) VisitPostUploadArtifactResponse(w http.ResponseWriter) error { + w.WriteHeader(500) + return nil +} + +type GetUserRequestObject struct { +} + +type GetUserResponseObject interface { + VisitGetUserResponse(w http.ResponseWriter) error +} + +type GetUser200JSONResponse User + +func (response GetUser200JSONResponse) VisitGetUserResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + 
return json.NewEncoder(w).Encode(response)
}

// GetUser401Response is an empty-bodied 401 (unauthenticated) response.
type GetUser401Response struct {
}

func (response GetUser401Response) VisitGetUserResponse(w http.ResponseWriter) error {
	w.WriteHeader(401)
	return nil
}

// GetUser404Response is an empty-bodied 404 (user not found) response.
type GetUser404Response struct {
}

func (response GetUser404Response) VisitGetUserResponse(w http.ResponseWriter) error {
	w.WriteHeader(404)
	return nil
}

// ListPublishersForUserRequestObject has no fields; the calling user is
// identified from the request context, not from parameters.
type ListPublishersForUserRequestObject struct {
}

// ListPublishersForUserResponseObject is implemented by every typed
// ListPublishersForUser response.
type ListPublishersForUserResponseObject interface {
	VisitListPublishersForUserResponse(w http.ResponseWriter) error
}

// ListPublishersForUser200JSONResponse writes the publisher list as 200 JSON.
type ListPublishersForUser200JSONResponse []Publisher

func (response ListPublishersForUser200JSONResponse) VisitListPublishersForUserResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(200)

	return json.NewEncoder(w).Encode(response)
}

// ListPublishersForUser400JSONResponse writes an ErrorResponse with status 400.
type ListPublishersForUser400JSONResponse ErrorResponse

func (response ListPublishersForUser400JSONResponse) VisitListPublishersForUserResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(400)

	return json.NewEncoder(w).Encode(response)
}

// ListPublishersForUser500JSONResponse writes an ErrorResponse with status 500.
type ListPublishersForUser500JSONResponse ErrorResponse

func (response ListPublishersForUser500JSONResponse) VisitListPublishersForUserResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(500)

	return json.NewEncoder(w).Encode(response)
}

// StrictServerInterface represents all server handlers.
type StrictServerInterface interface {
	// Retrieve all distinct branches for a given repo
	// (GET /branch)
	GetBranch(ctx context.Context, request GetBranchRequestObject) (GetBranchResponseObject, error)
	// Retrieve CI data for a given commit
	// (GET /gitcommit)
	GetGitcommit(ctx context.Context, request GetGitcommitRequestObject) (GetGitcommitResponseObject, error)
	// Retrieves a list of nodes
	// (GET /nodes)
	ListAllNodes(ctx context.Context, request ListAllNodesRequestObject) (ListAllNodesResponseObject, error)
	// Retrieve a specific node by ID
	// (GET /nodes/{nodeId})
	GetNode(ctx context.Context, request GetNodeRequestObject) (GetNodeResponseObject, error)
	// Returns a node version to be installed.
	// (GET /nodes/{nodeId}/install)
	InstallNode(ctx context.Context, request InstallNodeRequestObject) (InstallNodeResponseObject, error)
	// List all versions of a node
	// (GET /nodes/{nodeId}/versions)
	ListNodeVersions(ctx context.Context, request ListNodeVersionsRequestObject) (ListNodeVersionsResponseObject, error)
	// Retrieve a specific version of a node
	// (GET /nodes/{nodeId}/versions/{versionId})
	GetNodeVersion(ctx context.Context, request GetNodeVersionRequestObject) (GetNodeVersionResponseObject, error)
	// Retrieve all publishers
	// (GET /publishers)
	ListPublishers(ctx context.Context, request ListPublishersRequestObject) (ListPublishersResponseObject, error)
	// Create a new publisher
	// (POST /publishers)
	CreatePublisher(ctx context.Context, request CreatePublisherRequestObject) (CreatePublisherResponseObject, error)
	// Validate if a publisher username is available
	// (GET /publishers/validate)
	ValidatePublisher(ctx context.Context, request ValidatePublisherRequestObject) (ValidatePublisherResponseObject, error)
	// Delete a publisher
	// (DELETE /publishers/{publisherId})
	DeletePublisher(ctx context.Context, request DeletePublisherRequestObject) (DeletePublisherResponseObject, error)
	// Retrieve a publisher by ID
	// (GET /publishers/{publisherId})
	GetPublisher(ctx context.Context, request GetPublisherRequestObject) (GetPublisherResponseObject, error)
	// Update a publisher
	// (PUT /publishers/{publisherId})
	UpdatePublisher(ctx context.Context, request UpdatePublisherRequestObject) (UpdatePublisherResponseObject, error)
	// Retrieve all nodes
	// (GET /publishers/{publisherId}/nodes)
	ListNodesForPublisher(ctx context.Context, request ListNodesForPublisherRequestObject) (ListNodesForPublisherResponseObject, error)
	// Create a new custom node
	// (POST /publishers/{publisherId}/nodes)
	CreateNode(ctx context.Context, request CreateNodeRequestObject) (CreateNodeResponseObject, error)
	// Delete a specific node
	// (DELETE /publishers/{publisherId}/nodes/{nodeId})
	DeleteNode(ctx context.Context, request DeleteNodeRequestObject) (DeleteNodeResponseObject, error)
	// Update a specific node
	// (PUT /publishers/{publisherId}/nodes/{nodeId})
	UpdateNode(ctx context.Context, request UpdateNodeRequestObject) (UpdateNodeResponseObject, error)
	// Retrieve permissions the user has for a given publisher
	// (GET /publishers/{publisherId}/nodes/{nodeId}/permissions)
	GetPermissionOnPublisherNodes(ctx context.Context, request GetPermissionOnPublisherNodesRequestObject) (GetPermissionOnPublisherNodesResponseObject, error)
	// Publish a new version of a node
	// (POST /publishers/{publisherId}/nodes/{nodeId}/versions)
	PublishNodeVersion(ctx context.Context, request PublishNodeVersionRequestObject) (PublishNodeVersionResponseObject, error)
	// Unpublish (delete) a specific version of a node
	// (DELETE /publishers/{publisherId}/nodes/{nodeId}/versions/{versionId})
	DeleteNodeVersion(ctx context.Context, request DeleteNodeVersionRequestObject) (DeleteNodeVersionResponseObject, error)
	// Update changelog and deprecation status of a node version
	// (PUT /publishers/{publisherId}/nodes/{nodeId}/versions/{versionId})
	UpdateNodeVersion(ctx context.Context, request UpdateNodeVersionRequestObject) (UpdateNodeVersionResponseObject, error)
	// Retrieve permissions the user has for a given publisher
	// (GET /publishers/{publisherId}/permissions)
	GetPermissionOnPublisher(ctx context.Context, request GetPermissionOnPublisherRequestObject) (GetPermissionOnPublisherResponseObject, error)
	// Retrieve all personal access tokens for a publisher
	// (GET /publishers/{publisherId}/tokens)
	ListPersonalAccessTokens(ctx context.Context, request ListPersonalAccessTokensRequestObject) (ListPersonalAccessTokensResponseObject, error)
	// Create a new personal access token
	// (POST /publishers/{publisherId}/tokens)
	CreatePersonalAccessToken(ctx context.Context, request CreatePersonalAccessTokenRequestObject) (CreatePersonalAccessTokenResponseObject, error)
	// Delete a specific personal access token
	// (DELETE /publishers/{publisherId}/tokens/{tokenId})
	DeletePersonalAccessToken(ctx context.Context, request DeletePersonalAccessTokenRequestObject) (DeletePersonalAccessTokenResponseObject, error)
	// Receive artifacts (output files) from the ComfyUI GitHub Action
	// (POST /upload-artifact)
	PostUploadArtifact(ctx context.Context, request PostUploadArtifactRequestObject) (PostUploadArtifactResponseObject, error)
	// Get information about the calling user.
	// (GET /users)
	GetUser(ctx context.Context, request GetUserRequestObject) (GetUserResponseObject, error)
	// Retrieve all publishers for a given user
	// (GET /users/publishers/)
	ListPublishersForUser(ctx context.Context, request ListPublishersForUserRequestObject) (ListPublishersForUserResponseObject, error)
}

// Aliases for the echo strict-runtime handler/middleware function types.
type StrictHandlerFunc = strictecho.StrictEchoHandlerFunc
type StrictMiddlewareFunc = strictecho.StrictEchoMiddlewareFunc

// NewStrictHandler adapts a StrictServerInterface to the echo ServerInterface,
// applying the given middlewares around every operation.
func NewStrictHandler(ssi StrictServerInterface, middlewares []StrictMiddlewareFunc) ServerInterface {
	return &strictHandler{ssi: ssi, middlewares: middlewares}
}

// strictHandler is the generated adapter: it decodes echo parameters/bodies
// into typed request objects and writes typed response objects back.
type strictHandler struct {
	ssi         StrictServerInterface
	middlewares []StrictMiddlewareFunc
}

// GetBranch operation middleware
func (sh *strictHandler) GetBranch(ctx echo.Context, params GetBranchParams) error {
	var request GetBranchRequestObject

	request.Params = params

	handler := func(ctx echo.Context, request interface{}) (interface{}, error) {
		return sh.ssi.GetBranch(ctx.Request().Context(), request.(GetBranchRequestObject))
	}
	for _, middleware := range sh.middlewares {
		handler = middleware(handler, "GetBranch")
	}

	response, err := handler(ctx, request)

	if err != nil {
		return err
	} else if validResponse, ok := response.(GetBranchResponseObject); ok {
		return validResponse.VisitGetBranchResponse(ctx.Response())
	} else if response != nil {
		return fmt.Errorf("unexpected response type: %T", response)
	}
	return nil
}

// GetGitcommit operation middleware
func (sh *strictHandler) GetGitcommit(ctx echo.Context, params GetGitcommitParams) error {
	var request GetGitcommitRequestObject

	request.Params = params

	handler := func(ctx echo.Context, request interface{}) (interface{}, error) {
		return sh.ssi.GetGitcommit(ctx.Request().Context(), request.(GetGitcommitRequestObject))
	}
	for _, middleware := range sh.middlewares {
		handler = middleware(handler, "GetGitcommit")
	}

	response, err :=
handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetGitcommitResponseObject); ok { + return validResponse.VisitGetGitcommitResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ListAllNodes operation middleware +func (sh *strictHandler) ListAllNodes(ctx echo.Context, params ListAllNodesParams) error { + var request ListAllNodesRequestObject + + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListAllNodes(ctx.Request().Context(), request.(ListAllNodesRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListAllNodes") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListAllNodesResponseObject); ok { + return validResponse.VisitListAllNodesResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetNode operation middleware +func (sh *strictHandler) GetNode(ctx echo.Context, nodeId string) error { + var request GetNodeRequestObject + + request.NodeId = nodeId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.GetNode(ctx.Request().Context(), request.(GetNodeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetNode") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetNodeResponseObject); ok { + return validResponse.VisitGetNodeResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// InstallNode operation middleware +func (sh *strictHandler) InstallNode(ctx echo.Context, nodeId string, params 
InstallNodeParams) error { + var request InstallNodeRequestObject + + request.NodeId = nodeId + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.InstallNode(ctx.Request().Context(), request.(InstallNodeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "InstallNode") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(InstallNodeResponseObject); ok { + return validResponse.VisitInstallNodeResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ListNodeVersions operation middleware +func (sh *strictHandler) ListNodeVersions(ctx echo.Context, nodeId string) error { + var request ListNodeVersionsRequestObject + + request.NodeId = nodeId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListNodeVersions(ctx.Request().Context(), request.(ListNodeVersionsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListNodeVersions") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListNodeVersionsResponseObject); ok { + return validResponse.VisitListNodeVersionsResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetNodeVersion operation middleware +func (sh *strictHandler) GetNodeVersion(ctx echo.Context, nodeId string, versionId string) error { + var request GetNodeVersionRequestObject + + request.NodeId = nodeId + request.VersionId = versionId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.GetNodeVersion(ctx.Request().Context(), request.(GetNodeVersionRequestObject)) + } + for _, middleware := range 
sh.middlewares { + handler = middleware(handler, "GetNodeVersion") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetNodeVersionResponseObject); ok { + return validResponse.VisitGetNodeVersionResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ListPublishers operation middleware +func (sh *strictHandler) ListPublishers(ctx echo.Context) error { + var request ListPublishersRequestObject + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListPublishers(ctx.Request().Context(), request.(ListPublishersRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListPublishers") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListPublishersResponseObject); ok { + return validResponse.VisitListPublishersResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// CreatePublisher operation middleware +func (sh *strictHandler) CreatePublisher(ctx echo.Context) error { + var request CreatePublisherRequestObject + + var body CreatePublisherJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.CreatePublisher(ctx.Request().Context(), request.(CreatePublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CreatePublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(CreatePublisherResponseObject); ok { + return validResponse.VisitCreatePublisherResponse(ctx.Response()) + } else if response != nil { + return 
fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ValidatePublisher operation middleware +func (sh *strictHandler) ValidatePublisher(ctx echo.Context, params ValidatePublisherParams) error { + var request ValidatePublisherRequestObject + + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ValidatePublisher(ctx.Request().Context(), request.(ValidatePublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ValidatePublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ValidatePublisherResponseObject); ok { + return validResponse.VisitValidatePublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// DeletePublisher operation middleware +func (sh *strictHandler) DeletePublisher(ctx echo.Context, publisherId string) error { + var request DeletePublisherRequestObject + + request.PublisherId = publisherId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.DeletePublisher(ctx.Request().Context(), request.(DeletePublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DeletePublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(DeletePublisherResponseObject); ok { + return validResponse.VisitDeletePublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetPublisher operation middleware +func (sh *strictHandler) GetPublisher(ctx echo.Context, publisherId string) error { + var request GetPublisherRequestObject + + request.PublisherId = publisherId + + handler := func(ctx echo.Context, 
request interface{}) (interface{}, error) { + return sh.ssi.GetPublisher(ctx.Request().Context(), request.(GetPublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetPublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetPublisherResponseObject); ok { + return validResponse.VisitGetPublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// UpdatePublisher operation middleware +func (sh *strictHandler) UpdatePublisher(ctx echo.Context, publisherId string) error { + var request UpdatePublisherRequestObject + + request.PublisherId = publisherId + + var body UpdatePublisherJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.UpdatePublisher(ctx.Request().Context(), request.(UpdatePublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "UpdatePublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(UpdatePublisherResponseObject); ok { + return validResponse.VisitUpdatePublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ListNodesForPublisher operation middleware +func (sh *strictHandler) ListNodesForPublisher(ctx echo.Context, publisherId string) error { + var request ListNodesForPublisherRequestObject + + request.PublisherId = publisherId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListNodesForPublisher(ctx.Request().Context(), request.(ListNodesForPublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + 
handler = middleware(handler, "ListNodesForPublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListNodesForPublisherResponseObject); ok { + return validResponse.VisitListNodesForPublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// CreateNode operation middleware +func (sh *strictHandler) CreateNode(ctx echo.Context, publisherId string) error { + var request CreateNodeRequestObject + + request.PublisherId = publisherId + + var body CreateNodeJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.CreateNode(ctx.Request().Context(), request.(CreateNodeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CreateNode") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(CreateNodeResponseObject); ok { + return validResponse.VisitCreateNodeResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// DeleteNode operation middleware +func (sh *strictHandler) DeleteNode(ctx echo.Context, publisherId string, nodeId string) error { + var request DeleteNodeRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.DeleteNode(ctx.Request().Context(), request.(DeleteNodeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DeleteNode") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(DeleteNodeResponseObject); ok { + return 
validResponse.VisitDeleteNodeResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// UpdateNode operation middleware +func (sh *strictHandler) UpdateNode(ctx echo.Context, publisherId string, nodeId string) error { + var request UpdateNodeRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + + var body UpdateNodeJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.UpdateNode(ctx.Request().Context(), request.(UpdateNodeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "UpdateNode") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(UpdateNodeResponseObject); ok { + return validResponse.VisitUpdateNodeResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetPermissionOnPublisherNodes operation middleware +func (sh *strictHandler) GetPermissionOnPublisherNodes(ctx echo.Context, publisherId string, nodeId string) error { + var request GetPermissionOnPublisherNodesRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.GetPermissionOnPublisherNodes(ctx.Request().Context(), request.(GetPermissionOnPublisherNodesRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetPermissionOnPublisherNodes") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetPermissionOnPublisherNodesResponseObject); ok { + return validResponse.VisitGetPermissionOnPublisherNodesResponse(ctx.Response()) + } 
else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// PublishNodeVersion operation middleware +func (sh *strictHandler) PublishNodeVersion(ctx echo.Context, publisherId string, nodeId string) error { + var request PublishNodeVersionRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + + var body PublishNodeVersionJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.PublishNodeVersion(ctx.Request().Context(), request.(PublishNodeVersionRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "PublishNodeVersion") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(PublishNodeVersionResponseObject); ok { + return validResponse.VisitPublishNodeVersionResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// DeleteNodeVersion operation middleware +func (sh *strictHandler) DeleteNodeVersion(ctx echo.Context, publisherId string, nodeId string, versionId string) error { + var request DeleteNodeVersionRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + request.VersionId = versionId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.DeleteNodeVersion(ctx.Request().Context(), request.(DeleteNodeVersionRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DeleteNodeVersion") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(DeleteNodeVersionResponseObject); ok { + return validResponse.VisitDeleteNodeVersionResponse(ctx.Response()) + } else if response != nil { + return 
fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// UpdateNodeVersion operation middleware +func (sh *strictHandler) UpdateNodeVersion(ctx echo.Context, publisherId string, nodeId string, versionId string) error { + var request UpdateNodeVersionRequestObject + + request.PublisherId = publisherId + request.NodeId = nodeId + request.VersionId = versionId + + var body UpdateNodeVersionJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.UpdateNodeVersion(ctx.Request().Context(), request.(UpdateNodeVersionRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "UpdateNodeVersion") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(UpdateNodeVersionResponseObject); ok { + return validResponse.VisitUpdateNodeVersionResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetPermissionOnPublisher operation middleware +func (sh *strictHandler) GetPermissionOnPublisher(ctx echo.Context, publisherId string) error { + var request GetPermissionOnPublisherRequestObject + + request.PublisherId = publisherId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.GetPermissionOnPublisher(ctx.Request().Context(), request.(GetPermissionOnPublisherRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetPermissionOnPublisher") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetPermissionOnPublisherResponseObject); ok { + return validResponse.VisitGetPermissionOnPublisherResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected 
response type: %T", response) + } + return nil +} + +// ListPersonalAccessTokens operation middleware +func (sh *strictHandler) ListPersonalAccessTokens(ctx echo.Context, publisherId string) error { + var request ListPersonalAccessTokensRequestObject + + request.PublisherId = publisherId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListPersonalAccessTokens(ctx.Request().Context(), request.(ListPersonalAccessTokensRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListPersonalAccessTokens") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListPersonalAccessTokensResponseObject); ok { + return validResponse.VisitListPersonalAccessTokensResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// CreatePersonalAccessToken operation middleware +func (sh *strictHandler) CreatePersonalAccessToken(ctx echo.Context, publisherId string) error { + var request CreatePersonalAccessTokenRequestObject + + request.PublisherId = publisherId + + var body CreatePersonalAccessTokenJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.CreatePersonalAccessToken(ctx.Request().Context(), request.(CreatePersonalAccessTokenRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CreatePersonalAccessToken") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(CreatePersonalAccessTokenResponseObject); ok { + return validResponse.VisitCreatePersonalAccessTokenResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil 
+} + +// DeletePersonalAccessToken operation middleware +func (sh *strictHandler) DeletePersonalAccessToken(ctx echo.Context, publisherId string, tokenId string) error { + var request DeletePersonalAccessTokenRequestObject + + request.PublisherId = publisherId + request.TokenId = tokenId + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.DeletePersonalAccessToken(ctx.Request().Context(), request.(DeletePersonalAccessTokenRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DeletePersonalAccessToken") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(DeletePersonalAccessTokenResponseObject); ok { + return validResponse.VisitDeletePersonalAccessTokenResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// PostUploadArtifact operation middleware +func (sh *strictHandler) PostUploadArtifact(ctx echo.Context) error { + var request PostUploadArtifactRequestObject + + var body PostUploadArtifactJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.PostUploadArtifact(ctx.Request().Context(), request.(PostUploadArtifactRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "PostUploadArtifact") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(PostUploadArtifactResponseObject); ok { + return validResponse.VisitPostUploadArtifactResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// GetUser operation middleware +func (sh *strictHandler) GetUser(ctx echo.Context) error { + var request 
GetUserRequestObject + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.GetUser(ctx.Request().Context(), request.(GetUserRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetUser") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(GetUserResponseObject); ok { + return validResponse.VisitGetUserResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// ListPublishersForUser operation middleware +func (sh *strictHandler) ListPublishersForUser(ctx echo.Context) error { + var request ListPublishersForUserRequestObject + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.ListPublishersForUser(ctx.Request().Context(), request.(ListPublishersForUserRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListPublishersForUser") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(ListPublishersForUserResponseObject); ok { + return validResponse.VisitListPublishersForUserResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("unexpected response type: %T", response) + } + return nil +} + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/+xdfXPTOLf/KhrfZ2ZhbkgKy90/+l8p0O1eWjqUssNCb0axTxKBLRlJbjfL9Lvf0Zst", + "x7Lj9CU0S2eeeejGsl7O+Z1XHUvfo5hlOaNApYh2v0cinkOG9Z97sSSM/sEm70AUqVQ/5ZzlwCUB3QDr", + "BmNe0DFJ1A8JiJiTXP0a7UaHCVBJpgQ4YlMk54B4QZGcE4G47hFNIGV0JpBk0SCSixyi3UhITugsuhqo", + "iWVEjudYzJudv58DUk9c16ZxRzehGapODl/27iIDIfAMwv3Yh707kyRr6emMkr+ReiwkznJ0OQfqdYku", + "sUAZTiAaRFPGMyyj3YhQ+dvzajBCJcyAq9GAJh1DAU30SG7WX9gEYYHw0hyG/caaETnmkLPwWOqJIJLx", + 
"BaI4gxBpZnkxNj8ud3BwcobUE1QISEKvhth7Rsm3AhCpcDhlvFynwaC/sqIgwb4V5rEkdDYWCyEha470", + "1rVApkXrNPOFZDyejy+AC/3qck8ni/eqAbINWjsSEvMuEOnnt8hbIRnHMxhPSapH/A+HabQb/deoUh8j", + "qztGp6bta9X0ahBdMv51mrLLsWZ7Y7bHuJqja9pc8VX5C5t8gViqjl9xznhTLSUgMUlFiEvqD5wi0wIS", + "RKhZuqI0nrBC6lmA6hcxjuaESqFBw0Gw9EIxmEhFMSIh0yM0GGN/wJzjhfrvVqWxh+IUMEeYJihmNCYC", + "kNfCkURPZrgGQd6ByBkV0CQMOHo1Ju1NsjkMh28F4ZBEu59sF9UL54FZHLMkMDgu5Lxl9BpdQs/ZJU0Z", + "TkQY6rTIJsbGlA0d8ShLYBjEM4lD0nf27g2SrHz1F4FUu2FfhaNmUzSUTnguVU8pliCkrxK6hEtR94Nt", + "ql4mMVhmN+eSYzl363lzuP/q+PQVUgKMCPXXWGnm4PzCYqv6T4jIU2z0+cpl5sUkJWIOfNUKT8qGCnxa", + "rYaHxxeg9Ayyqjc8AYMO3VW5zD6MX0EUiWcajn0VwVWLoHyouF6Xl3iO6QxSFlj7aZFlmC/Ugk0r4xAY", + "rhLhbEfQ+eCAJSR7soWhWIJWSNpwKGo4O6S8DvtyzWKoN55oMzQICXYONAEal5q5rgBTIqRaRU5y5LdF", + "TuWgyaLG0/5qN4GcQ6ymG3BMaULUI4HItEYxRASqXvT4PmEsBUx9ZXTG02bPn94WMi8kekvTxTmyoHJv", + "1Ify0NquXho/t/oN7z1eVbpngKYsTdml9kwgw1SS2DUjdDZER4VQbrjTWs4/ahHiFSA+yxUa3sG3AoRc", + "C9Fq9uVjawUnatI+BC3Why1Ia2X3n3OQc+C1vlYyOrTUE+BC+Q97cQxCvGdfISS37RJWQ0dY3KTq9LrC", + "VrOiYddniPZQxjhULlDA5dCTULaPSiWSiXJChzd3ug+I3HdR0UqfO2x03lm9METa8HtmR895iPYxVXDG", + "SJAsT2sOVViNOyZ2cMoMllvmI6y57wZULRAHWXCqiFmojg3vwkMGceVbxt5oCgOotLK3AqIbeD2O6+WE", + "huhQ/iIQ/qrMFENYoYorFg7R6ZwVaaIYl7JL4DFuwVvKZqzTdpeD/SKQahzsJQPlEXQaJNvE+UnVGnwb", + "1MuHOQLnfixbKIfwZmjHCh7DOGYJlOF0s1GR54zL4LNLmAgiW7z5dvTZqTYweAOeW0IG2cBZ2uJWqidO", + "rhVIgmxo9Kda9mbLmWrcTY4zESIGZJikLckU9QjhJOFKPxhaEKFXcL0Iokcf7b65Vo6rOggRwI/eG8tX", + "0cNYhRWBzIUXbLggw2YNbm47bEe64z7mQ0MlHhchJ01zOFbeWT+CbAUQiNhLMkJXOrtWouZYIKxeQDkn", + "FySFumPl+btE7OU5Zxc9HGknrQJh+0q4y1sHrdKHEBecyMWpknTDoReAOfC9woB1ov/rtQPOH3++jwYm", + "261np59WY82lzKMr1TGhU6OBiVQCEe2zbLpAeyeHkeeNR0+HOzZTSHFOot3o1+HO8Fk0iJSs6NmMJhzT", + "WE9lBjLk2igfQiCcpsg0BYMdjGbkAqgOSBU1bDaS0cMk2o0OQL4wHauxOM5Aatv2qS0Fq0R0SlIJHE10", + "aKtAE30rgC8ixxodKo9tlrZK/0hegKUZNguYYr0xEMWKKJgyushYIUaaRmeHAdadq/5MfkpT5dnOjnZz", + "mHI1NVVwnqcKVITR0Rdh3JBqyLoMOjrdLBC/WvZ7oj2KdGtlhMoxrgbR853nIcblDFEm0ZQVNFHN/scs", + 
"allWpHJ3UiSAXwA3eT0DXRPOGwxwAhegQZAQIQmNZTsa9NujGZF2s6EPtHhBxQB9YRPz/zYLLgbai/S1", + "rEBYCBYT5T+iSyLn5dBmtCAUD8q59EDj8s6LRibIeK7cWqxW2wZP0/4wiXw0NrREaEy2nKivpEFNZP/Q", + "jN0uGWUHp2YrYO0Z0EC6e+1ZuBePjYiuOQUDKDeJEj9rz2LiFM+a4+cKYzZrK5mKnjTq24bJjQcR0DtP", + "m5ndFqKXKWKtJtSohMZpkei4Tk+oa/RT8k/bDHb6TmEt3dvg6w9XtV/cNnBd2XY528v7x4EYSDKJ02PN", + "nLfTEzwDX4N7BO2lss1DpBaICVUy3l/ZtWp3k7G4bf3uZMvX6FZxap1OWWJI0a3PFXAJ1RraRa76TYRj", + "zoRR+GXQJJoa+w0Rci9Nj/VoKzT2iSe0Xu5S6JHvRnKPy9HMSEZXFJyWQtsybkqMCeorsLcrKmb0BteO", + "8N8kKzKPhmZV3lqa21QlEHpJnN55C4hZHtyA3C84Byp9dRycgxbSgL+ufm6sBl9gkuJJCu1dlXLe3Z+a", + "Vnd/vfRCm4wYiQ8K8gVOSYIIzQs5cFqFmF9b9cRxQEH0xk8XT+t7uoEVrqN4lM6oU8HtZH2KjBY4r/TP", + "6Lv65zC5WqmIlDKwu+6qZ4xEDjGZkrjcSGh4isdm32NJ5Whh1umFUpbNHDrjkNu2fKtFrMkF9bujgQHJ", + "r5uDwGvGJyRJgHrw3MzIetl0K7C/DEs0WaDDl70EYESokDhNuwTBypezi5V9t+/q9Q8QkHIvyuz7I+0C", + "lDOzKYWmzByabsJyc40yhNsRtYbNPl1aiD9szdlHh1PEMiIlJAOfHN4OndtWafOUq31uzxRjqSAR7Ub/", + "9/lz8t+fPw+9f/5zB25y71qNNp2hcFJuIIlC7y5NizRdDD0TtSlxqpk+UcRzUzI2wUnJGJN7Hf60Sm54", + "X7WcjQm0pDlmSYYm4FSQzceuVnf2bT8CaQYOHrjFfbXkvb1mr6JqKV3Y4MYb6z+p4MqntbgTXG6p0dVE", + "UhRyxDF+oat2sRAs4dOJwtF3+1fdFQ26lB9Kk3AneBx0ld34BhY9UgEBRmdnhy8ft5jbclX3yrntsFcv", + "28tml32ri0qeNicSjhHb5496EFolJFU+p1M7n1TNNqFDazWbqzSoV/hXzXLTvs4LnOgyQxBy4KJ74/ho", + "l+yeQ6eW2PPA4rH9/GoQ5UwE8LGvq4Iqlhn1A0K+YMni1lbsQaJeRa403FUDlE/vauDA5r8uTbG1UTWX", + "+z6i8LkhzXKlhKmnJ/9A8mM98fsiJHb/Xxt7f+f/07kymZUMGfArNQuXlQy1iVBd4440f7AprQomAvbn", + "EH8tyyGqOihX66aLI1IOOFkgib9CIND/YMfw5XNluB8YSTLkptsWQLu2G3VAlorLxF6Z4W2mhHkBfmlJ", + "SUH3ygBNcSoAMTkHfklqZYMdpbxNYJ253i3JlDHm5bbVfQjCMyIEoTOPDqYoTrP0fsakDsmKhbhNGPz8", + "fg8J/F7+baOBBFIwAlkXo5f69w4hCnjkXt83FInnLWVnev1myiHLs8GkRjWde+ku91Xnhs0+vNq9obbA", + "8UeBZGfTLo9Lvj5Ab91IrVJey7sGyx53EcCY+TBmgzD74d78xqFdaBL/a7z5B2lc1xAYGetjCLo8ikbl", + "TTjvLV4zvh1W4wZ1I6tT3/cuazPcRuTWsjptVRnduZze1RT3zKpU1RSbSw91VnBsQ1Jo2NuObHVuJi6E", + "ZNlyNtzftexW4rXype4o8c7FZ3B3G6LBWjRdi9QWZD4UJm1XZFvbWQubhvagY8ug/SNNzIaKBP9lkcqD", + 
"Ntmu8GiVNlnDsI5y4Dot3VUqdADypGz2lpZRU8tHB9thdG+y4xFj+ioh/pf7a+1UeJvnHvUfds/XSOd5", + "hKt/Ee1/jnMLaYRwQV04jrNdr1vDtM1Wuy4W1J4a1ydPodpe87w0d4zL2BzjMi4PgOk+984qy9rAbZ2d", + "B6X4dgPcJu2uSQ9BZhTCh1npYyT1Y3eSVZHbc6xgucrU7mSv/lK/aVrhsuzHoXkr4u+Hgot+nofVbDaq", + "v1alWz/lulwnujrm3ypNG+7otopIA2mED+4IWlrJ5SObVnh89zuIITh+aFZ4ruUEu5W4dTy+RhFmGW4v", + "RUbGwWY0NUcWVsfZYZp4h80hIbEslr+Ta4zdrAuq4vmfG7Z3kyIIHmH4A9IGHeXXpTxuQQJh+JBBWKm5", + "tjaJEFZtanW+bqt9B7C+kb9JbuFe78g+ZAN+umyAjo9WfK/RPFtWbH9hQejE3DXrDILnrv5cn9jbNW+v", + "3ah/txJkqJW2kJxp2KAjTPEMMrXelV+2BFC3jSV3IeG561RSy4HI+hAtuEwXZYVE+Dzk6+WADIe3qPbi", + "IfdznY9tQojplvMeVnX0Xf/b72uADSuGcOxr53v7CRtDwJ+88MMQ4V9W+XFN0TH5+ieYSzLFsezYfWJC", + "nunGe67tbe3rmEMix62nnU+K+CvI8Yozge239aYxupwDN8fdM3NOvz2wlJuDosM3ZumDE8cpm4nxLBYt", + "h1j7t+bYoxWRemWIYDZEM7E7Gpk5PFHTGqlHPa6uu9aFcvZQTtfmOjfJ+dd/2e7sV2Pm+3317HP08ePH", + "j0+Ojp68fPn+9993j452T0//+hyhR892nv725OnOk6c773d2dvX//nocnEeR4PYbzfaLBPvHGjVe3+QV", + "dV/YZLz2VXHBO+FEz1vgHMV5EbyQxwBYX6tW4TJ4N4FiIEYClLkyeWRubkjB5ZmXAh3sn2r8PhKPFYR9", + "8WiBsGkyaHvwLHh0f/CSv3erL/hruyqynfwtZNv09Xcr7rBrO9g36A77+8qakiUsSwJpfHn3IdY1Sl3y", + "G9pkUNO5y3Ov0c4TvuvtWt8k9dV56dzKmOHUc28QhxjIBZitaWfu6sfxdZz0yF26/0ZHu+oZlIML9MgX", + "vsdoyllmzlW2ZuWAyN+LCTKn5JpjXwvRdazHAUh9FcEd7kDYazEaxH77v+t+u9Q8HfP1+vt1ByBbrmiM", + "cZq6r6GHHvH8aKHn+SivGb8Nsj4ck3Lfj0mpJXQL0ZHL1RhV3ZuIUN8ioq+EUDYS52Sofckh47Po6vzq", + "/wMAAP//lLylj0N5AAA=", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + 
return buf.Bytes(), nil +} + +var rawSpec = decodeSpecCached() + +// a naive cached of a decoded swagger spec +func decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. +// Externally referenced files must be embedded in the corresponding golang packages. +// Urls can be supported but this task was out of the scope. +func GetSwagger() (swagger *openapi3.T, err error) { + resolvePath := PathToRawSpec("") + + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { + pathToFile := url.String() + pathToFile = path.Clean(pathToFile) + getSpec, ok := resolvePath[pathToFile] + if !ok { + err1 := fmt.Errorf("path not found: %s", pathToFile) + return nil, err1 + } + return getSpec() + } + var specData []byte + specData, err = rawSpec() + if err != nil { + return + } + swagger, err = loader.LoadFromData(specData) + if err != nil { + return + } + return +} diff --git a/drip/codegen.yaml b/drip/codegen.yaml new file mode 100644 index 0000000..eb24a73 --- /dev/null +++ b/drip/codegen.yaml @@ -0,0 +1,7 @@ +output: api.gen.go +package: drip +generate: + models: true + echo-server: true + strict-server: true + embedded-spec: true diff --git a/drip/generate.go b/drip/generate.go new file mode 100644 index 0000000..b2b7f4e --- /dev/null 
+++ b/drip/generate.go @@ -0,0 +1,2 @@ +//go:generate go run github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen --config=codegen.yaml ../openapi.yml +package drip diff --git a/ent/ciworkflowresult.go b/ent/ciworkflowresult.go new file mode 100644 index 0000000..597504c --- /dev/null +++ b/ent/ciworkflowresult.go @@ -0,0 +1,276 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/storagefile" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +// CIWorkflowResult is the model entity for the CIWorkflowResult schema. +type CIWorkflowResult struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. + UpdateTime time.Time `json:"update_time,omitempty"` + // OperatingSystem holds the value of the "operating_system" field. + OperatingSystem string `json:"operating_system,omitempty"` + // GpuType holds the value of the "gpu_type" field. + GpuType string `json:"gpu_type,omitempty"` + // PytorchVersion holds the value of the "pytorch_version" field. + PytorchVersion string `json:"pytorch_version,omitempty"` + // WorkflowName holds the value of the "workflow_name" field. + WorkflowName string `json:"workflow_name,omitempty"` + // RunID holds the value of the "run_id" field. + RunID string `json:"run_id,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // StartTime holds the value of the "start_time" field. + StartTime int64 `json:"start_time,omitempty"` + // EndTime holds the value of the "end_time" field. + EndTime int64 `json:"end_time,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. 
+ // The values are being populated by the CIWorkflowResultQuery when eager-loading is set. + Edges CIWorkflowResultEdges `json:"edges"` + ci_workflow_result_storage_file *uuid.UUID + git_commit_results *uuid.UUID + selectValues sql.SelectValues +} + +// CIWorkflowResultEdges holds the relations/edges for other nodes in the graph. +type CIWorkflowResultEdges struct { + // Gitcommit holds the value of the gitcommit edge. + Gitcommit *GitCommit `json:"gitcommit,omitempty"` + // StorageFile holds the value of the storage_file edge. + StorageFile *StorageFile `json:"storage_file,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// GitcommitOrErr returns the Gitcommit value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e CIWorkflowResultEdges) GitcommitOrErr() (*GitCommit, error) { + if e.Gitcommit != nil { + return e.Gitcommit, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: gitcommit.Label} + } + return nil, &NotLoadedError{edge: "gitcommit"} +} + +// StorageFileOrErr returns the StorageFile value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e CIWorkflowResultEdges) StorageFileOrErr() (*StorageFile, error) { + if e.StorageFile != nil { + return e.StorageFile, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: storagefile.Label} + } + return nil, &NotLoadedError{edge: "storage_file"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*CIWorkflowResult) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case ciworkflowresult.FieldStartTime, ciworkflowresult.FieldEndTime: + values[i] = new(sql.NullInt64) + case ciworkflowresult.FieldOperatingSystem, ciworkflowresult.FieldGpuType, ciworkflowresult.FieldPytorchVersion, ciworkflowresult.FieldWorkflowName, ciworkflowresult.FieldRunID, ciworkflowresult.FieldStatus: + values[i] = new(sql.NullString) + case ciworkflowresult.FieldCreateTime, ciworkflowresult.FieldUpdateTime: + values[i] = new(sql.NullTime) + case ciworkflowresult.FieldID: + values[i] = new(uuid.UUID) + case ciworkflowresult.ForeignKeys[0]: // ci_workflow_result_storage_file + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + case ciworkflowresult.ForeignKeys[1]: // git_commit_results + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the CIWorkflowResult fields. 
+func (cwr *CIWorkflowResult) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case ciworkflowresult.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + cwr.ID = *value + } + case ciworkflowresult.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + cwr.CreateTime = value.Time + } + case ciworkflowresult.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + cwr.UpdateTime = value.Time + } + case ciworkflowresult.FieldOperatingSystem: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field operating_system", values[i]) + } else if value.Valid { + cwr.OperatingSystem = value.String + } + case ciworkflowresult.FieldGpuType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field gpu_type", values[i]) + } else if value.Valid { + cwr.GpuType = value.String + } + case ciworkflowresult.FieldPytorchVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field pytorch_version", values[i]) + } else if value.Valid { + cwr.PytorchVersion = value.String + } + case ciworkflowresult.FieldWorkflowName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field workflow_name", values[i]) + } else if value.Valid { + cwr.WorkflowName = value.String + } + case ciworkflowresult.FieldRunID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field run_id", 
values[i]) + } else if value.Valid { + cwr.RunID = value.String + } + case ciworkflowresult.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + cwr.Status = value.String + } + case ciworkflowresult.FieldStartTime: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_time", values[i]) + } else if value.Valid { + cwr.StartTime = value.Int64 + } + case ciworkflowresult.FieldEndTime: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_time", values[i]) + } else if value.Valid { + cwr.EndTime = value.Int64 + } + case ciworkflowresult.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field ci_workflow_result_storage_file", values[i]) + } else if value.Valid { + cwr.ci_workflow_result_storage_file = new(uuid.UUID) + *cwr.ci_workflow_result_storage_file = *value.S.(*uuid.UUID) + } + case ciworkflowresult.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field git_commit_results", values[i]) + } else if value.Valid { + cwr.git_commit_results = new(uuid.UUID) + *cwr.git_commit_results = *value.S.(*uuid.UUID) + } + default: + cwr.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the CIWorkflowResult. +// This includes values selected through modifiers, order, etc. +func (cwr *CIWorkflowResult) Value(name string) (ent.Value, error) { + return cwr.selectValues.Get(name) +} + +// QueryGitcommit queries the "gitcommit" edge of the CIWorkflowResult entity. 
+func (cwr *CIWorkflowResult) QueryGitcommit() *GitCommitQuery { + return NewCIWorkflowResultClient(cwr.config).QueryGitcommit(cwr) +} + +// QueryStorageFile queries the "storage_file" edge of the CIWorkflowResult entity. +func (cwr *CIWorkflowResult) QueryStorageFile() *StorageFileQuery { + return NewCIWorkflowResultClient(cwr.config).QueryStorageFile(cwr) +} + +// Update returns a builder for updating this CIWorkflowResult. +// Note that you need to call CIWorkflowResult.Unwrap() before calling this method if this CIWorkflowResult +// was returned from a transaction, and the transaction was committed or rolled back. +func (cwr *CIWorkflowResult) Update() *CIWorkflowResultUpdateOne { + return NewCIWorkflowResultClient(cwr.config).UpdateOne(cwr) +} + +// Unwrap unwraps the CIWorkflowResult entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (cwr *CIWorkflowResult) Unwrap() *CIWorkflowResult { + _tx, ok := cwr.config.driver.(*txDriver) + if !ok { + panic("ent: CIWorkflowResult is not a transactional entity") + } + cwr.config.driver = _tx.drv + return cwr +} + +// String implements the fmt.Stringer. 
+func (cwr *CIWorkflowResult) String() string { + var builder strings.Builder + builder.WriteString("CIWorkflowResult(") + builder.WriteString(fmt.Sprintf("id=%v, ", cwr.ID)) + builder.WriteString("create_time=") + builder.WriteString(cwr.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(cwr.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("operating_system=") + builder.WriteString(cwr.OperatingSystem) + builder.WriteString(", ") + builder.WriteString("gpu_type=") + builder.WriteString(cwr.GpuType) + builder.WriteString(", ") + builder.WriteString("pytorch_version=") + builder.WriteString(cwr.PytorchVersion) + builder.WriteString(", ") + builder.WriteString("workflow_name=") + builder.WriteString(cwr.WorkflowName) + builder.WriteString(", ") + builder.WriteString("run_id=") + builder.WriteString(cwr.RunID) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(cwr.Status) + builder.WriteString(", ") + builder.WriteString("start_time=") + builder.WriteString(fmt.Sprintf("%v", cwr.StartTime)) + builder.WriteString(", ") + builder.WriteString("end_time=") + builder.WriteString(fmt.Sprintf("%v", cwr.EndTime)) + builder.WriteByte(')') + return builder.String() +} + +// CIWorkflowResults is a parsable slice of CIWorkflowResult. +type CIWorkflowResults []*CIWorkflowResult diff --git a/ent/ciworkflowresult/ciworkflowresult.go b/ent/ciworkflowresult/ciworkflowresult.go new file mode 100644 index 0000000..de32cde --- /dev/null +++ b/ent/ciworkflowresult/ciworkflowresult.go @@ -0,0 +1,192 @@ +// Code generated by ent, DO NOT EDIT. + +package ciworkflowresult + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the ciworkflowresult type in the database. 
+ Label = "ci_workflow_result" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldOperatingSystem holds the string denoting the operating_system field in the database. + FieldOperatingSystem = "operating_system" + // FieldGpuType holds the string denoting the gpu_type field in the database. + FieldGpuType = "gpu_type" + // FieldPytorchVersion holds the string denoting the pytorch_version field in the database. + FieldPytorchVersion = "pytorch_version" + // FieldWorkflowName holds the string denoting the workflow_name field in the database. + FieldWorkflowName = "workflow_name" + // FieldRunID holds the string denoting the run_id field in the database. + FieldRunID = "run_id" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldStartTime holds the string denoting the start_time field in the database. + FieldStartTime = "start_time" + // FieldEndTime holds the string denoting the end_time field in the database. + FieldEndTime = "end_time" + // EdgeGitcommit holds the string denoting the gitcommit edge name in mutations. + EdgeGitcommit = "gitcommit" + // EdgeStorageFile holds the string denoting the storage_file edge name in mutations. + EdgeStorageFile = "storage_file" + // Table holds the table name of the ciworkflowresult in the database. + Table = "ci_workflow_results" + // GitcommitTable is the table that holds the gitcommit relation/edge. + GitcommitTable = "ci_workflow_results" + // GitcommitInverseTable is the table name for the GitCommit entity. + // It exists in this package in order to avoid circular dependency with the "gitcommit" package. 
+ GitcommitInverseTable = "git_commits" + // GitcommitColumn is the table column denoting the gitcommit relation/edge. + GitcommitColumn = "git_commit_results" + // StorageFileTable is the table that holds the storage_file relation/edge. + StorageFileTable = "ci_workflow_results" + // StorageFileInverseTable is the table name for the StorageFile entity. + // It exists in this package in order to avoid circular dependency with the "storagefile" package. + StorageFileInverseTable = "storage_files" + // StorageFileColumn is the table column denoting the storage_file relation/edge. + StorageFileColumn = "ci_workflow_result_storage_file" +) + +// Columns holds all SQL columns for ciworkflowresult fields. +var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldOperatingSystem, + FieldGpuType, + FieldPytorchVersion, + FieldWorkflowName, + FieldRunID, + FieldStatus, + FieldStartTime, + FieldEndTime, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "ci_workflow_results" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "ci_workflow_result_storage_file", + "git_commit_results", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // DefaultID holds the default value on creation for the "id" field. 
+ DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the CIWorkflowResult queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. +func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByOperatingSystem orders the results by the operating_system field. +func ByOperatingSystem(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOperatingSystem, opts...).ToFunc() +} + +// ByGpuType orders the results by the gpu_type field. +func ByGpuType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGpuType, opts...).ToFunc() +} + +// ByPytorchVersion orders the results by the pytorch_version field. +func ByPytorchVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPytorchVersion, opts...).ToFunc() +} + +// ByWorkflowName orders the results by the workflow_name field. +func ByWorkflowName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWorkflowName, opts...).ToFunc() +} + +// ByRunID orders the results by the run_id field. +func ByRunID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRunID, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByStartTime orders the results by the start_time field. 
+func ByStartTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartTime, opts...).ToFunc() +} + +// ByEndTime orders the results by the end_time field. +func ByEndTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndTime, opts...).ToFunc() +} + +// ByGitcommitField orders the results by gitcommit field. +func ByGitcommitField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGitcommitStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStorageFileField orders the results by storage_file field. +func ByStorageFileField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStorageFileStep(), sql.OrderByField(field, opts...)) + } +} +func newGitcommitStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GitcommitInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GitcommitTable, GitcommitColumn), + ) +} +func newStorageFileStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StorageFileInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, StorageFileTable, StorageFileColumn), + ) +} diff --git a/ent/ciworkflowresult/where.go b/ent/ciworkflowresult/where.go new file mode 100644 index 0000000..7e4703e --- /dev/null +++ b/ent/ciworkflowresult/where.go @@ -0,0 +1,788 @@ +// Code generated by ent, DO NOT EDIT. + +package ciworkflowresult + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ. +func UpdateTime(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldUpdateTime, v)) +} + +// OperatingSystem applies equality check predicate on the "operating_system" field. It's identical to OperatingSystemEQ. 
+func OperatingSystem(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldOperatingSystem, v)) +} + +// GpuType applies equality check predicate on the "gpu_type" field. It's identical to GpuTypeEQ. +func GpuType(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldGpuType, v)) +} + +// PytorchVersion applies equality check predicate on the "pytorch_version" field. It's identical to PytorchVersionEQ. +func PytorchVersion(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldPytorchVersion, v)) +} + +// WorkflowName applies equality check predicate on the "workflow_name" field. It's identical to WorkflowNameEQ. +func WorkflowName(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldWorkflowName, v)) +} + +// RunID applies equality check predicate on the "run_id" field. It's identical to RunIDEQ. +func RunID(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldRunID, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldStatus, v)) +} + +// StartTime applies equality check predicate on the "start_time" field. It's identical to StartTimeEQ. +func StartTime(v int64) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldStartTime, v)) +} + +// EndTime applies equality check predicate on the "end_time" field. It's identical to EndTimeEQ. +func EndTime(v int64) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldEndTime, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. 
+func CreateTimeEQ(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. +func CreateTimeIn(vs ...time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. +func CreateTimeNotIn(vs ...time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. +func CreateTimeGT(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. +func UpdateTimeEQ(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. 
+func UpdateTimeNEQ(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. +func UpdateTimeGT(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. +func UpdateTimeLT(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldUpdateTime, v)) +} + +// OperatingSystemEQ applies the EQ predicate on the "operating_system" field. +func OperatingSystemEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldOperatingSystem, v)) +} + +// OperatingSystemNEQ applies the NEQ predicate on the "operating_system" field. +func OperatingSystemNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldOperatingSystem, v)) +} + +// OperatingSystemIn applies the In predicate on the "operating_system" field. 
+func OperatingSystemIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldOperatingSystem, vs...)) +} + +// OperatingSystemNotIn applies the NotIn predicate on the "operating_system" field. +func OperatingSystemNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldOperatingSystem, vs...)) +} + +// OperatingSystemGT applies the GT predicate on the "operating_system" field. +func OperatingSystemGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldOperatingSystem, v)) +} + +// OperatingSystemGTE applies the GTE predicate on the "operating_system" field. +func OperatingSystemGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldOperatingSystem, v)) +} + +// OperatingSystemLT applies the LT predicate on the "operating_system" field. +func OperatingSystemLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldOperatingSystem, v)) +} + +// OperatingSystemLTE applies the LTE predicate on the "operating_system" field. +func OperatingSystemLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldOperatingSystem, v)) +} + +// OperatingSystemContains applies the Contains predicate on the "operating_system" field. +func OperatingSystemContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldOperatingSystem, v)) +} + +// OperatingSystemHasPrefix applies the HasPrefix predicate on the "operating_system" field. +func OperatingSystemHasPrefix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldOperatingSystem, v)) +} + +// OperatingSystemHasSuffix applies the HasSuffix predicate on the "operating_system" field. 
+func OperatingSystemHasSuffix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldOperatingSystem, v)) +} + +// OperatingSystemEqualFold applies the EqualFold predicate on the "operating_system" field. +func OperatingSystemEqualFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldOperatingSystem, v)) +} + +// OperatingSystemContainsFold applies the ContainsFold predicate on the "operating_system" field. +func OperatingSystemContainsFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldOperatingSystem, v)) +} + +// GpuTypeEQ applies the EQ predicate on the "gpu_type" field. +func GpuTypeEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldGpuType, v)) +} + +// GpuTypeNEQ applies the NEQ predicate on the "gpu_type" field. +func GpuTypeNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldGpuType, v)) +} + +// GpuTypeIn applies the In predicate on the "gpu_type" field. +func GpuTypeIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldGpuType, vs...)) +} + +// GpuTypeNotIn applies the NotIn predicate on the "gpu_type" field. +func GpuTypeNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldGpuType, vs...)) +} + +// GpuTypeGT applies the GT predicate on the "gpu_type" field. +func GpuTypeGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldGpuType, v)) +} + +// GpuTypeGTE applies the GTE predicate on the "gpu_type" field. +func GpuTypeGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldGpuType, v)) +} + +// GpuTypeLT applies the LT predicate on the "gpu_type" field. 
+func GpuTypeLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldGpuType, v)) +} + +// GpuTypeLTE applies the LTE predicate on the "gpu_type" field. +func GpuTypeLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldGpuType, v)) +} + +// GpuTypeContains applies the Contains predicate on the "gpu_type" field. +func GpuTypeContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldGpuType, v)) +} + +// GpuTypeHasPrefix applies the HasPrefix predicate on the "gpu_type" field. +func GpuTypeHasPrefix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldGpuType, v)) +} + +// GpuTypeHasSuffix applies the HasSuffix predicate on the "gpu_type" field. +func GpuTypeHasSuffix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldGpuType, v)) +} + +// GpuTypeIsNil applies the IsNil predicate on the "gpu_type" field. +func GpuTypeIsNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIsNull(FieldGpuType)) +} + +// GpuTypeNotNil applies the NotNil predicate on the "gpu_type" field. +func GpuTypeNotNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotNull(FieldGpuType)) +} + +// GpuTypeEqualFold applies the EqualFold predicate on the "gpu_type" field. +func GpuTypeEqualFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldGpuType, v)) +} + +// GpuTypeContainsFold applies the ContainsFold predicate on the "gpu_type" field. +func GpuTypeContainsFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldGpuType, v)) +} + +// PytorchVersionEQ applies the EQ predicate on the "pytorch_version" field. 
+func PytorchVersionEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldPytorchVersion, v)) +} + +// PytorchVersionNEQ applies the NEQ predicate on the "pytorch_version" field. +func PytorchVersionNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldPytorchVersion, v)) +} + +// PytorchVersionIn applies the In predicate on the "pytorch_version" field. +func PytorchVersionIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldPytorchVersion, vs...)) +} + +// PytorchVersionNotIn applies the NotIn predicate on the "pytorch_version" field. +func PytorchVersionNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldPytorchVersion, vs...)) +} + +// PytorchVersionGT applies the GT predicate on the "pytorch_version" field. +func PytorchVersionGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldPytorchVersion, v)) +} + +// PytorchVersionGTE applies the GTE predicate on the "pytorch_version" field. +func PytorchVersionGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldPytorchVersion, v)) +} + +// PytorchVersionLT applies the LT predicate on the "pytorch_version" field. +func PytorchVersionLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldPytorchVersion, v)) +} + +// PytorchVersionLTE applies the LTE predicate on the "pytorch_version" field. +func PytorchVersionLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldPytorchVersion, v)) +} + +// PytorchVersionContains applies the Contains predicate on the "pytorch_version" field. 
+func PytorchVersionContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldPytorchVersion, v)) +} + +// PytorchVersionHasPrefix applies the HasPrefix predicate on the "pytorch_version" field. +func PytorchVersionHasPrefix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldPytorchVersion, v)) +} + +// PytorchVersionHasSuffix applies the HasSuffix predicate on the "pytorch_version" field. +func PytorchVersionHasSuffix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldPytorchVersion, v)) +} + +// PytorchVersionIsNil applies the IsNil predicate on the "pytorch_version" field. +func PytorchVersionIsNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIsNull(FieldPytorchVersion)) +} + +// PytorchVersionNotNil applies the NotNil predicate on the "pytorch_version" field. +func PytorchVersionNotNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotNull(FieldPytorchVersion)) +} + +// PytorchVersionEqualFold applies the EqualFold predicate on the "pytorch_version" field. +func PytorchVersionEqualFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldPytorchVersion, v)) +} + +// PytorchVersionContainsFold applies the ContainsFold predicate on the "pytorch_version" field. +func PytorchVersionContainsFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldPytorchVersion, v)) +} + +// WorkflowNameEQ applies the EQ predicate on the "workflow_name" field. +func WorkflowNameEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldWorkflowName, v)) +} + +// WorkflowNameNEQ applies the NEQ predicate on the "workflow_name" field. 
+func WorkflowNameNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldWorkflowName, v)) +} + +// WorkflowNameIn applies the In predicate on the "workflow_name" field. +func WorkflowNameIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldWorkflowName, vs...)) +} + +// WorkflowNameNotIn applies the NotIn predicate on the "workflow_name" field. +func WorkflowNameNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldWorkflowName, vs...)) +} + +// WorkflowNameGT applies the GT predicate on the "workflow_name" field. +func WorkflowNameGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldWorkflowName, v)) +} + +// WorkflowNameGTE applies the GTE predicate on the "workflow_name" field. +func WorkflowNameGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldWorkflowName, v)) +} + +// WorkflowNameLT applies the LT predicate on the "workflow_name" field. +func WorkflowNameLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldWorkflowName, v)) +} + +// WorkflowNameLTE applies the LTE predicate on the "workflow_name" field. +func WorkflowNameLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldWorkflowName, v)) +} + +// WorkflowNameContains applies the Contains predicate on the "workflow_name" field. +func WorkflowNameContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldWorkflowName, v)) +} + +// WorkflowNameHasPrefix applies the HasPrefix predicate on the "workflow_name" field. +func WorkflowNameHasPrefix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldWorkflowName, v)) +} + +// WorkflowNameHasSuffix applies the HasSuffix predicate on the "workflow_name" field. 
+func WorkflowNameHasSuffix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldWorkflowName, v)) +} + +// WorkflowNameIsNil applies the IsNil predicate on the "workflow_name" field. +func WorkflowNameIsNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIsNull(FieldWorkflowName)) +} + +// WorkflowNameNotNil applies the NotNil predicate on the "workflow_name" field. +func WorkflowNameNotNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotNull(FieldWorkflowName)) +} + +// WorkflowNameEqualFold applies the EqualFold predicate on the "workflow_name" field. +func WorkflowNameEqualFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldWorkflowName, v)) +} + +// WorkflowNameContainsFold applies the ContainsFold predicate on the "workflow_name" field. +func WorkflowNameContainsFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldWorkflowName, v)) +} + +// RunIDEQ applies the EQ predicate on the "run_id" field. +func RunIDEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldRunID, v)) +} + +// RunIDNEQ applies the NEQ predicate on the "run_id" field. +func RunIDNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldRunID, v)) +} + +// RunIDIn applies the In predicate on the "run_id" field. +func RunIDIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldRunID, vs...)) +} + +// RunIDNotIn applies the NotIn predicate on the "run_id" field. +func RunIDNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldRunID, vs...)) +} + +// RunIDGT applies the GT predicate on the "run_id" field. 
+func RunIDGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldRunID, v)) +} + +// RunIDGTE applies the GTE predicate on the "run_id" field. +func RunIDGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldRunID, v)) +} + +// RunIDLT applies the LT predicate on the "run_id" field. +func RunIDLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldRunID, v)) +} + +// RunIDLTE applies the LTE predicate on the "run_id" field. +func RunIDLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldRunID, v)) +} + +// RunIDContains applies the Contains predicate on the "run_id" field. +func RunIDContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldRunID, v)) +} + +// RunIDHasPrefix applies the HasPrefix predicate on the "run_id" field. +func RunIDHasPrefix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldRunID, v)) +} + +// RunIDHasSuffix applies the HasSuffix predicate on the "run_id" field. +func RunIDHasSuffix(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldRunID, v)) +} + +// RunIDIsNil applies the IsNil predicate on the "run_id" field. +func RunIDIsNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIsNull(FieldRunID)) +} + +// RunIDNotNil applies the NotNil predicate on the "run_id" field. +func RunIDNotNil() predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotNull(FieldRunID)) +} + +// RunIDEqualFold applies the EqualFold predicate on the "run_id" field. +func RunIDEqualFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldRunID, v)) +} + +// RunIDContainsFold applies the ContainsFold predicate on the "run_id" field. 
+func RunIDContainsFold(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldRunID, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.CIWorkflowResult { + return predicate.CIWorkflowResult(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. 
func StatusHasPrefix(v string) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldHasPrefix(FieldStatus, v))
}

// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
func StatusHasSuffix(v string) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldHasSuffix(FieldStatus, v))
}

// StatusIsNil applies the IsNil predicate on the "status" field.
func StatusIsNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldIsNull(FieldStatus))
}

// StatusNotNil applies the NotNil predicate on the "status" field.
func StatusNotNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNotNull(FieldStatus))
}

// StatusEqualFold applies the EqualFold predicate on the "status" field.
func StatusEqualFold(v string) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldEqualFold(FieldStatus, v))
}

// StatusContainsFold applies the ContainsFold predicate on the "status" field.
func StatusContainsFold(v string) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldContainsFold(FieldStatus, v))
}

// StartTimeEQ applies the EQ predicate on the "start_time" field.
func StartTimeEQ(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldEQ(FieldStartTime, v))
}

// StartTimeNEQ applies the NEQ predicate on the "start_time" field.
func StartTimeNEQ(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNEQ(FieldStartTime, v))
}

// StartTimeIn applies the In predicate on the "start_time" field.
func StartTimeIn(vs ...int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldIn(FieldStartTime, vs...))
}

// StartTimeNotIn applies the NotIn predicate on the "start_time" field.
func StartTimeNotIn(vs ...int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNotIn(FieldStartTime, vs...))
}

// StartTimeGT applies the GT predicate on the "start_time" field.
func StartTimeGT(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldGT(FieldStartTime, v))
}

// StartTimeGTE applies the GTE predicate on the "start_time" field.
func StartTimeGTE(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldGTE(FieldStartTime, v))
}

// StartTimeLT applies the LT predicate on the "start_time" field.
func StartTimeLT(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldLT(FieldStartTime, v))
}

// StartTimeLTE applies the LTE predicate on the "start_time" field.
func StartTimeLTE(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldLTE(FieldStartTime, v))
}

// StartTimeIsNil applies the IsNil predicate on the "start_time" field.
func StartTimeIsNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldIsNull(FieldStartTime))
}

// StartTimeNotNil applies the NotNil predicate on the "start_time" field.
func StartTimeNotNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNotNull(FieldStartTime))
}

// EndTimeEQ applies the EQ predicate on the "end_time" field.
func EndTimeEQ(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldEQ(FieldEndTime, v))
}

// EndTimeNEQ applies the NEQ predicate on the "end_time" field.
func EndTimeNEQ(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNEQ(FieldEndTime, v))
}

// EndTimeIn applies the In predicate on the "end_time" field.
func EndTimeIn(vs ...int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldIn(FieldEndTime, vs...))
}

// EndTimeNotIn applies the NotIn predicate on the "end_time" field.
func EndTimeNotIn(vs ...int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNotIn(FieldEndTime, vs...))
}

// EndTimeGT applies the GT predicate on the "end_time" field.
func EndTimeGT(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldGT(FieldEndTime, v))
}

// EndTimeGTE applies the GTE predicate on the "end_time" field.
func EndTimeGTE(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldGTE(FieldEndTime, v))
}

// EndTimeLT applies the LT predicate on the "end_time" field.
func EndTimeLT(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldLT(FieldEndTime, v))
}

// EndTimeLTE applies the LTE predicate on the "end_time" field.
func EndTimeLTE(v int64) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldLTE(FieldEndTime, v))
}

// EndTimeIsNil applies the IsNil predicate on the "end_time" field.
func EndTimeIsNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldIsNull(FieldEndTime))
}

// EndTimeNotNil applies the NotNil predicate on the "end_time" field.
func EndTimeNotNil() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.FieldNotNull(FieldEndTime))
}

// HasGitcommit applies the HasEdge predicate on the "gitcommit" edge.
// The edge is M2O and inverse: the foreign key lives on this table.
func HasGitcommit() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, GitcommitTable, GitcommitColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasGitcommitWith applies the HasEdge predicate on the "gitcommit" edge with the given conditions (other predicates).
func HasGitcommitWith(preds ...predicate.GitCommit) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(func(s *sql.Selector) {
		step := newGitcommitStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasStorageFile applies the HasEdge predicate on the "storage_file" edge.
func HasStorageFile() predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, StorageFileTable, StorageFileColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasStorageFileWith applies the HasEdge predicate on the "storage_file" edge with the given conditions (other predicates).
func HasStorageFileWith(preds ...predicate.StorageFile) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(func(s *sql.Selector) {
		step := newStorageFileStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.CIWorkflowResult) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.CIWorkflowResult) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.CIWorkflowResult) predicate.CIWorkflowResult {
	return predicate.CIWorkflowResult(sql.NotPredicates(p))
}

// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"registry-backend/ent/ciworkflowresult"
	"registry-backend/ent/gitcommit"
	"registry-backend/ent/storagefile"
	"time"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/google/uuid"
)

// CIWorkflowResultCreate is the builder for creating a CIWorkflowResult entity.
type CIWorkflowResultCreate struct {
	config
	mutation *CIWorkflowResultMutation
	hooks    []Hook
	conflict []sql.ConflictOption
}

// SetCreateTime sets the "create_time" field.
func (cwrc *CIWorkflowResultCreate) SetCreateTime(t time.Time) *CIWorkflowResultCreate {
	cwrc.mutation.SetCreateTime(t)
	return cwrc
}

// SetNillableCreateTime sets the "create_time" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableCreateTime(t *time.Time) *CIWorkflowResultCreate {
	if t != nil {
		cwrc.SetCreateTime(*t)
	}
	return cwrc
}

// SetUpdateTime sets the "update_time" field.
func (cwrc *CIWorkflowResultCreate) SetUpdateTime(t time.Time) *CIWorkflowResultCreate {
	cwrc.mutation.SetUpdateTime(t)
	return cwrc
}

// SetNillableUpdateTime sets the "update_time" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableUpdateTime(t *time.Time) *CIWorkflowResultCreate {
	if t != nil {
		cwrc.SetUpdateTime(*t)
	}
	return cwrc
}

// SetOperatingSystem sets the "operating_system" field.
func (cwrc *CIWorkflowResultCreate) SetOperatingSystem(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetOperatingSystem(s)
	return cwrc
}

// SetGpuType sets the "gpu_type" field.
func (cwrc *CIWorkflowResultCreate) SetGpuType(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetGpuType(s)
	return cwrc
}

// SetNillableGpuType sets the "gpu_type" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableGpuType(s *string) *CIWorkflowResultCreate {
	if s != nil {
		cwrc.SetGpuType(*s)
	}
	return cwrc
}

// SetPytorchVersion sets the "pytorch_version" field.
func (cwrc *CIWorkflowResultCreate) SetPytorchVersion(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetPytorchVersion(s)
	return cwrc
}

// SetNillablePytorchVersion sets the "pytorch_version" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillablePytorchVersion(s *string) *CIWorkflowResultCreate {
	if s != nil {
		cwrc.SetPytorchVersion(*s)
	}
	return cwrc
}

// SetWorkflowName sets the "workflow_name" field.
func (cwrc *CIWorkflowResultCreate) SetWorkflowName(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetWorkflowName(s)
	return cwrc
}

// SetNillableWorkflowName sets the "workflow_name" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableWorkflowName(s *string) *CIWorkflowResultCreate {
	if s != nil {
		cwrc.SetWorkflowName(*s)
	}
	return cwrc
}

// SetRunID sets the "run_id" field.
func (cwrc *CIWorkflowResultCreate) SetRunID(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetRunID(s)
	return cwrc
}

// SetNillableRunID sets the "run_id" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableRunID(s *string) *CIWorkflowResultCreate {
	if s != nil {
		cwrc.SetRunID(*s)
	}
	return cwrc
}

// SetStatus sets the "status" field.
func (cwrc *CIWorkflowResultCreate) SetStatus(s string) *CIWorkflowResultCreate {
	cwrc.mutation.SetStatus(s)
	return cwrc
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableStatus(s *string) *CIWorkflowResultCreate {
	if s != nil {
		cwrc.SetStatus(*s)
	}
	return cwrc
}

// SetStartTime sets the "start_time" field.
func (cwrc *CIWorkflowResultCreate) SetStartTime(i int64) *CIWorkflowResultCreate {
	cwrc.mutation.SetStartTime(i)
	return cwrc
}

// SetNillableStartTime sets the "start_time" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableStartTime(i *int64) *CIWorkflowResultCreate {
	if i != nil {
		cwrc.SetStartTime(*i)
	}
	return cwrc
}

// SetEndTime sets the "end_time" field.
func (cwrc *CIWorkflowResultCreate) SetEndTime(i int64) *CIWorkflowResultCreate {
	cwrc.mutation.SetEndTime(i)
	return cwrc
}

// SetNillableEndTime sets the "end_time" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableEndTime(i *int64) *CIWorkflowResultCreate {
	if i != nil {
		cwrc.SetEndTime(*i)
	}
	return cwrc
}

// SetID sets the "id" field.
func (cwrc *CIWorkflowResultCreate) SetID(u uuid.UUID) *CIWorkflowResultCreate {
	cwrc.mutation.SetID(u)
	return cwrc
}

// SetNillableID sets the "id" field if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableID(u *uuid.UUID) *CIWorkflowResultCreate {
	if u != nil {
		cwrc.SetID(*u)
	}
	return cwrc
}

// SetGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID.
func (cwrc *CIWorkflowResultCreate) SetGitcommitID(id uuid.UUID) *CIWorkflowResultCreate {
	cwrc.mutation.SetGitcommitID(id)
	return cwrc
}

// SetNillableGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableGitcommitID(id *uuid.UUID) *CIWorkflowResultCreate {
	if id != nil {
		cwrc = cwrc.SetGitcommitID(*id)
	}
	return cwrc
}

// SetGitcommit sets the "gitcommit" edge to the GitCommit entity.
func (cwrc *CIWorkflowResultCreate) SetGitcommit(g *GitCommit) *CIWorkflowResultCreate {
	return cwrc.SetGitcommitID(g.ID)
}

// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID.
func (cwrc *CIWorkflowResultCreate) SetStorageFileID(id uuid.UUID) *CIWorkflowResultCreate {
	cwrc.mutation.SetStorageFileID(id)
	return cwrc
}

// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil.
func (cwrc *CIWorkflowResultCreate) SetNillableStorageFileID(id *uuid.UUID) *CIWorkflowResultCreate {
	if id != nil {
		cwrc = cwrc.SetStorageFileID(*id)
	}
	return cwrc
}

// SetStorageFile sets the "storage_file" edge to the StorageFile entity.
func (cwrc *CIWorkflowResultCreate) SetStorageFile(s *StorageFile) *CIWorkflowResultCreate {
	return cwrc.SetStorageFileID(s.ID)
}

// Mutation returns the CIWorkflowResultMutation object of the builder.
func (cwrc *CIWorkflowResultCreate) Mutation() *CIWorkflowResultMutation {
	return cwrc.mutation
}

// Save creates the CIWorkflowResult in the database.
// Schema defaults are applied before hooks run.
func (cwrc *CIWorkflowResultCreate) Save(ctx context.Context) (*CIWorkflowResult, error) {
	cwrc.defaults()
	return withHooks(ctx, cwrc.sqlSave, cwrc.mutation, cwrc.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (cwrc *CIWorkflowResultCreate) SaveX(ctx context.Context) *CIWorkflowResult {
	v, err := cwrc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (cwrc *CIWorkflowResultCreate) Exec(ctx context.Context) error {
	_, err := cwrc.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (cwrc *CIWorkflowResultCreate) ExecX(ctx context.Context) {
	if err := cwrc.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (cwrc *CIWorkflowResultCreate) defaults() {
	// Fill create_time, update_time and id from the schema defaults
	// only when the caller did not set them explicitly.
	if _, ok := cwrc.mutation.CreateTime(); !ok {
		v := ciworkflowresult.DefaultCreateTime()
		cwrc.mutation.SetCreateTime(v)
	}
	if _, ok := cwrc.mutation.UpdateTime(); !ok {
		v := ciworkflowresult.DefaultUpdateTime()
		cwrc.mutation.SetUpdateTime(v)
	}
	if _, ok := cwrc.mutation.ID(); !ok {
		v := ciworkflowresult.DefaultID()
		cwrc.mutation.SetID(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (cwrc *CIWorkflowResultCreate) check() error {
	if _, ok := cwrc.mutation.CreateTime(); !ok {
		return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "CIWorkflowResult.create_time"`)}
	}
	if _, ok := cwrc.mutation.UpdateTime(); !ok {
		return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "CIWorkflowResult.update_time"`)}
	}
	if _, ok := cwrc.mutation.OperatingSystem(); !ok {
		return &ValidationError{Name: "operating_system", err: errors.New(`ent: missing required field "CIWorkflowResult.operating_system"`)}
	}
	return nil
}

func (cwrc *CIWorkflowResultCreate) sqlSave(ctx context.Context) (*CIWorkflowResult, error) {
	if err := cwrc.check(); err != nil {
		return nil, err
	}
	_node, _spec := cwrc.createSpec()
	if err := sqlgraph.CreateNode(ctx, cwrc.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// The driver may return the ID either as *uuid.UUID or as a raw
	// value that the UUID type can Scan.
	if _spec.ID.Value != nil {
		if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
			_node.ID = *id
		} else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
			return nil, err
		}
	}
	cwrc.mutation.id = &_node.ID
	cwrc.mutation.done = true
	return _node, nil
}

func (cwrc *CIWorkflowResultCreate) createSpec() (*CIWorkflowResult, *sqlgraph.CreateSpec) {
	var (
		_node = &CIWorkflowResult{config: cwrc.config}
		_spec = sqlgraph.NewCreateSpec(ciworkflowresult.Table, sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID))
	)
	_spec.OnConflict = cwrc.conflict
	if id, ok := cwrc.mutation.ID(); ok {
		_node.ID = id
		_spec.ID.Value = &id
	}
	if value, ok := cwrc.mutation.CreateTime(); ok {
		_spec.SetField(ciworkflowresult.FieldCreateTime, field.TypeTime, value)
		_node.CreateTime = value
	}
	if value, ok := cwrc.mutation.UpdateTime(); ok {
		_spec.SetField(ciworkflowresult.FieldUpdateTime, field.TypeTime, value)
		_node.UpdateTime = value
	}
	if value, ok := cwrc.mutation.OperatingSystem(); ok {
		_spec.SetField(ciworkflowresult.FieldOperatingSystem, field.TypeString, value)
		_node.OperatingSystem = value
	}
	if value, ok := cwrc.mutation.GpuType(); ok {
		_spec.SetField(ciworkflowresult.FieldGpuType, field.TypeString, value)
		_node.GpuType = value
	}
	if value, ok := cwrc.mutation.PytorchVersion(); ok {
		_spec.SetField(ciworkflowresult.FieldPytorchVersion, field.TypeString, value)
		_node.PytorchVersion = value
	}
	if value, ok := cwrc.mutation.WorkflowName(); ok {
		_spec.SetField(ciworkflowresult.FieldWorkflowName, field.TypeString, value)
		_node.WorkflowName = value
	}
	if value, ok := cwrc.mutation.RunID(); ok {
		_spec.SetField(ciworkflowresult.FieldRunID, field.TypeString, value)
		_node.RunID = value
	}
	if value, ok := cwrc.mutation.Status(); ok {
		_spec.SetField(ciworkflowresult.FieldStatus, field.TypeString, value)
		_node.Status = value
	}
	if value, ok := cwrc.mutation.StartTime(); ok {
		_spec.SetField(ciworkflowresult.FieldStartTime, field.TypeInt64, value)
		_node.StartTime = value
	}
	if value, ok := cwrc.mutation.EndTime(); ok {
		_spec.SetField(ciworkflowresult.FieldEndTime, field.TypeInt64, value)
		_node.EndTime = value
	}
	// M2O edges: the foreign-key column lives on this row, so the edge
	// target ID is also recorded on the returned node.
	if nodes := cwrc.mutation.GitcommitIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   ciworkflowresult.GitcommitTable,
			Columns: []string{ciworkflowresult.GitcommitColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.git_commit_results = &nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := cwrc.mutation.StorageFileIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   ciworkflowresult.StorageFileTable,
			Columns: []string{ciworkflowresult.StorageFileColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.ci_workflow_result_storage_file = &nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.CIWorkflowResult.Create().
//		SetCreateTime(v).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.CIWorkflowResultUpsert) {
//			SetCreateTime(v+v).
//		}).
//		Exec(ctx)
func (cwrc *CIWorkflowResultCreate) OnConflict(opts ...sql.ConflictOption) *CIWorkflowResultUpsertOne {
	cwrc.conflict = opts
	return &CIWorkflowResultUpsertOne{
		create: cwrc,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.CIWorkflowResult.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (cwrc *CIWorkflowResultCreate) OnConflictColumns(columns ...string) *CIWorkflowResultUpsertOne {
	cwrc.conflict = append(cwrc.conflict, sql.ConflictColumns(columns...))
	return &CIWorkflowResultUpsertOne{
		create: cwrc,
	}
}

type (
	// CIWorkflowResultUpsertOne is the builder for "upsert"-ing
	//  one CIWorkflowResult node.
	CIWorkflowResultUpsertOne struct {
		create *CIWorkflowResultCreate
	}

	// CIWorkflowResultUpsert is the "OnConflict" setter.
	CIWorkflowResultUpsert struct {
		*sql.UpdateSet
	}
)

// SetUpdateTime sets the "update_time" field.
func (u *CIWorkflowResultUpsert) SetUpdateTime(v time.Time) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldUpdateTime, v)
	return u
}

// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateUpdateTime() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldUpdateTime)
	return u
}

// SetOperatingSystem sets the "operating_system" field.
func (u *CIWorkflowResultUpsert) SetOperatingSystem(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldOperatingSystem, v)
	return u
}

// UpdateOperatingSystem sets the "operating_system" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateOperatingSystem() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldOperatingSystem)
	return u
}

// SetGpuType sets the "gpu_type" field.
func (u *CIWorkflowResultUpsert) SetGpuType(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldGpuType, v)
	return u
}

// UpdateGpuType sets the "gpu_type" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateGpuType() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldGpuType)
	return u
}

// ClearGpuType clears the value of the "gpu_type" field.
func (u *CIWorkflowResultUpsert) ClearGpuType() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldGpuType)
	return u
}

// SetPytorchVersion sets the "pytorch_version" field.
func (u *CIWorkflowResultUpsert) SetPytorchVersion(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldPytorchVersion, v)
	return u
}

// UpdatePytorchVersion sets the "pytorch_version" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdatePytorchVersion() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldPytorchVersion)
	return u
}

// ClearPytorchVersion clears the value of the "pytorch_version" field.
func (u *CIWorkflowResultUpsert) ClearPytorchVersion() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldPytorchVersion)
	return u
}

// SetWorkflowName sets the "workflow_name" field.
func (u *CIWorkflowResultUpsert) SetWorkflowName(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldWorkflowName, v)
	return u
}

// UpdateWorkflowName sets the "workflow_name" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateWorkflowName() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldWorkflowName)
	return u
}

// ClearWorkflowName clears the value of the "workflow_name" field.
func (u *CIWorkflowResultUpsert) ClearWorkflowName() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldWorkflowName)
	return u
}

// SetRunID sets the "run_id" field.
func (u *CIWorkflowResultUpsert) SetRunID(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldRunID, v)
	return u
}

// UpdateRunID sets the "run_id" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateRunID() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldRunID)
	return u
}

// ClearRunID clears the value of the "run_id" field.
func (u *CIWorkflowResultUpsert) ClearRunID() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldRunID)
	return u
}

// SetStatus sets the "status" field.
func (u *CIWorkflowResultUpsert) SetStatus(v string) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldStatus, v)
	return u
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateStatus() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldStatus)
	return u
}

// ClearStatus clears the value of the "status" field.
func (u *CIWorkflowResultUpsert) ClearStatus() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldStatus)
	return u
}

// SetStartTime sets the "start_time" field.
func (u *CIWorkflowResultUpsert) SetStartTime(v int64) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldStartTime, v)
	return u
}

// UpdateStartTime sets the "start_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateStartTime() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldStartTime)
	return u
}

// AddStartTime adds v to the "start_time" field.
func (u *CIWorkflowResultUpsert) AddStartTime(v int64) *CIWorkflowResultUpsert {
	u.Add(ciworkflowresult.FieldStartTime, v)
	return u
}

// ClearStartTime clears the value of the "start_time" field.
func (u *CIWorkflowResultUpsert) ClearStartTime() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldStartTime)
	return u
}

// SetEndTime sets the "end_time" field.
func (u *CIWorkflowResultUpsert) SetEndTime(v int64) *CIWorkflowResultUpsert {
	u.Set(ciworkflowresult.FieldEndTime, v)
	return u
}

// UpdateEndTime sets the "end_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsert) UpdateEndTime() *CIWorkflowResultUpsert {
	u.SetExcluded(ciworkflowresult.FieldEndTime)
	return u
}

// AddEndTime adds v to the "end_time" field.
func (u *CIWorkflowResultUpsert) AddEndTime(v int64) *CIWorkflowResultUpsert {
	u.Add(ciworkflowresult.FieldEndTime, v)
	return u
}

// ClearEndTime clears the value of the "end_time" field.
func (u *CIWorkflowResultUpsert) ClearEndTime() *CIWorkflowResultUpsert {
	u.SetNull(ciworkflowresult.FieldEndTime)
	return u
}

// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field.
// Using this option is equivalent to using:
//
//	client.CIWorkflowResult.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//			sql.ResolveWith(func(u *sql.UpdateSet) {
//				u.SetIgnore(ciworkflowresult.FieldID)
//			}),
//		).
//		Exec(ctx)
func (u *CIWorkflowResultUpsertOne) UpdateNewValues() *CIWorkflowResultUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// Immutable fields (id, create_time) keep their original values
		// when they were set on create.
		if _, exists := u.create.mutation.ID(); exists {
			s.SetIgnore(ciworkflowresult.FieldID)
		}
		if _, exists := u.create.mutation.CreateTime(); exists {
			s.SetIgnore(ciworkflowresult.FieldCreateTime)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.CIWorkflowResult.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *CIWorkflowResultUpsertOne) Ignore() *CIWorkflowResultUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *CIWorkflowResultUpsertOne) DoNothing() *CIWorkflowResultUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the CIWorkflowResultCreate.OnConflict
// documentation for more info.
func (u *CIWorkflowResultUpsertOne) Update(set func(*CIWorkflowResultUpsert)) *CIWorkflowResultUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&CIWorkflowResultUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdateTime sets the "update_time" field.
func (u *CIWorkflowResultUpsertOne) SetUpdateTime(v time.Time) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetUpdateTime(v)
	})
}

// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateUpdateTime() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateUpdateTime()
	})
}

// SetOperatingSystem sets the "operating_system" field.
func (u *CIWorkflowResultUpsertOne) SetOperatingSystem(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetOperatingSystem(v)
	})
}

// UpdateOperatingSystem sets the "operating_system" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateOperatingSystem() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateOperatingSystem()
	})
}

// SetGpuType sets the "gpu_type" field.
func (u *CIWorkflowResultUpsertOne) SetGpuType(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetGpuType(v)
	})
}

// UpdateGpuType sets the "gpu_type" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateGpuType() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateGpuType()
	})
}

// ClearGpuType clears the value of the "gpu_type" field.
func (u *CIWorkflowResultUpsertOne) ClearGpuType() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearGpuType()
	})
}

// SetPytorchVersion sets the "pytorch_version" field.
func (u *CIWorkflowResultUpsertOne) SetPytorchVersion(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetPytorchVersion(v)
	})
}

// UpdatePytorchVersion sets the "pytorch_version" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdatePytorchVersion() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdatePytorchVersion()
	})
}

// ClearPytorchVersion clears the value of the "pytorch_version" field.
func (u *CIWorkflowResultUpsertOne) ClearPytorchVersion() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearPytorchVersion()
	})
}

// SetWorkflowName sets the "workflow_name" field.
func (u *CIWorkflowResultUpsertOne) SetWorkflowName(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetWorkflowName(v)
	})
}

// UpdateWorkflowName sets the "workflow_name" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateWorkflowName() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateWorkflowName()
	})
}

// ClearWorkflowName clears the value of the "workflow_name" field.
func (u *CIWorkflowResultUpsertOne) ClearWorkflowName() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearWorkflowName()
	})
}

// SetRunID sets the "run_id" field.
func (u *CIWorkflowResultUpsertOne) SetRunID(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetRunID(v)
	})
}

// UpdateRunID sets the "run_id" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateRunID() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateRunID()
	})
}

// ClearRunID clears the value of the "run_id" field.
func (u *CIWorkflowResultUpsertOne) ClearRunID() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearRunID()
	})
}

// SetStatus sets the "status" field.
func (u *CIWorkflowResultUpsertOne) SetStatus(v string) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetStatus(v)
	})
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateStatus() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateStatus()
	})
}

// ClearStatus clears the value of the "status" field.
func (u *CIWorkflowResultUpsertOne) ClearStatus() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearStatus()
	})
}

// SetStartTime sets the "start_time" field.
func (u *CIWorkflowResultUpsertOne) SetStartTime(v int64) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetStartTime(v)
	})
}

// AddStartTime adds v to the "start_time" field.
func (u *CIWorkflowResultUpsertOne) AddStartTime(v int64) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.AddStartTime(v)
	})
}

// UpdateStartTime sets the "start_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateStartTime() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateStartTime()
	})
}

// ClearStartTime clears the value of the "start_time" field.
func (u *CIWorkflowResultUpsertOne) ClearStartTime() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearStartTime()
	})
}

// SetEndTime sets the "end_time" field.
func (u *CIWorkflowResultUpsertOne) SetEndTime(v int64) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.SetEndTime(v)
	})
}

// AddEndTime adds v to the "end_time" field.
func (u *CIWorkflowResultUpsertOne) AddEndTime(v int64) *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.AddEndTime(v)
	})
}

// UpdateEndTime sets the "end_time" field to the value that was provided on create.
func (u *CIWorkflowResultUpsertOne) UpdateEndTime() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.UpdateEndTime()
	})
}

// ClearEndTime clears the value of the "end_time" field.
func (u *CIWorkflowResultUpsertOne) ClearEndTime() *CIWorkflowResultUpsertOne {
	return u.Update(func(s *CIWorkflowResultUpsert) {
		s.ClearEndTime()
	})
}

// Exec executes the query.
func (u *CIWorkflowResultUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for CIWorkflowResultCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *CIWorkflowResultUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// Exec executes the UPSERT query and returns the inserted/updated ID.
func (u *CIWorkflowResultUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) {
	if u.create.driver.Dialect() == dialect.MySQL {
		// In case of "ON CONFLICT", there is no way to get back non-numeric ID
		// fields from the database since MySQL does not support the RETURNING clause.
		return id, errors.New("ent: CIWorkflowResultUpsertOne.ID is not supported by MySQL driver. Use CIWorkflowResultUpsertOne.Exec instead")
	}
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *CIWorkflowResultUpsertOne) IDX(ctx context.Context) uuid.UUID {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// CIWorkflowResultCreateBulk is the builder for creating many CIWorkflowResult entities in bulk.
type CIWorkflowResultCreateBulk struct {
	config
	err      error
	builders []*CIWorkflowResultCreate
	conflict []sql.ConflictOption
}

// Save creates the CIWorkflowResult entities in the database.
// Each builder's hooks are chained so that the last mutation in the
// chain performs the single batched INSERT for all builders.
func (cwrcb *CIWorkflowResultCreateBulk) Save(ctx context.Context) ([]*CIWorkflowResult, error) {
	if cwrcb.err != nil {
		return nil, cwrcb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(cwrcb.builders))
	nodes := make([]*CIWorkflowResult, len(cwrcb.builders))
	mutators := make([]Mutator, len(cwrcb.builders))
	for i := range cwrcb.builders {
		func(i int, root context.Context) {
			builder := cwrcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*CIWorkflowResultMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, cwrcb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = cwrcb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, cwrcb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, cwrcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// SaveX is like Save, but panics if an error occurs.
func (cwrcb *CIWorkflowResultCreateBulk) SaveX(ctx context.Context) []*CIWorkflowResult {
	v, err := cwrcb.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (cwrcb *CIWorkflowResultCreateBulk) Exec(ctx context.Context) error {
	_, err := cwrcb.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (cwrcb *CIWorkflowResultCreateBulk) ExecX(ctx context.Context) {
	if err := cwrcb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.CIWorkflowResult.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.CIWorkflowResultUpsert) {
//			SetCreateTime(v+v).
//		}).
//		Exec(ctx)
func (cwrcb *CIWorkflowResultCreateBulk) OnConflict(opts ...sql.ConflictOption) *CIWorkflowResultUpsertBulk {
	cwrcb.conflict = opts
	return &CIWorkflowResultUpsertBulk{
		create: cwrcb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.CIWorkflowResult.Create().
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (cwrcb *CIWorkflowResultCreateBulk) OnConflictColumns(columns ...string) *CIWorkflowResultUpsertBulk { + cwrcb.conflict = append(cwrcb.conflict, sql.ConflictColumns(columns...)) + return &CIWorkflowResultUpsertBulk{ + create: cwrcb, + } +} + +// CIWorkflowResultUpsertBulk is the builder for "upsert"-ing +// a bulk of CIWorkflowResult nodes. +type CIWorkflowResultUpsertBulk struct { + create *CIWorkflowResultCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.CIWorkflowResult.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(ciworkflowresult.FieldID) +// }), +// ). +// Exec(ctx) +func (u *CIWorkflowResultUpsertBulk) UpdateNewValues() *CIWorkflowResultUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(ciworkflowresult.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(ciworkflowresult.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.CIWorkflowResult.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *CIWorkflowResultUpsertBulk) Ignore() *CIWorkflowResultUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *CIWorkflowResultUpsertBulk) DoNothing() *CIWorkflowResultUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the CIWorkflowResultCreateBulk.OnConflict +// documentation for more info. +func (u *CIWorkflowResultUpsertBulk) Update(set func(*CIWorkflowResultUpsert)) *CIWorkflowResultUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&CIWorkflowResultUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *CIWorkflowResultUpsertBulk) SetUpdateTime(v time.Time) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateUpdateTime() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetOperatingSystem sets the "operating_system" field. +func (u *CIWorkflowResultUpsertBulk) SetOperatingSystem(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetOperatingSystem(v) + }) +} + +// UpdateOperatingSystem sets the "operating_system" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateOperatingSystem() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateOperatingSystem() + }) +} + +// SetGpuType sets the "gpu_type" field. +func (u *CIWorkflowResultUpsertBulk) SetGpuType(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetGpuType(v) + }) +} + +// UpdateGpuType sets the "gpu_type" field to the value that was provided on create. 
+func (u *CIWorkflowResultUpsertBulk) UpdateGpuType() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateGpuType() + }) +} + +// ClearGpuType clears the value of the "gpu_type" field. +func (u *CIWorkflowResultUpsertBulk) ClearGpuType() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearGpuType() + }) +} + +// SetPytorchVersion sets the "pytorch_version" field. +func (u *CIWorkflowResultUpsertBulk) SetPytorchVersion(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetPytorchVersion(v) + }) +} + +// UpdatePytorchVersion sets the "pytorch_version" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdatePytorchVersion() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdatePytorchVersion() + }) +} + +// ClearPytorchVersion clears the value of the "pytorch_version" field. +func (u *CIWorkflowResultUpsertBulk) ClearPytorchVersion() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearPytorchVersion() + }) +} + +// SetWorkflowName sets the "workflow_name" field. +func (u *CIWorkflowResultUpsertBulk) SetWorkflowName(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetWorkflowName(v) + }) +} + +// UpdateWorkflowName sets the "workflow_name" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateWorkflowName() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateWorkflowName() + }) +} + +// ClearWorkflowName clears the value of the "workflow_name" field. +func (u *CIWorkflowResultUpsertBulk) ClearWorkflowName() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearWorkflowName() + }) +} + +// SetRunID sets the "run_id" field. 
+func (u *CIWorkflowResultUpsertBulk) SetRunID(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetRunID(v) + }) +} + +// UpdateRunID sets the "run_id" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateRunID() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateRunID() + }) +} + +// ClearRunID clears the value of the "run_id" field. +func (u *CIWorkflowResultUpsertBulk) ClearRunID() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearRunID() + }) +} + +// SetStatus sets the "status" field. +func (u *CIWorkflowResultUpsertBulk) SetStatus(v string) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateStatus() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateStatus() + }) +} + +// ClearStatus clears the value of the "status" field. +func (u *CIWorkflowResultUpsertBulk) ClearStatus() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearStatus() + }) +} + +// SetStartTime sets the "start_time" field. +func (u *CIWorkflowResultUpsertBulk) SetStartTime(v int64) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetStartTime(v) + }) +} + +// AddStartTime adds v to the "start_time" field. +func (u *CIWorkflowResultUpsertBulk) AddStartTime(v int64) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.AddStartTime(v) + }) +} + +// UpdateStartTime sets the "start_time" field to the value that was provided on create. 
+func (u *CIWorkflowResultUpsertBulk) UpdateStartTime() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateStartTime() + }) +} + +// ClearStartTime clears the value of the "start_time" field. +func (u *CIWorkflowResultUpsertBulk) ClearStartTime() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearStartTime() + }) +} + +// SetEndTime sets the "end_time" field. +func (u *CIWorkflowResultUpsertBulk) SetEndTime(v int64) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.SetEndTime(v) + }) +} + +// AddEndTime adds v to the "end_time" field. +func (u *CIWorkflowResultUpsertBulk) AddEndTime(v int64) *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.AddEndTime(v) + }) +} + +// UpdateEndTime sets the "end_time" field to the value that was provided on create. +func (u *CIWorkflowResultUpsertBulk) UpdateEndTime() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.UpdateEndTime() + }) +} + +// ClearEndTime clears the value of the "end_time" field. +func (u *CIWorkflowResultUpsertBulk) ClearEndTime() *CIWorkflowResultUpsertBulk { + return u.Update(func(s *CIWorkflowResultUpsert) { + s.ClearEndTime() + }) +} + +// Exec executes the query. +func (u *CIWorkflowResultUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the CIWorkflowResultCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for CIWorkflowResultCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *CIWorkflowResultUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/ciworkflowresult_delete.go b/ent/ciworkflowresult_delete.go new file mode 100644 index 0000000..7a1fa77 --- /dev/null +++ b/ent/ciworkflowresult_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CIWorkflowResultDelete is the builder for deleting a CIWorkflowResult entity. +type CIWorkflowResultDelete struct { + config + hooks []Hook + mutation *CIWorkflowResultMutation +} + +// Where appends a list predicates to the CIWorkflowResultDelete builder. +func (cwrd *CIWorkflowResultDelete) Where(ps ...predicate.CIWorkflowResult) *CIWorkflowResultDelete { + cwrd.mutation.Where(ps...) + return cwrd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (cwrd *CIWorkflowResultDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, cwrd.sqlExec, cwrd.mutation, cwrd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cwrd *CIWorkflowResultDelete) ExecX(ctx context.Context) int { + n, err := cwrd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (cwrd *CIWorkflowResultDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(ciworkflowresult.Table, sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID)) + if ps := cwrd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, cwrd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + cwrd.mutation.done = true + return affected, err +} + +// CIWorkflowResultDeleteOne is the builder for deleting a single CIWorkflowResult entity. +type CIWorkflowResultDeleteOne struct { + cwrd *CIWorkflowResultDelete +} + +// Where appends a list predicates to the CIWorkflowResultDelete builder. +func (cwrdo *CIWorkflowResultDeleteOne) Where(ps ...predicate.CIWorkflowResult) *CIWorkflowResultDeleteOne { + cwrdo.cwrd.mutation.Where(ps...) + return cwrdo +} + +// Exec executes the deletion query. +func (cwrdo *CIWorkflowResultDeleteOne) Exec(ctx context.Context) error { + n, err := cwrdo.cwrd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{ciworkflowresult.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (cwrdo *CIWorkflowResultDeleteOne) ExecX(ctx context.Context) { + if err := cwrdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/ciworkflowresult_query.go b/ent/ciworkflowresult_query.go new file mode 100644 index 0000000..dd4b8b0 --- /dev/null +++ b/ent/ciworkflowresult_query.go @@ -0,0 +1,726 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/predicate" + "registry-backend/ent/storagefile" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// CIWorkflowResultQuery is the builder for querying CIWorkflowResult entities. +type CIWorkflowResultQuery struct { + config + ctx *QueryContext + order []ciworkflowresult.OrderOption + inters []Interceptor + predicates []predicate.CIWorkflowResult + withGitcommit *GitCommitQuery + withStorageFile *StorageFileQuery + withFKs bool + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the CIWorkflowResultQuery builder. +func (cwrq *CIWorkflowResultQuery) Where(ps ...predicate.CIWorkflowResult) *CIWorkflowResultQuery { + cwrq.predicates = append(cwrq.predicates, ps...) + return cwrq +} + +// Limit the number of records to be returned by this query. +func (cwrq *CIWorkflowResultQuery) Limit(limit int) *CIWorkflowResultQuery { + cwrq.ctx.Limit = &limit + return cwrq +} + +// Offset to start from. +func (cwrq *CIWorkflowResultQuery) Offset(offset int) *CIWorkflowResultQuery { + cwrq.ctx.Offset = &offset + return cwrq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (cwrq *CIWorkflowResultQuery) Unique(unique bool) *CIWorkflowResultQuery { + cwrq.ctx.Unique = &unique + return cwrq +} + +// Order specifies how the records should be ordered. +func (cwrq *CIWorkflowResultQuery) Order(o ...ciworkflowresult.OrderOption) *CIWorkflowResultQuery { + cwrq.order = append(cwrq.order, o...) 
+ return cwrq +} + +// QueryGitcommit chains the current query on the "gitcommit" edge. +func (cwrq *CIWorkflowResultQuery) QueryGitcommit() *GitCommitQuery { + query := (&GitCommitClient{config: cwrq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cwrq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cwrq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(ciworkflowresult.Table, ciworkflowresult.FieldID, selector), + sqlgraph.To(gitcommit.Table, gitcommit.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ciworkflowresult.GitcommitTable, ciworkflowresult.GitcommitColumn), + ) + fromU = sqlgraph.SetNeighbors(cwrq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryStorageFile chains the current query on the "storage_file" edge. +func (cwrq *CIWorkflowResultQuery) QueryStorageFile() *StorageFileQuery { + query := (&StorageFileClient{config: cwrq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cwrq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cwrq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(ciworkflowresult.Table, ciworkflowresult.FieldID, selector), + sqlgraph.To(storagefile.Table, storagefile.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ciworkflowresult.StorageFileTable, ciworkflowresult.StorageFileColumn), + ) + fromU = sqlgraph.SetNeighbors(cwrq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first CIWorkflowResult entity from the query. +// Returns a *NotFoundError when no CIWorkflowResult was found. 
+func (cwrq *CIWorkflowResultQuery) First(ctx context.Context) (*CIWorkflowResult, error) { + nodes, err := cwrq.Limit(1).All(setContextOp(ctx, cwrq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{ciworkflowresult.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) FirstX(ctx context.Context) *CIWorkflowResult { + node, err := cwrq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first CIWorkflowResult ID from the query. +// Returns a *NotFoundError when no CIWorkflowResult ID was found. +func (cwrq *CIWorkflowResultQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = cwrq.Limit(1).IDs(setContextOp(ctx, cwrq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{ciworkflowresult.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := cwrq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single CIWorkflowResult entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one CIWorkflowResult entity is found. +// Returns a *NotFoundError when no CIWorkflowResult entities are found. +func (cwrq *CIWorkflowResultQuery) Only(ctx context.Context) (*CIWorkflowResult, error) { + nodes, err := cwrq.Limit(2).All(setContextOp(ctx, cwrq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{ciworkflowresult.Label} + default: + return nil, &NotSingularError{ciworkflowresult.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (cwrq *CIWorkflowResultQuery) OnlyX(ctx context.Context) *CIWorkflowResult { + node, err := cwrq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only CIWorkflowResult ID in the query. +// Returns a *NotSingularError when more than one CIWorkflowResult ID is found. +// Returns a *NotFoundError when no entities are found. +func (cwrq *CIWorkflowResultQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = cwrq.Limit(2).IDs(setContextOp(ctx, cwrq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{ciworkflowresult.Label} + default: + err = &NotSingularError{ciworkflowresult.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := cwrq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of CIWorkflowResults. +func (cwrq *CIWorkflowResultQuery) All(ctx context.Context) ([]*CIWorkflowResult, error) { + ctx = setContextOp(ctx, cwrq.ctx, "All") + if err := cwrq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*CIWorkflowResult, *CIWorkflowResultQuery]() + return withInterceptors[[]*CIWorkflowResult](ctx, cwrq, qr, cwrq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) AllX(ctx context.Context) []*CIWorkflowResult { + nodes, err := cwrq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of CIWorkflowResult IDs. 
+func (cwrq *CIWorkflowResultQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if cwrq.ctx.Unique == nil && cwrq.path != nil { + cwrq.Unique(true) + } + ctx = setContextOp(ctx, cwrq.ctx, "IDs") + if err = cwrq.Select(ciworkflowresult.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := cwrq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cwrq *CIWorkflowResultQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cwrq.ctx, "Count") + if err := cwrq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, cwrq, querierCount[*CIWorkflowResultQuery](), cwrq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) CountX(ctx context.Context) int { + count, err := cwrq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cwrq *CIWorkflowResultQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, cwrq.ctx, "Exist") + switch _, err := cwrq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cwrq *CIWorkflowResultQuery) ExistX(ctx context.Context) bool { + exist, err := cwrq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the CIWorkflowResultQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (cwrq *CIWorkflowResultQuery) Clone() *CIWorkflowResultQuery { + if cwrq == nil { + return nil + } + return &CIWorkflowResultQuery{ + config: cwrq.config, + ctx: cwrq.ctx.Clone(), + order: append([]ciworkflowresult.OrderOption{}, cwrq.order...), + inters: append([]Interceptor{}, cwrq.inters...), + predicates: append([]predicate.CIWorkflowResult{}, cwrq.predicates...), + withGitcommit: cwrq.withGitcommit.Clone(), + withStorageFile: cwrq.withStorageFile.Clone(), + // clone intermediate query. + sql: cwrq.sql.Clone(), + path: cwrq.path, + } +} + +// WithGitcommit tells the query-builder to eager-load the nodes that are connected to +// the "gitcommit" edge. The optional arguments are used to configure the query builder of the edge. +func (cwrq *CIWorkflowResultQuery) WithGitcommit(opts ...func(*GitCommitQuery)) *CIWorkflowResultQuery { + query := (&GitCommitClient{config: cwrq.config}).Query() + for _, opt := range opts { + opt(query) + } + cwrq.withGitcommit = query + return cwrq +} + +// WithStorageFile tells the query-builder to eager-load the nodes that are connected to +// the "storage_file" edge. The optional arguments are used to configure the query builder of the edge. +func (cwrq *CIWorkflowResultQuery) WithStorageFile(opts ...func(*StorageFileQuery)) *CIWorkflowResultQuery { + query := (&StorageFileClient{config: cwrq.config}).Query() + for _, opt := range opts { + opt(query) + } + cwrq.withStorageFile = query + return cwrq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.CIWorkflowResult.Query(). +// GroupBy(ciworkflowresult.FieldCreateTime). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +func (cwrq *CIWorkflowResultQuery) GroupBy(field string, fields ...string) *CIWorkflowResultGroupBy { + cwrq.ctx.Fields = append([]string{field}, fields...) + grbuild := &CIWorkflowResultGroupBy{build: cwrq} + grbuild.flds = &cwrq.ctx.Fields + grbuild.label = ciworkflowresult.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.CIWorkflowResult.Query(). +// Select(ciworkflowresult.FieldCreateTime). +// Scan(ctx, &v) +func (cwrq *CIWorkflowResultQuery) Select(fields ...string) *CIWorkflowResultSelect { + cwrq.ctx.Fields = append(cwrq.ctx.Fields, fields...) + sbuild := &CIWorkflowResultSelect{CIWorkflowResultQuery: cwrq} + sbuild.label = ciworkflowresult.Label + sbuild.flds, sbuild.scan = &cwrq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a CIWorkflowResultSelect configured with the given aggregations. +func (cwrq *CIWorkflowResultQuery) Aggregate(fns ...AggregateFunc) *CIWorkflowResultSelect { + return cwrq.Select().Aggregate(fns...) 
+} + +func (cwrq *CIWorkflowResultQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cwrq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cwrq); err != nil { + return err + } + } + } + for _, f := range cwrq.ctx.Fields { + if !ciworkflowresult.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if cwrq.path != nil { + prev, err := cwrq.path(ctx) + if err != nil { + return err + } + cwrq.sql = prev + } + return nil +} + +func (cwrq *CIWorkflowResultQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CIWorkflowResult, error) { + var ( + nodes = []*CIWorkflowResult{} + withFKs = cwrq.withFKs + _spec = cwrq.querySpec() + loadedTypes = [2]bool{ + cwrq.withGitcommit != nil, + cwrq.withStorageFile != nil, + } + ) + if cwrq.withGitcommit != nil || cwrq.withStorageFile != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, ciworkflowresult.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*CIWorkflowResult).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &CIWorkflowResult{config: cwrq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(cwrq.modifiers) > 0 { + _spec.Modifiers = cwrq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cwrq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := cwrq.withGitcommit; query != nil { + if err := cwrq.loadGitcommit(ctx, query, nodes, nil, + func(n *CIWorkflowResult, e *GitCommit) { n.Edges.Gitcommit = e }); err != nil { + return nil, err + } + } + if query := cwrq.withStorageFile; query != nil { + if err := cwrq.loadStorageFile(ctx, query, nodes, nil, + func(n *CIWorkflowResult, e *StorageFile) { n.Edges.StorageFile = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (cwrq *CIWorkflowResultQuery) loadGitcommit(ctx context.Context, query *GitCommitQuery, nodes []*CIWorkflowResult, init func(*CIWorkflowResult), assign func(*CIWorkflowResult, *GitCommit)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*CIWorkflowResult) + for i := range nodes { + if nodes[i].git_commit_results == nil { + continue + } + fk := *nodes[i].git_commit_results + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(gitcommit.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "git_commit_results" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (cwrq 
*CIWorkflowResultQuery) loadStorageFile(ctx context.Context, query *StorageFileQuery, nodes []*CIWorkflowResult, init func(*CIWorkflowResult), assign func(*CIWorkflowResult, *StorageFile)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*CIWorkflowResult) + for i := range nodes { + if nodes[i].ci_workflow_result_storage_file == nil { + continue + } + fk := *nodes[i].ci_workflow_result_storage_file + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(storagefile.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "ci_workflow_result_storage_file" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (cwrq *CIWorkflowResultQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cwrq.querySpec() + if len(cwrq.modifiers) > 0 { + _spec.Modifiers = cwrq.modifiers + } + _spec.Node.Columns = cwrq.ctx.Fields + if len(cwrq.ctx.Fields) > 0 { + _spec.Unique = cwrq.ctx.Unique != nil && *cwrq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cwrq.driver, _spec) +} + +func (cwrq *CIWorkflowResultQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(ciworkflowresult.Table, ciworkflowresult.Columns, sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID)) + _spec.From = cwrq.sql + if unique := cwrq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cwrq.path != nil { + _spec.Unique = true + } + if fields := cwrq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, ciworkflowresult.FieldID) + for i := range fields { + if fields[i] != ciworkflowresult.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) 
+ } + } + } + if ps := cwrq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cwrq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cwrq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cwrq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cwrq *CIWorkflowResultQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cwrq.driver.Dialect()) + t1 := builder.Table(ciworkflowresult.Table) + columns := cwrq.ctx.Fields + if len(columns) == 0 { + columns = ciworkflowresult.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cwrq.sql != nil { + selector = cwrq.sql + selector.Select(selector.Columns(columns...)...) + } + if cwrq.ctx.Unique != nil && *cwrq.ctx.Unique { + selector.Distinct() + } + for _, m := range cwrq.modifiers { + m(selector) + } + for _, p := range cwrq.predicates { + p(selector) + } + for _, p := range cwrq.order { + p(selector) + } + if offset := cwrq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cwrq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (cwrq *CIWorkflowResultQuery) ForUpdate(opts ...sql.LockOption) *CIWorkflowResultQuery { + if cwrq.driver.Dialect() == dialect.Postgres { + cwrq.Unique(false) + } + cwrq.modifiers = append(cwrq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) 
+ }) + return cwrq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (cwrq *CIWorkflowResultQuery) ForShare(opts ...sql.LockOption) *CIWorkflowResultQuery { + if cwrq.driver.Dialect() == dialect.Postgres { + cwrq.Unique(false) + } + cwrq.modifiers = append(cwrq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return cwrq +} + +// CIWorkflowResultGroupBy is the group-by builder for CIWorkflowResult entities. +type CIWorkflowResultGroupBy struct { + selector + build *CIWorkflowResultQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cwrgb *CIWorkflowResultGroupBy) Aggregate(fns ...AggregateFunc) *CIWorkflowResultGroupBy { + cwrgb.fns = append(cwrgb.fns, fns...) + return cwrgb +} + +// Scan applies the selector query and scans the result into the given value. +func (cwrgb *CIWorkflowResultGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cwrgb.build.ctx, "GroupBy") + if err := cwrgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CIWorkflowResultQuery, *CIWorkflowResultGroupBy](ctx, cwrgb.build, cwrgb, cwrgb.build.inters, v) +} + +func (cwrgb *CIWorkflowResultGroupBy) sqlScan(ctx context.Context, root *CIWorkflowResultQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cwrgb.fns)) + for _, fn := range cwrgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cwrgb.flds)+len(cwrgb.fns)) + for _, f := range *cwrgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cwrgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cwrgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// CIWorkflowResultSelect is the builder for selecting fields of CIWorkflowResult entities. +type CIWorkflowResultSelect struct { + *CIWorkflowResultQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cwrs *CIWorkflowResultSelect) Aggregate(fns ...AggregateFunc) *CIWorkflowResultSelect { + cwrs.fns = append(cwrs.fns, fns...) + return cwrs +} + +// Scan applies the selector query and scans the result into the given value. +func (cwrs *CIWorkflowResultSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cwrs.ctx, "Select") + if err := cwrs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CIWorkflowResultQuery, *CIWorkflowResultSelect](ctx, cwrs.CIWorkflowResultQuery, cwrs, cwrs.inters, v) +} + +func (cwrs *CIWorkflowResultSelect) sqlScan(ctx context.Context, root *CIWorkflowResultQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cwrs.fns)) + for _, fn := range cwrs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cwrs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cwrs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/ciworkflowresult_update.go b/ent/ciworkflowresult_update.go new file mode 100644 index 0000000..29d963f --- /dev/null +++ b/ent/ciworkflowresult_update.go @@ -0,0 +1,869 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/predicate" + "registry-backend/ent/storagefile" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// CIWorkflowResultUpdate is the builder for updating CIWorkflowResult entities. +type CIWorkflowResultUpdate struct { + config + hooks []Hook + mutation *CIWorkflowResultMutation +} + +// Where appends a list predicates to the CIWorkflowResultUpdate builder. +func (cwru *CIWorkflowResultUpdate) Where(ps ...predicate.CIWorkflowResult) *CIWorkflowResultUpdate { + cwru.mutation.Where(ps...) + return cwru +} + +// SetUpdateTime sets the "update_time" field. +func (cwru *CIWorkflowResultUpdate) SetUpdateTime(t time.Time) *CIWorkflowResultUpdate { + cwru.mutation.SetUpdateTime(t) + return cwru +} + +// SetOperatingSystem sets the "operating_system" field. +func (cwru *CIWorkflowResultUpdate) SetOperatingSystem(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetOperatingSystem(s) + return cwru +} + +// SetNillableOperatingSystem sets the "operating_system" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableOperatingSystem(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetOperatingSystem(*s) + } + return cwru +} + +// SetGpuType sets the "gpu_type" field. +func (cwru *CIWorkflowResultUpdate) SetGpuType(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetGpuType(s) + return cwru +} + +// SetNillableGpuType sets the "gpu_type" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableGpuType(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetGpuType(*s) + } + return cwru +} + +// ClearGpuType clears the value of the "gpu_type" field. 
+func (cwru *CIWorkflowResultUpdate) ClearGpuType() *CIWorkflowResultUpdate { + cwru.mutation.ClearGpuType() + return cwru +} + +// SetPytorchVersion sets the "pytorch_version" field. +func (cwru *CIWorkflowResultUpdate) SetPytorchVersion(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetPytorchVersion(s) + return cwru +} + +// SetNillablePytorchVersion sets the "pytorch_version" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillablePytorchVersion(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetPytorchVersion(*s) + } + return cwru +} + +// ClearPytorchVersion clears the value of the "pytorch_version" field. +func (cwru *CIWorkflowResultUpdate) ClearPytorchVersion() *CIWorkflowResultUpdate { + cwru.mutation.ClearPytorchVersion() + return cwru +} + +// SetWorkflowName sets the "workflow_name" field. +func (cwru *CIWorkflowResultUpdate) SetWorkflowName(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetWorkflowName(s) + return cwru +} + +// SetNillableWorkflowName sets the "workflow_name" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableWorkflowName(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetWorkflowName(*s) + } + return cwru +} + +// ClearWorkflowName clears the value of the "workflow_name" field. +func (cwru *CIWorkflowResultUpdate) ClearWorkflowName() *CIWorkflowResultUpdate { + cwru.mutation.ClearWorkflowName() + return cwru +} + +// SetRunID sets the "run_id" field. +func (cwru *CIWorkflowResultUpdate) SetRunID(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetRunID(s) + return cwru +} + +// SetNillableRunID sets the "run_id" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableRunID(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetRunID(*s) + } + return cwru +} + +// ClearRunID clears the value of the "run_id" field. 
+func (cwru *CIWorkflowResultUpdate) ClearRunID() *CIWorkflowResultUpdate { + cwru.mutation.ClearRunID() + return cwru +} + +// SetStatus sets the "status" field. +func (cwru *CIWorkflowResultUpdate) SetStatus(s string) *CIWorkflowResultUpdate { + cwru.mutation.SetStatus(s) + return cwru +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableStatus(s *string) *CIWorkflowResultUpdate { + if s != nil { + cwru.SetStatus(*s) + } + return cwru +} + +// ClearStatus clears the value of the "status" field. +func (cwru *CIWorkflowResultUpdate) ClearStatus() *CIWorkflowResultUpdate { + cwru.mutation.ClearStatus() + return cwru +} + +// SetStartTime sets the "start_time" field. +func (cwru *CIWorkflowResultUpdate) SetStartTime(i int64) *CIWorkflowResultUpdate { + cwru.mutation.ResetStartTime() + cwru.mutation.SetStartTime(i) + return cwru +} + +// SetNillableStartTime sets the "start_time" field if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableStartTime(i *int64) *CIWorkflowResultUpdate { + if i != nil { + cwru.SetStartTime(*i) + } + return cwru +} + +// AddStartTime adds i to the "start_time" field. +func (cwru *CIWorkflowResultUpdate) AddStartTime(i int64) *CIWorkflowResultUpdate { + cwru.mutation.AddStartTime(i) + return cwru +} + +// ClearStartTime clears the value of the "start_time" field. +func (cwru *CIWorkflowResultUpdate) ClearStartTime() *CIWorkflowResultUpdate { + cwru.mutation.ClearStartTime() + return cwru +} + +// SetEndTime sets the "end_time" field. +func (cwru *CIWorkflowResultUpdate) SetEndTime(i int64) *CIWorkflowResultUpdate { + cwru.mutation.ResetEndTime() + cwru.mutation.SetEndTime(i) + return cwru +} + +// SetNillableEndTime sets the "end_time" field if the given value is not nil. 
+func (cwru *CIWorkflowResultUpdate) SetNillableEndTime(i *int64) *CIWorkflowResultUpdate { + if i != nil { + cwru.SetEndTime(*i) + } + return cwru +} + +// AddEndTime adds i to the "end_time" field. +func (cwru *CIWorkflowResultUpdate) AddEndTime(i int64) *CIWorkflowResultUpdate { + cwru.mutation.AddEndTime(i) + return cwru +} + +// ClearEndTime clears the value of the "end_time" field. +func (cwru *CIWorkflowResultUpdate) ClearEndTime() *CIWorkflowResultUpdate { + cwru.mutation.ClearEndTime() + return cwru +} + +// SetGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID. +func (cwru *CIWorkflowResultUpdate) SetGitcommitID(id uuid.UUID) *CIWorkflowResultUpdate { + cwru.mutation.SetGitcommitID(id) + return cwru +} + +// SetNillableGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableGitcommitID(id *uuid.UUID) *CIWorkflowResultUpdate { + if id != nil { + cwru = cwru.SetGitcommitID(*id) + } + return cwru +} + +// SetGitcommit sets the "gitcommit" edge to the GitCommit entity. +func (cwru *CIWorkflowResultUpdate) SetGitcommit(g *GitCommit) *CIWorkflowResultUpdate { + return cwru.SetGitcommitID(g.ID) +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID. +func (cwru *CIWorkflowResultUpdate) SetStorageFileID(id uuid.UUID) *CIWorkflowResultUpdate { + cwru.mutation.SetStorageFileID(id) + return cwru +} + +// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil. +func (cwru *CIWorkflowResultUpdate) SetNillableStorageFileID(id *uuid.UUID) *CIWorkflowResultUpdate { + if id != nil { + cwru = cwru.SetStorageFileID(*id) + } + return cwru +} + +// SetStorageFile sets the "storage_file" edge to the StorageFile entity. 
+func (cwru *CIWorkflowResultUpdate) SetStorageFile(s *StorageFile) *CIWorkflowResultUpdate { + return cwru.SetStorageFileID(s.ID) +} + +// Mutation returns the CIWorkflowResultMutation object of the builder. +func (cwru *CIWorkflowResultUpdate) Mutation() *CIWorkflowResultMutation { + return cwru.mutation +} + +// ClearGitcommit clears the "gitcommit" edge to the GitCommit entity. +func (cwru *CIWorkflowResultUpdate) ClearGitcommit() *CIWorkflowResultUpdate { + cwru.mutation.ClearGitcommit() + return cwru +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. +func (cwru *CIWorkflowResultUpdate) ClearStorageFile() *CIWorkflowResultUpdate { + cwru.mutation.ClearStorageFile() + return cwru +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cwru *CIWorkflowResultUpdate) Save(ctx context.Context) (int, error) { + cwru.defaults() + return withHooks(ctx, cwru.sqlSave, cwru.mutation, cwru.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cwru *CIWorkflowResultUpdate) SaveX(ctx context.Context) int { + affected, err := cwru.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cwru *CIWorkflowResultUpdate) Exec(ctx context.Context) error { + _, err := cwru.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cwru *CIWorkflowResultUpdate) ExecX(ctx context.Context) { + if err := cwru.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (cwru *CIWorkflowResultUpdate) defaults() { + if _, ok := cwru.mutation.UpdateTime(); !ok { + v := ciworkflowresult.UpdateDefaultUpdateTime() + cwru.mutation.SetUpdateTime(v) + } +} + +func (cwru *CIWorkflowResultUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(ciworkflowresult.Table, ciworkflowresult.Columns, sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID)) + if ps := cwru.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cwru.mutation.UpdateTime(); ok { + _spec.SetField(ciworkflowresult.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := cwru.mutation.OperatingSystem(); ok { + _spec.SetField(ciworkflowresult.FieldOperatingSystem, field.TypeString, value) + } + if value, ok := cwru.mutation.GpuType(); ok { + _spec.SetField(ciworkflowresult.FieldGpuType, field.TypeString, value) + } + if cwru.mutation.GpuTypeCleared() { + _spec.ClearField(ciworkflowresult.FieldGpuType, field.TypeString) + } + if value, ok := cwru.mutation.PytorchVersion(); ok { + _spec.SetField(ciworkflowresult.FieldPytorchVersion, field.TypeString, value) + } + if cwru.mutation.PytorchVersionCleared() { + _spec.ClearField(ciworkflowresult.FieldPytorchVersion, field.TypeString) + } + if value, ok := cwru.mutation.WorkflowName(); ok { + _spec.SetField(ciworkflowresult.FieldWorkflowName, field.TypeString, value) + } + if cwru.mutation.WorkflowNameCleared() { + _spec.ClearField(ciworkflowresult.FieldWorkflowName, field.TypeString) + } + if value, ok := cwru.mutation.RunID(); ok { + _spec.SetField(ciworkflowresult.FieldRunID, field.TypeString, value) + } + if cwru.mutation.RunIDCleared() { + _spec.ClearField(ciworkflowresult.FieldRunID, field.TypeString) + } + if value, ok := cwru.mutation.Status(); ok { + _spec.SetField(ciworkflowresult.FieldStatus, field.TypeString, value) + } + if cwru.mutation.StatusCleared() { + 
_spec.ClearField(ciworkflowresult.FieldStatus, field.TypeString) + } + if value, ok := cwru.mutation.StartTime(); ok { + _spec.SetField(ciworkflowresult.FieldStartTime, field.TypeInt64, value) + } + if value, ok := cwru.mutation.AddedStartTime(); ok { + _spec.AddField(ciworkflowresult.FieldStartTime, field.TypeInt64, value) + } + if cwru.mutation.StartTimeCleared() { + _spec.ClearField(ciworkflowresult.FieldStartTime, field.TypeInt64) + } + if value, ok := cwru.mutation.EndTime(); ok { + _spec.SetField(ciworkflowresult.FieldEndTime, field.TypeInt64, value) + } + if value, ok := cwru.mutation.AddedEndTime(); ok { + _spec.AddField(ciworkflowresult.FieldEndTime, field.TypeInt64, value) + } + if cwru.mutation.EndTimeCleared() { + _spec.ClearField(ciworkflowresult.FieldEndTime, field.TypeInt64) + } + if cwru.mutation.GitcommitCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: ciworkflowresult.GitcommitTable, + Columns: []string{ciworkflowresult.GitcommitColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cwru.mutation.GitcommitIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: ciworkflowresult.GitcommitTable, + Columns: []string{ciworkflowresult.GitcommitColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cwru.mutation.StorageFileCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: ciworkflowresult.StorageFileTable, + Columns: []string{ciworkflowresult.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, 
field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cwru.mutation.StorageFileIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: ciworkflowresult.StorageFileTable, + Columns: []string{ciworkflowresult.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, cwru.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{ciworkflowresult.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cwru.mutation.done = true + return n, nil +} + +// CIWorkflowResultUpdateOne is the builder for updating a single CIWorkflowResult entity. +type CIWorkflowResultUpdateOne struct { + config + fields []string + hooks []Hook + mutation *CIWorkflowResultMutation +} + +// SetUpdateTime sets the "update_time" field. +func (cwruo *CIWorkflowResultUpdateOne) SetUpdateTime(t time.Time) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetUpdateTime(t) + return cwruo +} + +// SetOperatingSystem sets the "operating_system" field. +func (cwruo *CIWorkflowResultUpdateOne) SetOperatingSystem(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetOperatingSystem(s) + return cwruo +} + +// SetNillableOperatingSystem sets the "operating_system" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableOperatingSystem(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetOperatingSystem(*s) + } + return cwruo +} + +// SetGpuType sets the "gpu_type" field. 
+func (cwruo *CIWorkflowResultUpdateOne) SetGpuType(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetGpuType(s) + return cwruo +} + +// SetNillableGpuType sets the "gpu_type" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableGpuType(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetGpuType(*s) + } + return cwruo +} + +// ClearGpuType clears the value of the "gpu_type" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearGpuType() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearGpuType() + return cwruo +} + +// SetPytorchVersion sets the "pytorch_version" field. +func (cwruo *CIWorkflowResultUpdateOne) SetPytorchVersion(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetPytorchVersion(s) + return cwruo +} + +// SetNillablePytorchVersion sets the "pytorch_version" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillablePytorchVersion(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetPytorchVersion(*s) + } + return cwruo +} + +// ClearPytorchVersion clears the value of the "pytorch_version" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearPytorchVersion() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearPytorchVersion() + return cwruo +} + +// SetWorkflowName sets the "workflow_name" field. +func (cwruo *CIWorkflowResultUpdateOne) SetWorkflowName(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetWorkflowName(s) + return cwruo +} + +// SetNillableWorkflowName sets the "workflow_name" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableWorkflowName(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetWorkflowName(*s) + } + return cwruo +} + +// ClearWorkflowName clears the value of the "workflow_name" field. 
+func (cwruo *CIWorkflowResultUpdateOne) ClearWorkflowName() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearWorkflowName() + return cwruo +} + +// SetRunID sets the "run_id" field. +func (cwruo *CIWorkflowResultUpdateOne) SetRunID(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetRunID(s) + return cwruo +} + +// SetNillableRunID sets the "run_id" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableRunID(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetRunID(*s) + } + return cwruo +} + +// ClearRunID clears the value of the "run_id" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearRunID() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearRunID() + return cwruo +} + +// SetStatus sets the "status" field. +func (cwruo *CIWorkflowResultUpdateOne) SetStatus(s string) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetStatus(s) + return cwruo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableStatus(s *string) *CIWorkflowResultUpdateOne { + if s != nil { + cwruo.SetStatus(*s) + } + return cwruo +} + +// ClearStatus clears the value of the "status" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearStatus() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearStatus() + return cwruo +} + +// SetStartTime sets the "start_time" field. +func (cwruo *CIWorkflowResultUpdateOne) SetStartTime(i int64) *CIWorkflowResultUpdateOne { + cwruo.mutation.ResetStartTime() + cwruo.mutation.SetStartTime(i) + return cwruo +} + +// SetNillableStartTime sets the "start_time" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableStartTime(i *int64) *CIWorkflowResultUpdateOne { + if i != nil { + cwruo.SetStartTime(*i) + } + return cwruo +} + +// AddStartTime adds i to the "start_time" field. 
+func (cwruo *CIWorkflowResultUpdateOne) AddStartTime(i int64) *CIWorkflowResultUpdateOne { + cwruo.mutation.AddStartTime(i) + return cwruo +} + +// ClearStartTime clears the value of the "start_time" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearStartTime() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearStartTime() + return cwruo +} + +// SetEndTime sets the "end_time" field. +func (cwruo *CIWorkflowResultUpdateOne) SetEndTime(i int64) *CIWorkflowResultUpdateOne { + cwruo.mutation.ResetEndTime() + cwruo.mutation.SetEndTime(i) + return cwruo +} + +// SetNillableEndTime sets the "end_time" field if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableEndTime(i *int64) *CIWorkflowResultUpdateOne { + if i != nil { + cwruo.SetEndTime(*i) + } + return cwruo +} + +// AddEndTime adds i to the "end_time" field. +func (cwruo *CIWorkflowResultUpdateOne) AddEndTime(i int64) *CIWorkflowResultUpdateOne { + cwruo.mutation.AddEndTime(i) + return cwruo +} + +// ClearEndTime clears the value of the "end_time" field. +func (cwruo *CIWorkflowResultUpdateOne) ClearEndTime() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearEndTime() + return cwruo +} + +// SetGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID. +func (cwruo *CIWorkflowResultUpdateOne) SetGitcommitID(id uuid.UUID) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetGitcommitID(id) + return cwruo +} + +// SetNillableGitcommitID sets the "gitcommit" edge to the GitCommit entity by ID if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableGitcommitID(id *uuid.UUID) *CIWorkflowResultUpdateOne { + if id != nil { + cwruo = cwruo.SetGitcommitID(*id) + } + return cwruo +} + +// SetGitcommit sets the "gitcommit" edge to the GitCommit entity. 
+func (cwruo *CIWorkflowResultUpdateOne) SetGitcommit(g *GitCommit) *CIWorkflowResultUpdateOne { + return cwruo.SetGitcommitID(g.ID) +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID. +func (cwruo *CIWorkflowResultUpdateOne) SetStorageFileID(id uuid.UUID) *CIWorkflowResultUpdateOne { + cwruo.mutation.SetStorageFileID(id) + return cwruo +} + +// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil. +func (cwruo *CIWorkflowResultUpdateOne) SetNillableStorageFileID(id *uuid.UUID) *CIWorkflowResultUpdateOne { + if id != nil { + cwruo = cwruo.SetStorageFileID(*id) + } + return cwruo +} + +// SetStorageFile sets the "storage_file" edge to the StorageFile entity. +func (cwruo *CIWorkflowResultUpdateOne) SetStorageFile(s *StorageFile) *CIWorkflowResultUpdateOne { + return cwruo.SetStorageFileID(s.ID) +} + +// Mutation returns the CIWorkflowResultMutation object of the builder. +func (cwruo *CIWorkflowResultUpdateOne) Mutation() *CIWorkflowResultMutation { + return cwruo.mutation +} + +// ClearGitcommit clears the "gitcommit" edge to the GitCommit entity. +func (cwruo *CIWorkflowResultUpdateOne) ClearGitcommit() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearGitcommit() + return cwruo +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. +func (cwruo *CIWorkflowResultUpdateOne) ClearStorageFile() *CIWorkflowResultUpdateOne { + cwruo.mutation.ClearStorageFile() + return cwruo +} + +// Where appends a list predicates to the CIWorkflowResultUpdate builder. +func (cwruo *CIWorkflowResultUpdateOne) Where(ps ...predicate.CIWorkflowResult) *CIWorkflowResultUpdateOne { + cwruo.mutation.Where(ps...) + return cwruo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. 
+func (cwruo *CIWorkflowResultUpdateOne) Select(field string, fields ...string) *CIWorkflowResultUpdateOne { + cwruo.fields = append([]string{field}, fields...) + return cwruo +} + +// Save executes the query and returns the updated CIWorkflowResult entity. +func (cwruo *CIWorkflowResultUpdateOne) Save(ctx context.Context) (*CIWorkflowResult, error) { + cwruo.defaults() + return withHooks(ctx, cwruo.sqlSave, cwruo.mutation, cwruo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cwruo *CIWorkflowResultUpdateOne) SaveX(ctx context.Context) *CIWorkflowResult { + node, err := cwruo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cwruo *CIWorkflowResultUpdateOne) Exec(ctx context.Context) error { + _, err := cwruo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cwruo *CIWorkflowResultUpdateOne) ExecX(ctx context.Context) { + if err := cwruo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (cwruo *CIWorkflowResultUpdateOne) defaults() { + if _, ok := cwruo.mutation.UpdateTime(); !ok { + v := ciworkflowresult.UpdateDefaultUpdateTime() + cwruo.mutation.SetUpdateTime(v) + } +} + +func (cwruo *CIWorkflowResultUpdateOne) sqlSave(ctx context.Context) (_node *CIWorkflowResult, err error) { + _spec := sqlgraph.NewUpdateSpec(ciworkflowresult.Table, ciworkflowresult.Columns, sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID)) + id, ok := cwruo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "CIWorkflowResult.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cwruo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, ciworkflowresult.FieldID) + for _, f := range fields { + if !ciworkflowresult.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != ciworkflowresult.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cwruo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cwruo.mutation.UpdateTime(); ok { + _spec.SetField(ciworkflowresult.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := cwruo.mutation.OperatingSystem(); ok { + _spec.SetField(ciworkflowresult.FieldOperatingSystem, field.TypeString, value) + } + if value, ok := cwruo.mutation.GpuType(); ok { + _spec.SetField(ciworkflowresult.FieldGpuType, field.TypeString, value) + } + if cwruo.mutation.GpuTypeCleared() { + _spec.ClearField(ciworkflowresult.FieldGpuType, field.TypeString) + } + if value, ok := cwruo.mutation.PytorchVersion(); ok { + _spec.SetField(ciworkflowresult.FieldPytorchVersion, field.TypeString, value) + } + if cwruo.mutation.PytorchVersionCleared() { + _spec.ClearField(ciworkflowresult.FieldPytorchVersion, 
field.TypeString) + } + if value, ok := cwruo.mutation.WorkflowName(); ok { + _spec.SetField(ciworkflowresult.FieldWorkflowName, field.TypeString, value) + } + if cwruo.mutation.WorkflowNameCleared() { + _spec.ClearField(ciworkflowresult.FieldWorkflowName, field.TypeString) + } + if value, ok := cwruo.mutation.RunID(); ok { + _spec.SetField(ciworkflowresult.FieldRunID, field.TypeString, value) + } + if cwruo.mutation.RunIDCleared() { + _spec.ClearField(ciworkflowresult.FieldRunID, field.TypeString) + } + if value, ok := cwruo.mutation.Status(); ok { + _spec.SetField(ciworkflowresult.FieldStatus, field.TypeString, value) + } + if cwruo.mutation.StatusCleared() { + _spec.ClearField(ciworkflowresult.FieldStatus, field.TypeString) + } + if value, ok := cwruo.mutation.StartTime(); ok { + _spec.SetField(ciworkflowresult.FieldStartTime, field.TypeInt64, value) + } + if value, ok := cwruo.mutation.AddedStartTime(); ok { + _spec.AddField(ciworkflowresult.FieldStartTime, field.TypeInt64, value) + } + if cwruo.mutation.StartTimeCleared() { + _spec.ClearField(ciworkflowresult.FieldStartTime, field.TypeInt64) + } + if value, ok := cwruo.mutation.EndTime(); ok { + _spec.SetField(ciworkflowresult.FieldEndTime, field.TypeInt64, value) + } + if value, ok := cwruo.mutation.AddedEndTime(); ok { + _spec.AddField(ciworkflowresult.FieldEndTime, field.TypeInt64, value) + } + if cwruo.mutation.EndTimeCleared() { + _spec.ClearField(ciworkflowresult.FieldEndTime, field.TypeInt64) + } + if cwruo.mutation.GitcommitCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: ciworkflowresult.GitcommitTable, + Columns: []string{ciworkflowresult.GitcommitColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cwruo.mutation.GitcommitIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: 
true, + Table: ciworkflowresult.GitcommitTable, + Columns: []string{ciworkflowresult.GitcommitColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cwruo.mutation.StorageFileCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: ciworkflowresult.StorageFileTable, + Columns: []string{ciworkflowresult.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cwruo.mutation.StorageFileIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: ciworkflowresult.StorageFileTable, + Columns: []string{ciworkflowresult.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &CIWorkflowResult{config: cwruo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cwruo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{ciworkflowresult.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cwruo.mutation.done = true + return _node, nil +} diff --git a/ent/client.go b/ent/client.go new file mode 100644 index 0000000..462edde --- /dev/null +++ b/ent/client.go @@ -0,0 +1,1707 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "registry-backend/ent/migrate" + + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/storagefile" + "registry-backend/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // CIWorkflowResult is the client for interacting with the CIWorkflowResult builders. + CIWorkflowResult *CIWorkflowResultClient + // GitCommit is the client for interacting with the GitCommit builders. + GitCommit *GitCommitClient + // Node is the client for interacting with the Node builders. + Node *NodeClient + // NodeVersion is the client for interacting with the NodeVersion builders. + NodeVersion *NodeVersionClient + // PersonalAccessToken is the client for interacting with the PersonalAccessToken builders. + PersonalAccessToken *PersonalAccessTokenClient + // Publisher is the client for interacting with the Publisher builders. + Publisher *PublisherClient + // PublisherPermission is the client for interacting with the PublisherPermission builders. + PublisherPermission *PublisherPermissionClient + // StorageFile is the client for interacting with the StorageFile builders. + StorageFile *StorageFileClient + // User is the client for interacting with the User builders. + User *UserClient +} + +// NewClient creates a new client configured with the given options. 
+func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.CIWorkflowResult = NewCIWorkflowResultClient(c.config) + c.GitCommit = NewGitCommitClient(c.config) + c.Node = NewNodeClient(c.config) + c.NodeVersion = NewNodeVersionClient(c.config) + c.PersonalAccessToken = NewPersonalAccessTokenClient(c.config) + c.Publisher = NewPublisherClient(c.config) + c.PublisherPermission = NewPublisherPermissionClient(c.config) + c.StorageFile = NewStorageFileClient(c.config) + c.User = NewUserClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. 
func Driver(driver dialect.Driver) Option {
	return func(c *config) {
		c.driver = driver
	}
}

// Open opens a database/sql.DB specified by the driver name and
// the data source name, and returns a new client attached to it.
// Optional parameters can be added for configuring the client.
func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
	switch driverName {
	case dialect.MySQL, dialect.Postgres, dialect.SQLite:
		drv, err := sql.Open(driverName, dataSourceName)
		if err != nil {
			return nil, err
		}
		// The Driver option is appended last so it wins over any
		// driver a caller may have passed in options.
		return NewClient(append(options, Driver(drv))...), nil
	default:
		return nil, fmt.Errorf("unsupported driver: %q", driverName)
	}
}

// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")

// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
	// A transactional client's driver is a *txDriver; refuse to nest.
	if _, ok := c.driver.(*txDriver); ok {
		return nil, ErrTxStarted
	}
	tx, err := newTx(ctx, c.driver)
	if err != nil {
		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
	}
	// Copy the config so the tx-scoped clients keep the same hooks and
	// interceptors but execute through the transaction-bound driver.
	cfg := c.config
	cfg.driver = tx
	return &Tx{
		ctx:                 ctx,
		config:              cfg,
		CIWorkflowResult:    NewCIWorkflowResultClient(cfg),
		GitCommit:           NewGitCommitClient(cfg),
		Node:                NewNodeClient(cfg),
		NodeVersion:         NewNodeVersionClient(cfg),
		PersonalAccessToken: NewPersonalAccessTokenClient(cfg),
		Publisher:           NewPublisherClient(cfg),
		PublisherPermission: NewPublisherPermissionClient(cfg),
		StorageFile:         NewStorageFileClient(cfg),
		User:                NewUserClient(cfg),
	}, nil
}

// BeginTx returns a transactional client with specified options.
+func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + CIWorkflowResult: NewCIWorkflowResultClient(cfg), + GitCommit: NewGitCommitClient(cfg), + Node: NewNodeClient(cfg), + NodeVersion: NewNodeVersionClient(cfg), + PersonalAccessToken: NewPersonalAccessTokenClient(cfg), + Publisher: NewPublisherClient(cfg), + PublisherPermission: NewPublisherPermissionClient(cfg), + StorageFile: NewStorageFileClient(cfg), + User: NewUserClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// CIWorkflowResult. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.CIWorkflowResult, c.GitCommit, c.Node, c.NodeVersion, c.PersonalAccessToken, + c.Publisher, c.PublisherPermission, c.StorageFile, c.User, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. 
+func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.CIWorkflowResult, c.GitCommit, c.Node, c.NodeVersion, c.PersonalAccessToken, + c.Publisher, c.PublisherPermission, c.StorageFile, c.User, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *CIWorkflowResultMutation: + return c.CIWorkflowResult.mutate(ctx, m) + case *GitCommitMutation: + return c.GitCommit.mutate(ctx, m) + case *NodeMutation: + return c.Node.mutate(ctx, m) + case *NodeVersionMutation: + return c.NodeVersion.mutate(ctx, m) + case *PersonalAccessTokenMutation: + return c.PersonalAccessToken.mutate(ctx, m) + case *PublisherMutation: + return c.Publisher.mutate(ctx, m) + case *PublisherPermissionMutation: + return c.PublisherPermission.mutate(ctx, m) + case *StorageFileMutation: + return c.StorageFile.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// CIWorkflowResultClient is a client for the CIWorkflowResult schema. +type CIWorkflowResultClient struct { + config +} + +// NewCIWorkflowResultClient returns a client for the CIWorkflowResult from the given config. +func NewCIWorkflowResultClient(c config) *CIWorkflowResultClient { + return &CIWorkflowResultClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `ciworkflowresult.Hooks(f(g(h())))`. +func (c *CIWorkflowResultClient) Use(hooks ...Hook) { + c.hooks.CIWorkflowResult = append(c.hooks.CIWorkflowResult, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `ciworkflowresult.Intercept(f(g(h())))`. 
+func (c *CIWorkflowResultClient) Intercept(interceptors ...Interceptor) { + c.inters.CIWorkflowResult = append(c.inters.CIWorkflowResult, interceptors...) +} + +// Create returns a builder for creating a CIWorkflowResult entity. +func (c *CIWorkflowResultClient) Create() *CIWorkflowResultCreate { + mutation := newCIWorkflowResultMutation(c.config, OpCreate) + return &CIWorkflowResultCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of CIWorkflowResult entities. +func (c *CIWorkflowResultClient) CreateBulk(builders ...*CIWorkflowResultCreate) *CIWorkflowResultCreateBulk { + return &CIWorkflowResultCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *CIWorkflowResultClient) MapCreateBulk(slice any, setFunc func(*CIWorkflowResultCreate, int)) *CIWorkflowResultCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &CIWorkflowResultCreateBulk{err: fmt.Errorf("calling to CIWorkflowResultClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*CIWorkflowResultCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &CIWorkflowResultCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for CIWorkflowResult. +func (c *CIWorkflowResultClient) Update() *CIWorkflowResultUpdate { + mutation := newCIWorkflowResultMutation(c.config, OpUpdate) + return &CIWorkflowResultUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *CIWorkflowResultClient) UpdateOne(cwr *CIWorkflowResult) *CIWorkflowResultUpdateOne { + mutation := newCIWorkflowResultMutation(c.config, OpUpdateOne, withCIWorkflowResult(cwr)) + return &CIWorkflowResultUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *CIWorkflowResultClient) UpdateOneID(id uuid.UUID) *CIWorkflowResultUpdateOne { + mutation := newCIWorkflowResultMutation(c.config, OpUpdateOne, withCIWorkflowResultID(id)) + return &CIWorkflowResultUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for CIWorkflowResult. +func (c *CIWorkflowResultClient) Delete() *CIWorkflowResultDelete { + mutation := newCIWorkflowResultMutation(c.config, OpDelete) + return &CIWorkflowResultDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *CIWorkflowResultClient) DeleteOne(cwr *CIWorkflowResult) *CIWorkflowResultDeleteOne { + return c.DeleteOneID(cwr.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *CIWorkflowResultClient) DeleteOneID(id uuid.UUID) *CIWorkflowResultDeleteOne { + builder := c.Delete().Where(ciworkflowresult.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &CIWorkflowResultDeleteOne{builder} +} + +// Query returns a query builder for CIWorkflowResult. +func (c *CIWorkflowResultClient) Query() *CIWorkflowResultQuery { + return &CIWorkflowResultQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeCIWorkflowResult}, + inters: c.Interceptors(), + } +} + +// Get returns a CIWorkflowResult entity by its id. +func (c *CIWorkflowResultClient) Get(ctx context.Context, id uuid.UUID) (*CIWorkflowResult, error) { + return c.Query().Where(ciworkflowresult.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *CIWorkflowResultClient) GetX(ctx context.Context, id uuid.UUID) *CIWorkflowResult { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGitcommit queries the gitcommit edge of a CIWorkflowResult. +func (c *CIWorkflowResultClient) QueryGitcommit(cwr *CIWorkflowResult) *GitCommitQuery { + query := (&GitCommitClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := cwr.ID + step := sqlgraph.NewStep( + sqlgraph.From(ciworkflowresult.Table, ciworkflowresult.FieldID, id), + sqlgraph.To(gitcommit.Table, gitcommit.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ciworkflowresult.GitcommitTable, ciworkflowresult.GitcommitColumn), + ) + fromV = sqlgraph.Neighbors(cwr.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryStorageFile queries the storage_file edge of a CIWorkflowResult. +func (c *CIWorkflowResultClient) QueryStorageFile(cwr *CIWorkflowResult) *StorageFileQuery { + query := (&StorageFileClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := cwr.ID + step := sqlgraph.NewStep( + sqlgraph.From(ciworkflowresult.Table, ciworkflowresult.FieldID, id), + sqlgraph.To(storagefile.Table, storagefile.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ciworkflowresult.StorageFileTable, ciworkflowresult.StorageFileColumn), + ) + fromV = sqlgraph.Neighbors(cwr.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *CIWorkflowResultClient) Hooks() []Hook { + return c.hooks.CIWorkflowResult +} + +// Interceptors returns the client interceptors. 
+func (c *CIWorkflowResultClient) Interceptors() []Interceptor { + return c.inters.CIWorkflowResult +} + +func (c *CIWorkflowResultClient) mutate(ctx context.Context, m *CIWorkflowResultMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&CIWorkflowResultCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&CIWorkflowResultUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&CIWorkflowResultUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&CIWorkflowResultDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown CIWorkflowResult mutation op: %q", m.Op()) + } +} + +// GitCommitClient is a client for the GitCommit schema. +type GitCommitClient struct { + config +} + +// NewGitCommitClient returns a client for the GitCommit from the given config. +func NewGitCommitClient(c config) *GitCommitClient { + return &GitCommitClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `gitcommit.Hooks(f(g(h())))`. +func (c *GitCommitClient) Use(hooks ...Hook) { + c.hooks.GitCommit = append(c.hooks.GitCommit, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `gitcommit.Intercept(f(g(h())))`. +func (c *GitCommitClient) Intercept(interceptors ...Interceptor) { + c.inters.GitCommit = append(c.inters.GitCommit, interceptors...) +} + +// Create returns a builder for creating a GitCommit entity. +func (c *GitCommitClient) Create() *GitCommitCreate { + mutation := newGitCommitMutation(c.config, OpCreate) + return &GitCommitCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of GitCommit entities. 
+func (c *GitCommitClient) CreateBulk(builders ...*GitCommitCreate) *GitCommitCreateBulk { + return &GitCommitCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GitCommitClient) MapCreateBulk(slice any, setFunc func(*GitCommitCreate, int)) *GitCommitCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GitCommitCreateBulk{err: fmt.Errorf("calling to GitCommitClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GitCommitCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GitCommitCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for GitCommit. +func (c *GitCommitClient) Update() *GitCommitUpdate { + mutation := newGitCommitMutation(c.config, OpUpdate) + return &GitCommitUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *GitCommitClient) UpdateOne(gc *GitCommit) *GitCommitUpdateOne { + mutation := newGitCommitMutation(c.config, OpUpdateOne, withGitCommit(gc)) + return &GitCommitUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *GitCommitClient) UpdateOneID(id uuid.UUID) *GitCommitUpdateOne { + mutation := newGitCommitMutation(c.config, OpUpdateOne, withGitCommitID(id)) + return &GitCommitUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for GitCommit. 
+func (c *GitCommitClient) Delete() *GitCommitDelete { + mutation := newGitCommitMutation(c.config, OpDelete) + return &GitCommitDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *GitCommitClient) DeleteOne(gc *GitCommit) *GitCommitDeleteOne { + return c.DeleteOneID(gc.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *GitCommitClient) DeleteOneID(id uuid.UUID) *GitCommitDeleteOne { + builder := c.Delete().Where(gitcommit.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &GitCommitDeleteOne{builder} +} + +// Query returns a query builder for GitCommit. +func (c *GitCommitClient) Query() *GitCommitQuery { + return &GitCommitQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeGitCommit}, + inters: c.Interceptors(), + } +} + +// Get returns a GitCommit entity by its id. +func (c *GitCommitClient) Get(ctx context.Context, id uuid.UUID) (*GitCommit, error) { + return c.Query().Where(gitcommit.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *GitCommitClient) GetX(ctx context.Context, id uuid.UUID) *GitCommit { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryResults queries the results edge of a GitCommit. +func (c *GitCommitClient) QueryResults(gc *GitCommit) *CIWorkflowResultQuery { + query := (&CIWorkflowResultClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := gc.ID + step := sqlgraph.NewStep( + sqlgraph.From(gitcommit.Table, gitcommit.FieldID, id), + sqlgraph.To(ciworkflowresult.Table, ciworkflowresult.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, gitcommit.ResultsTable, gitcommit.ResultsColumn), + ) + fromV = sqlgraph.Neighbors(gc.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *GitCommitClient) Hooks() []Hook { + return c.hooks.GitCommit +} + +// Interceptors returns the client interceptors. +func (c *GitCommitClient) Interceptors() []Interceptor { + return c.inters.GitCommit +} + +func (c *GitCommitClient) mutate(ctx context.Context, m *GitCommitMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&GitCommitCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&GitCommitUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&GitCommitUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&GitCommitDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown GitCommit mutation op: %q", m.Op()) + } +} + +// NodeClient is a client for the Node schema. +type NodeClient struct { + config +} + +// NewNodeClient returns a client for the Node from the given config. +func NewNodeClient(c config) *NodeClient { + return &NodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `node.Hooks(f(g(h())))`. +func (c *NodeClient) Use(hooks ...Hook) { + c.hooks.Node = append(c.hooks.Node, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `node.Intercept(f(g(h())))`. +func (c *NodeClient) Intercept(interceptors ...Interceptor) { + c.inters.Node = append(c.inters.Node, interceptors...) +} + +// Create returns a builder for creating a Node entity. +func (c *NodeClient) Create() *NodeCreate { + mutation := newNodeMutation(c.config, OpCreate) + return &NodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Node entities. 
+func (c *NodeClient) CreateBulk(builders ...*NodeCreate) *NodeCreateBulk { + return &NodeCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *NodeClient) MapCreateBulk(slice any, setFunc func(*NodeCreate, int)) *NodeCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &NodeCreateBulk{err: fmt.Errorf("calling to NodeClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*NodeCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &NodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Node. +func (c *NodeClient) Update() *NodeUpdate { + mutation := newNodeMutation(c.config, OpUpdate) + return &NodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *NodeClient) UpdateOne(n *Node) *NodeUpdateOne { + mutation := newNodeMutation(c.config, OpUpdateOne, withNode(n)) + return &NodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *NodeClient) UpdateOneID(id string) *NodeUpdateOne { + mutation := newNodeMutation(c.config, OpUpdateOne, withNodeID(id)) + return &NodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Node. +func (c *NodeClient) Delete() *NodeDelete { + mutation := newNodeMutation(c.config, OpDelete) + return &NodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *NodeClient) DeleteOne(n *Node) *NodeDeleteOne { + return c.DeleteOneID(n.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *NodeClient) DeleteOneID(id string) *NodeDeleteOne { + builder := c.Delete().Where(node.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &NodeDeleteOne{builder} +} + +// Query returns a query builder for Node. +func (c *NodeClient) Query() *NodeQuery { + return &NodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeNode}, + inters: c.Interceptors(), + } +} + +// Get returns a Node entity by its id. +func (c *NodeClient) Get(ctx context.Context, id string) (*Node, error) { + return c.Query().Where(node.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *NodeClient) GetX(ctx context.Context, id string) *Node { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPublisher queries the publisher edge of a Node. +func (c *NodeClient) QueryPublisher(n *Node) *PublisherQuery { + query := (&PublisherClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(node.Table, node.FieldID, id), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, node.PublisherTable, node.PublisherColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryVersions queries the versions edge of a Node. 
+func (c *NodeClient) QueryVersions(n *Node) *NodeVersionQuery { + query := (&NodeVersionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(node.Table, node.FieldID, id), + sqlgraph.To(nodeversion.Table, nodeversion.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, node.VersionsTable, node.VersionsColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *NodeClient) Hooks() []Hook { + return c.hooks.Node +} + +// Interceptors returns the client interceptors. +func (c *NodeClient) Interceptors() []Interceptor { + return c.inters.Node +} + +func (c *NodeClient) mutate(ctx context.Context, m *NodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&NodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&NodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&NodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&NodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Node mutation op: %q", m.Op()) + } +} + +// NodeVersionClient is a client for the NodeVersion schema. +type NodeVersionClient struct { + config +} + +// NewNodeVersionClient returns a client for the NodeVersion from the given config. +func NewNodeVersionClient(c config) *NodeVersionClient { + return &NodeVersionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `nodeversion.Hooks(f(g(h())))`. +func (c *NodeVersionClient) Use(hooks ...Hook) { + c.hooks.NodeVersion = append(c.hooks.NodeVersion, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `nodeversion.Intercept(f(g(h())))`. +func (c *NodeVersionClient) Intercept(interceptors ...Interceptor) { + c.inters.NodeVersion = append(c.inters.NodeVersion, interceptors...) +} + +// Create returns a builder for creating a NodeVersion entity. +func (c *NodeVersionClient) Create() *NodeVersionCreate { + mutation := newNodeVersionMutation(c.config, OpCreate) + return &NodeVersionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of NodeVersion entities. +func (c *NodeVersionClient) CreateBulk(builders ...*NodeVersionCreate) *NodeVersionCreateBulk { + return &NodeVersionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *NodeVersionClient) MapCreateBulk(slice any, setFunc func(*NodeVersionCreate, int)) *NodeVersionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &NodeVersionCreateBulk{err: fmt.Errorf("calling to NodeVersionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*NodeVersionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &NodeVersionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for NodeVersion. +func (c *NodeVersionClient) Update() *NodeVersionUpdate { + mutation := newNodeVersionMutation(c.config, OpUpdate) + return &NodeVersionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *NodeVersionClient) UpdateOne(nv *NodeVersion) *NodeVersionUpdateOne { + mutation := newNodeVersionMutation(c.config, OpUpdateOne, withNodeVersion(nv)) + return &NodeVersionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *NodeVersionClient) UpdateOneID(id uuid.UUID) *NodeVersionUpdateOne { + mutation := newNodeVersionMutation(c.config, OpUpdateOne, withNodeVersionID(id)) + return &NodeVersionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for NodeVersion. +func (c *NodeVersionClient) Delete() *NodeVersionDelete { + mutation := newNodeVersionMutation(c.config, OpDelete) + return &NodeVersionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *NodeVersionClient) DeleteOne(nv *NodeVersion) *NodeVersionDeleteOne { + return c.DeleteOneID(nv.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *NodeVersionClient) DeleteOneID(id uuid.UUID) *NodeVersionDeleteOne { + builder := c.Delete().Where(nodeversion.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &NodeVersionDeleteOne{builder} +} + +// Query returns a query builder for NodeVersion. +func (c *NodeVersionClient) Query() *NodeVersionQuery { + return &NodeVersionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeNodeVersion}, + inters: c.Interceptors(), + } +} + +// Get returns a NodeVersion entity by its id. +func (c *NodeVersionClient) Get(ctx context.Context, id uuid.UUID) (*NodeVersion, error) { + return c.Query().Where(nodeversion.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *NodeVersionClient) GetX(ctx context.Context, id uuid.UUID) *NodeVersion { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryNode queries the node edge of a NodeVersion. +func (c *NodeVersionClient) QueryNode(nv *NodeVersion) *NodeQuery { + query := (&NodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := nv.ID + step := sqlgraph.NewStep( + sqlgraph.From(nodeversion.Table, nodeversion.FieldID, id), + sqlgraph.To(node.Table, node.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, nodeversion.NodeTable, nodeversion.NodeColumn), + ) + fromV = sqlgraph.Neighbors(nv.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryStorageFile queries the storage_file edge of a NodeVersion. +func (c *NodeVersionClient) QueryStorageFile(nv *NodeVersion) *StorageFileQuery { + query := (&StorageFileClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := nv.ID + step := sqlgraph.NewStep( + sqlgraph.From(nodeversion.Table, nodeversion.FieldID, id), + sqlgraph.To(storagefile.Table, storagefile.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, nodeversion.StorageFileTable, nodeversion.StorageFileColumn), + ) + fromV = sqlgraph.Neighbors(nv.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *NodeVersionClient) Hooks() []Hook { + return c.hooks.NodeVersion +} + +// Interceptors returns the client interceptors. 
+func (c *NodeVersionClient) Interceptors() []Interceptor { + return c.inters.NodeVersion +} + +func (c *NodeVersionClient) mutate(ctx context.Context, m *NodeVersionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&NodeVersionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&NodeVersionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&NodeVersionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&NodeVersionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown NodeVersion mutation op: %q", m.Op()) + } +} + +// PersonalAccessTokenClient is a client for the PersonalAccessToken schema. +type PersonalAccessTokenClient struct { + config +} + +// NewPersonalAccessTokenClient returns a client for the PersonalAccessToken from the given config. +func NewPersonalAccessTokenClient(c config) *PersonalAccessTokenClient { + return &PersonalAccessTokenClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `personalaccesstoken.Hooks(f(g(h())))`. +func (c *PersonalAccessTokenClient) Use(hooks ...Hook) { + c.hooks.PersonalAccessToken = append(c.hooks.PersonalAccessToken, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `personalaccesstoken.Intercept(f(g(h())))`. +func (c *PersonalAccessTokenClient) Intercept(interceptors ...Interceptor) { + c.inters.PersonalAccessToken = append(c.inters.PersonalAccessToken, interceptors...) +} + +// Create returns a builder for creating a PersonalAccessToken entity. 
+func (c *PersonalAccessTokenClient) Create() *PersonalAccessTokenCreate { + mutation := newPersonalAccessTokenMutation(c.config, OpCreate) + return &PersonalAccessTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PersonalAccessToken entities. +func (c *PersonalAccessTokenClient) CreateBulk(builders ...*PersonalAccessTokenCreate) *PersonalAccessTokenCreateBulk { + return &PersonalAccessTokenCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PersonalAccessTokenClient) MapCreateBulk(slice any, setFunc func(*PersonalAccessTokenCreate, int)) *PersonalAccessTokenCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PersonalAccessTokenCreateBulk{err: fmt.Errorf("calling to PersonalAccessTokenClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PersonalAccessTokenCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PersonalAccessTokenCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PersonalAccessToken. +func (c *PersonalAccessTokenClient) Update() *PersonalAccessTokenUpdate { + mutation := newPersonalAccessTokenMutation(c.config, OpUpdate) + return &PersonalAccessTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *PersonalAccessTokenClient) UpdateOne(pat *PersonalAccessToken) *PersonalAccessTokenUpdateOne { + mutation := newPersonalAccessTokenMutation(c.config, OpUpdateOne, withPersonalAccessToken(pat)) + return &PersonalAccessTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PersonalAccessTokenClient) UpdateOneID(id uuid.UUID) *PersonalAccessTokenUpdateOne { + mutation := newPersonalAccessTokenMutation(c.config, OpUpdateOne, withPersonalAccessTokenID(id)) + return &PersonalAccessTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PersonalAccessToken. +func (c *PersonalAccessTokenClient) Delete() *PersonalAccessTokenDelete { + mutation := newPersonalAccessTokenMutation(c.config, OpDelete) + return &PersonalAccessTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PersonalAccessTokenClient) DeleteOne(pat *PersonalAccessToken) *PersonalAccessTokenDeleteOne { + return c.DeleteOneID(pat.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PersonalAccessTokenClient) DeleteOneID(id uuid.UUID) *PersonalAccessTokenDeleteOne { + builder := c.Delete().Where(personalaccesstoken.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PersonalAccessTokenDeleteOne{builder} +} + +// Query returns a query builder for PersonalAccessToken. +func (c *PersonalAccessTokenClient) Query() *PersonalAccessTokenQuery { + return &PersonalAccessTokenQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePersonalAccessToken}, + inters: c.Interceptors(), + } +} + +// Get returns a PersonalAccessToken entity by its id. 
+func (c *PersonalAccessTokenClient) Get(ctx context.Context, id uuid.UUID) (*PersonalAccessToken, error) { + return c.Query().Where(personalaccesstoken.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PersonalAccessTokenClient) GetX(ctx context.Context, id uuid.UUID) *PersonalAccessToken { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPublisher queries the publisher edge of a PersonalAccessToken. +func (c *PersonalAccessTokenClient) QueryPublisher(pat *PersonalAccessToken) *PublisherQuery { + query := (&PublisherClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pat.ID + step := sqlgraph.NewStep( + sqlgraph.From(personalaccesstoken.Table, personalaccesstoken.FieldID, id), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, personalaccesstoken.PublisherTable, personalaccesstoken.PublisherColumn), + ) + fromV = sqlgraph.Neighbors(pat.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PersonalAccessTokenClient) Hooks() []Hook { + return c.hooks.PersonalAccessToken +} + +// Interceptors returns the client interceptors. 
+func (c *PersonalAccessTokenClient) Interceptors() []Interceptor { + return c.inters.PersonalAccessToken +} + +func (c *PersonalAccessTokenClient) mutate(ctx context.Context, m *PersonalAccessTokenMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PersonalAccessTokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PersonalAccessTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PersonalAccessTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PersonalAccessTokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PersonalAccessToken mutation op: %q", m.Op()) + } +} + +// PublisherClient is a client for the Publisher schema. +type PublisherClient struct { + config +} + +// NewPublisherClient returns a client for the Publisher from the given config. +func NewPublisherClient(c config) *PublisherClient { + return &PublisherClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `publisher.Hooks(f(g(h())))`. +func (c *PublisherClient) Use(hooks ...Hook) { + c.hooks.Publisher = append(c.hooks.Publisher, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `publisher.Intercept(f(g(h())))`. +func (c *PublisherClient) Intercept(interceptors ...Interceptor) { + c.inters.Publisher = append(c.inters.Publisher, interceptors...) +} + +// Create returns a builder for creating a Publisher entity. +func (c *PublisherClient) Create() *PublisherCreate { + mutation := newPublisherMutation(c.config, OpCreate) + return &PublisherCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Publisher entities. 
+func (c *PublisherClient) CreateBulk(builders ...*PublisherCreate) *PublisherCreateBulk { + return &PublisherCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PublisherClient) MapCreateBulk(slice any, setFunc func(*PublisherCreate, int)) *PublisherCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PublisherCreateBulk{err: fmt.Errorf("calling to PublisherClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PublisherCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PublisherCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Publisher. +func (c *PublisherClient) Update() *PublisherUpdate { + mutation := newPublisherMutation(c.config, OpUpdate) + return &PublisherUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PublisherClient) UpdateOne(pu *Publisher) *PublisherUpdateOne { + mutation := newPublisherMutation(c.config, OpUpdateOne, withPublisher(pu)) + return &PublisherUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PublisherClient) UpdateOneID(id string) *PublisherUpdateOne { + mutation := newPublisherMutation(c.config, OpUpdateOne, withPublisherID(id)) + return &PublisherUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Publisher. 
+func (c *PublisherClient) Delete() *PublisherDelete { + mutation := newPublisherMutation(c.config, OpDelete) + return &PublisherDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PublisherClient) DeleteOne(pu *Publisher) *PublisherDeleteOne { + return c.DeleteOneID(pu.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PublisherClient) DeleteOneID(id string) *PublisherDeleteOne { + builder := c.Delete().Where(publisher.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PublisherDeleteOne{builder} +} + +// Query returns a query builder for Publisher. +func (c *PublisherClient) Query() *PublisherQuery { + return &PublisherQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePublisher}, + inters: c.Interceptors(), + } +} + +// Get returns a Publisher entity by its id. +func (c *PublisherClient) Get(ctx context.Context, id string) (*Publisher, error) { + return c.Query().Where(publisher.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PublisherClient) GetX(ctx context.Context, id string) *Publisher { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPublisherPermissions queries the publisher_permissions edge of a Publisher. 
+func (c *PublisherClient) QueryPublisherPermissions(pu *Publisher) *PublisherPermissionQuery { + query := (&PublisherPermissionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pu.ID + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, id), + sqlgraph.To(publisherpermission.Table, publisherpermission.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.PublisherPermissionsTable, publisher.PublisherPermissionsColumn), + ) + fromV = sqlgraph.Neighbors(pu.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryNodes queries the nodes edge of a Publisher. +func (c *PublisherClient) QueryNodes(pu *Publisher) *NodeQuery { + query := (&NodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pu.ID + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, id), + sqlgraph.To(node.Table, node.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.NodesTable, publisher.NodesColumn), + ) + fromV = sqlgraph.Neighbors(pu.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryPersonalAccessTokens queries the personal_access_tokens edge of a Publisher. +func (c *PublisherClient) QueryPersonalAccessTokens(pu *Publisher) *PersonalAccessTokenQuery { + query := (&PersonalAccessTokenClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pu.ID + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, id), + sqlgraph.To(personalaccesstoken.Table, personalaccesstoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.PersonalAccessTokensTable, publisher.PersonalAccessTokensColumn), + ) + fromV = sqlgraph.Neighbors(pu.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *PublisherClient) Hooks() []Hook { + return c.hooks.Publisher +} + +// Interceptors returns the client interceptors. +func (c *PublisherClient) Interceptors() []Interceptor { + return c.inters.Publisher +} + +func (c *PublisherClient) mutate(ctx context.Context, m *PublisherMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PublisherCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PublisherUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PublisherUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PublisherDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Publisher mutation op: %q", m.Op()) + } +} + +// PublisherPermissionClient is a client for the PublisherPermission schema. +type PublisherPermissionClient struct { + config +} + +// NewPublisherPermissionClient returns a client for the PublisherPermission from the given config. +func NewPublisherPermissionClient(c config) *PublisherPermissionClient { + return &PublisherPermissionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `publisherpermission.Hooks(f(g(h())))`. +func (c *PublisherPermissionClient) Use(hooks ...Hook) { + c.hooks.PublisherPermission = append(c.hooks.PublisherPermission, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `publisherpermission.Intercept(f(g(h())))`. +func (c *PublisherPermissionClient) Intercept(interceptors ...Interceptor) { + c.inters.PublisherPermission = append(c.inters.PublisherPermission, interceptors...) +} + +// Create returns a builder for creating a PublisherPermission entity. 
+func (c *PublisherPermissionClient) Create() *PublisherPermissionCreate { + mutation := newPublisherPermissionMutation(c.config, OpCreate) + return &PublisherPermissionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PublisherPermission entities. +func (c *PublisherPermissionClient) CreateBulk(builders ...*PublisherPermissionCreate) *PublisherPermissionCreateBulk { + return &PublisherPermissionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PublisherPermissionClient) MapCreateBulk(slice any, setFunc func(*PublisherPermissionCreate, int)) *PublisherPermissionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PublisherPermissionCreateBulk{err: fmt.Errorf("calling to PublisherPermissionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PublisherPermissionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PublisherPermissionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PublisherPermission. +func (c *PublisherPermissionClient) Update() *PublisherPermissionUpdate { + mutation := newPublisherPermissionMutation(c.config, OpUpdate) + return &PublisherPermissionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *PublisherPermissionClient) UpdateOne(pp *PublisherPermission) *PublisherPermissionUpdateOne { + mutation := newPublisherPermissionMutation(c.config, OpUpdateOne, withPublisherPermission(pp)) + return &PublisherPermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PublisherPermissionClient) UpdateOneID(id int) *PublisherPermissionUpdateOne { + mutation := newPublisherPermissionMutation(c.config, OpUpdateOne, withPublisherPermissionID(id)) + return &PublisherPermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PublisherPermission. +func (c *PublisherPermissionClient) Delete() *PublisherPermissionDelete { + mutation := newPublisherPermissionMutation(c.config, OpDelete) + return &PublisherPermissionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PublisherPermissionClient) DeleteOne(pp *PublisherPermission) *PublisherPermissionDeleteOne { + return c.DeleteOneID(pp.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PublisherPermissionClient) DeleteOneID(id int) *PublisherPermissionDeleteOne { + builder := c.Delete().Where(publisherpermission.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PublisherPermissionDeleteOne{builder} +} + +// Query returns a query builder for PublisherPermission. +func (c *PublisherPermissionClient) Query() *PublisherPermissionQuery { + return &PublisherPermissionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePublisherPermission}, + inters: c.Interceptors(), + } +} + +// Get returns a PublisherPermission entity by its id. 
+func (c *PublisherPermissionClient) Get(ctx context.Context, id int) (*PublisherPermission, error) { + return c.Query().Where(publisherpermission.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PublisherPermissionClient) GetX(ctx context.Context, id int) *PublisherPermission { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a PublisherPermission. +func (c *PublisherPermissionClient) QueryUser(pp *PublisherPermission) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pp.ID + step := sqlgraph.NewStep( + sqlgraph.From(publisherpermission.Table, publisherpermission.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, publisherpermission.UserTable, publisherpermission.UserColumn), + ) + fromV = sqlgraph.Neighbors(pp.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryPublisher queries the publisher edge of a PublisherPermission. +func (c *PublisherPermissionClient) QueryPublisher(pp *PublisherPermission) *PublisherQuery { + query := (&PublisherClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := pp.ID + step := sqlgraph.NewStep( + sqlgraph.From(publisherpermission.Table, publisherpermission.FieldID, id), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, publisherpermission.PublisherTable, publisherpermission.PublisherColumn), + ) + fromV = sqlgraph.Neighbors(pp.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PublisherPermissionClient) Hooks() []Hook { + return c.hooks.PublisherPermission +} + +// Interceptors returns the client interceptors. 
+func (c *PublisherPermissionClient) Interceptors() []Interceptor { + return c.inters.PublisherPermission +} + +func (c *PublisherPermissionClient) mutate(ctx context.Context, m *PublisherPermissionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PublisherPermissionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PublisherPermissionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PublisherPermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PublisherPermissionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PublisherPermission mutation op: %q", m.Op()) + } +} + +// StorageFileClient is a client for the StorageFile schema. +type StorageFileClient struct { + config +} + +// NewStorageFileClient returns a client for the StorageFile from the given config. +func NewStorageFileClient(c config) *StorageFileClient { + return &StorageFileClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `storagefile.Hooks(f(g(h())))`. +func (c *StorageFileClient) Use(hooks ...Hook) { + c.hooks.StorageFile = append(c.hooks.StorageFile, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `storagefile.Intercept(f(g(h())))`. +func (c *StorageFileClient) Intercept(interceptors ...Interceptor) { + c.inters.StorageFile = append(c.inters.StorageFile, interceptors...) +} + +// Create returns a builder for creating a StorageFile entity. 
+func (c *StorageFileClient) Create() *StorageFileCreate { + mutation := newStorageFileMutation(c.config, OpCreate) + return &StorageFileCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of StorageFile entities. +func (c *StorageFileClient) CreateBulk(builders ...*StorageFileCreate) *StorageFileCreateBulk { + return &StorageFileCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *StorageFileClient) MapCreateBulk(slice any, setFunc func(*StorageFileCreate, int)) *StorageFileCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &StorageFileCreateBulk{err: fmt.Errorf("calling to StorageFileClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*StorageFileCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &StorageFileCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for StorageFile. +func (c *StorageFileClient) Update() *StorageFileUpdate { + mutation := newStorageFileMutation(c.config, OpUpdate) + return &StorageFileUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *StorageFileClient) UpdateOne(sf *StorageFile) *StorageFileUpdateOne { + mutation := newStorageFileMutation(c.config, OpUpdateOne, withStorageFile(sf)) + return &StorageFileUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *StorageFileClient) UpdateOneID(id uuid.UUID) *StorageFileUpdateOne { + mutation := newStorageFileMutation(c.config, OpUpdateOne, withStorageFileID(id)) + return &StorageFileUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for StorageFile. +func (c *StorageFileClient) Delete() *StorageFileDelete { + mutation := newStorageFileMutation(c.config, OpDelete) + return &StorageFileDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *StorageFileClient) DeleteOne(sf *StorageFile) *StorageFileDeleteOne { + return c.DeleteOneID(sf.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *StorageFileClient) DeleteOneID(id uuid.UUID) *StorageFileDeleteOne { + builder := c.Delete().Where(storagefile.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &StorageFileDeleteOne{builder} +} + +// Query returns a query builder for StorageFile. +func (c *StorageFileClient) Query() *StorageFileQuery { + return &StorageFileQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeStorageFile}, + inters: c.Interceptors(), + } +} + +// Get returns a StorageFile entity by its id. +func (c *StorageFileClient) Get(ctx context.Context, id uuid.UUID) (*StorageFile, error) { + return c.Query().Where(storagefile.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *StorageFileClient) GetX(ctx context.Context, id uuid.UUID) *StorageFile { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *StorageFileClient) Hooks() []Hook { + return c.hooks.StorageFile +} + +// Interceptors returns the client interceptors. 
+func (c *StorageFileClient) Interceptors() []Interceptor { + return c.inters.StorageFile +} + +func (c *StorageFileClient) mutate(ctx context.Context, m *StorageFileMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&StorageFileCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&StorageFileUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&StorageFileUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&StorageFileDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown StorageFile mutation op: %q", m.Op()) + } +} + +// UserClient is a client for the User schema. +type UserClient struct { + config +} + +// NewUserClient returns a client for the User from the given config. +func NewUserClient(c config) *UserClient { + return &UserClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`. +func (c *UserClient) Use(hooks ...Hook) { + c.hooks.User = append(c.hooks.User, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. +func (c *UserClient) Create() *UserCreate { + mutation := newUserMutation(c.config, OpCreate) + return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of User entities. 
+func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { + return &UserCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for User. +func (c *UserClient) Update() *UserUpdate { + mutation := newUserMutation(c.config, OpUpdate) + return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserClient) UpdateOne(u *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(u)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserClient) UpdateOneID(id string) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for User. +func (c *UserClient) Delete() *UserDelete { + mutation := newUserMutation(c.config, OpDelete) + return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *UserClient) DeleteOne(u *User) *UserDeleteOne { + return c.DeleteOneID(u.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserClient) DeleteOneID(id string) *UserDeleteOne { + builder := c.Delete().Where(user.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserDeleteOne{builder} +} + +// Query returns a query builder for User. +func (c *UserClient) Query() *UserQuery { + return &UserQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), + } +} + +// Get returns a User entity by its id. +func (c *UserClient) Get(ctx context.Context, id string) (*User, error) { + return c.Query().Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserClient) GetX(ctx context.Context, id string) *User { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPublisherPermissions queries the publisher_permissions edge of a User. +func (c *UserClient) QueryPublisherPermissions(u *User) *PublisherPermissionQuery { + query := (&PublisherPermissionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(publisherpermission.Table, publisherpermission.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PublisherPermissionsTable, user.PublisherPermissionsColumn), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserClient) Hooks() []Hook { + return c.hooks.User +} + +// Interceptors returns the client interceptors. 
+func (c *UserClient) Interceptors() []Interceptor { + return c.inters.User +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + CIWorkflowResult, GitCommit, Node, NodeVersion, PersonalAccessToken, Publisher, + PublisherPermission, StorageFile, User []ent.Hook + } + inters struct { + CIWorkflowResult, GitCommit, Node, NodeVersion, PersonalAccessToken, Publisher, + PublisherPermission, StorageFile, User []ent.Interceptor + } +) diff --git a/ent/ent.go b/ent/ent.go new file mode 100644 index 0000000..52ab9e1 --- /dev/null +++ b/ent/ent.go @@ -0,0 +1,624 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/storagefile" + "registry-backend/ent/user" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ent aliases to avoid import conflicts in user's code. 
+type (
+	Op            = ent.Op
+	Hook          = ent.Hook
+	Value         = ent.Value
+	Query         = ent.Query
+	QueryContext  = ent.QueryContext
+	Querier       = ent.Querier
+	QuerierFunc   = ent.QuerierFunc
+	Interceptor   = ent.Interceptor
+	InterceptFunc = ent.InterceptFunc
+	Traverser     = ent.Traverser
+	TraverseFunc  = ent.TraverseFunc
+	Policy        = ent.Policy
+	Mutator       = ent.Mutator
+	Mutation      = ent.Mutation
+	MutateFunc    = ent.MutateFunc
+)
+
+// clientCtxKey is the private context key under which a *Client is stored.
+type clientCtxKey struct{}
+
+// FromContext returns a Client stored inside a context, or nil if there isn't one.
+func FromContext(ctx context.Context) *Client {
+	c, _ := ctx.Value(clientCtxKey{}).(*Client)
+	return c
+}
+
+// NewContext returns a new context with the given Client attached.
+func NewContext(parent context.Context, c *Client) context.Context {
+	return context.WithValue(parent, clientCtxKey{}, c)
+}
+
+// txCtxKey is the private context key under which a *Tx is stored.
+type txCtxKey struct{}
+
+// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
+func TxFromContext(ctx context.Context) *Tx {
+	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
+	return tx
+}
+
+// NewTxContext returns a new context with the given Tx attached.
+func NewTxContext(parent context.Context, tx *Tx) context.Context {
+	return context.WithValue(parent, txCtxKey{}, tx)
+}
+
+// OrderFunc applies an ordering on the sql selector.
+// Deprecated: Use Asc/Desc functions or the package builders instead.
+type OrderFunc func(*sql.Selector)
+
+var (
+	initCheck   sync.Once
+	columnCheck sql.ColumnCheck
+)
+
+// checkColumn checks if the column exists in the given table.
+func checkColumn(table, column string) error {
+	initCheck.Do(func() {
+		// Build the table->column validator exactly once; the map is
+		// read-only afterwards, so the sync.Once guard is sufficient.
+		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
+			ciworkflowresult.Table:    ciworkflowresult.ValidColumn,
+			gitcommit.Table:           gitcommit.ValidColumn,
+			node.Table:                node.ValidColumn,
+			nodeversion.Table:         nodeversion.ValidColumn,
+			personalaccesstoken.Table: personalaccesstoken.ValidColumn,
+			publisher.Table:           publisher.ValidColumn,
+			publisherpermission.Table: publisherpermission.ValidColumn,
+			storagefile.Table:         storagefile.ValidColumn,
+			user.Table:                user.ValidColumn,
+		})
+	})
+	return columnCheck(table, column)
+}
+
+// Asc applies the given fields in ASC order.
+func Asc(fields ...string) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		for _, f := range fields {
+			// An unknown column is recorded on the selector rather than
+			// returned here, so the error surfaces when the query runs.
+			if err := checkColumn(s.TableName(), f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
+			}
+			s.OrderBy(sql.Asc(s.C(f)))
+		}
+	}
+}
+
+// Desc applies the given fields in DESC order.
+func Desc(fields ...string) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		for _, f := range fields {
+			if err := checkColumn(s.TableName(), f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
+			}
+			s.OrderBy(sql.Desc(s.C(f)))
+		}
+	}
+}
+
+// AggregateFunc applies an aggregation step on the group-by traversal/selector.
+type AggregateFunc func(*sql.Selector) string
+
+// As is a pseudo aggregation function for renaming other functions with custom names. For example:
+//
+//	GroupBy(field1, field2).
+//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
+//	Scan(ctx, &v)
+func As(fn AggregateFunc, end string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		return sql.As(fn(s), end)
+	}
+}
+
+// Count applies the "count" aggregation function on each group.
+func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. 
+func IsValidationError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ValidationError
+	return errors.As(err, &e)
+}
+
+// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
+type NotFoundError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotFoundError) Error() string {
+	return "ent: " + e.label + " not found"
+}
+
+// IsNotFound returns a boolean indicating whether the error is a not found error.
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotFoundError
+	return errors.As(err, &e)
+}
+
+// MaskNotFound masks not found error.
+func MaskNotFound(err error) error {
+	if IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
+type NotSingularError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotSingularError) Error() string {
+	return "ent: " + e.label + " not singular"
+}
+
+// IsNotSingular returns a boolean indicating whether the error is a not singular error.
+func IsNotSingular(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotSingularError
+	return errors.As(err, &e)
+}
+
+// NotLoadedError returns when trying to get a node that was not loaded by the query.
+type NotLoadedError struct {
+	edge string
+}
+
+// Error implements the error interface.
+func (e *NotLoadedError) Error() string {
+	return "ent: " + e.edge + " edge was not loaded"
+}
+
+// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
+func IsNotLoaded(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotLoadedError
+	return errors.As(err, &e)
+}
+
+// ConstraintError returns when trying to create/update one or more entities and
+// one or more of their constraints failed. For example, violation of edge or
+// field uniqueness.
+type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. 
+func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. 
+func withHooks[V Value, M any, PM interface {
+	*M
+	Mutation
+}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
+	// Fast path: no hooks registered, run the builder operation directly.
+	if len(hooks) == 0 {
+		return exec(ctx)
+	}
+	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+		mutationT, ok := any(m).(PM)
+		if !ok {
+			return nil, fmt.Errorf("unexpected mutation type %T", m)
+		}
+		// Set the mutation to the builder.
+		// Hooks may have replaced or mutated m; copy it back so exec
+		// operates on the hook-modified state.
+		*mutation = *mutationT
+		return exec(ctx)
+	})
+	// Chain the hooks in reverse so that hooks[0] becomes the outermost
+	// wrapper and is therefore the first to observe the mutation.
+	for i := len(hooks) - 1; i >= 0; i-- {
+		if hooks[i] == nil {
+			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+		}
+		mut = hooks[i](mut)
+	}
+	v, err := mut.Mutate(ctx, mutation)
+	if err != nil {
+		return value, err
+	}
+	nv, ok := v.(V)
+	if !ok {
+		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
+	}
+	return nv, nil
+}
+
+// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/ent/enttest/enttest.go b/ent/enttest/enttest.go new file mode 100644 index 0000000..78e3f25 --- /dev/null +++ b/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + "registry-backend/ent" + // required by schema hooks. + _ "registry-backend/ent/runtime" + + "registry-backend/ent/migrate" + + "entgo.io/ent/dialect/sql/schema" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. 
+func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/ent/generate.go b/ent/generate.go new file mode 100644 index 0000000..10036c5 --- /dev/null +++ b/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert --feature sql/lock ./schema diff --git a/ent/gitcommit.go b/ent/gitcommit.go new file mode 100644 index 0000000..1f5480e --- /dev/null +++ b/ent/gitcommit.go @@ -0,0 +1,221 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "registry-backend/ent/gitcommit" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +// GitCommit is the model entity for the GitCommit schema. 
+type GitCommit struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. + UpdateTime time.Time `json:"update_time,omitempty"` + // CommitHash holds the value of the "commit_hash" field. + CommitHash string `json:"commit_hash,omitempty"` + // BranchName holds the value of the "branch_name" field. + BranchName string `json:"branch_name,omitempty"` + // RepoName holds the value of the "repo_name" field. + RepoName string `json:"repo_name,omitempty"` + // CommitMessage holds the value of the "commit_message" field. + CommitMessage string `json:"commit_message,omitempty"` + // CommitTimestamp holds the value of the "commit_timestamp" field. + CommitTimestamp time.Time `json:"commit_timestamp,omitempty"` + // Author holds the value of the "author" field. + Author string `json:"author,omitempty"` + // Timestamp holds the value of the "timestamp" field. + Timestamp time.Time `json:"timestamp,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the GitCommitQuery when eager-loading is set. + Edges GitCommitEdges `json:"edges"` + selectValues sql.SelectValues +} + +// GitCommitEdges holds the relations/edges for other nodes in the graph. +type GitCommitEdges struct { + // Results holds the value of the results edge. + Results []*CIWorkflowResult `json:"results,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ResultsOrErr returns the Results value or an error if the edge +// was not loaded in eager-loading. 
+func (e GitCommitEdges) ResultsOrErr() ([]*CIWorkflowResult, error) { + if e.loadedTypes[0] { + return e.Results, nil + } + return nil, &NotLoadedError{edge: "results"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*GitCommit) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case gitcommit.FieldCommitHash, gitcommit.FieldBranchName, gitcommit.FieldRepoName, gitcommit.FieldCommitMessage, gitcommit.FieldAuthor: + values[i] = new(sql.NullString) + case gitcommit.FieldCreateTime, gitcommit.FieldUpdateTime, gitcommit.FieldCommitTimestamp, gitcommit.FieldTimestamp: + values[i] = new(sql.NullTime) + case gitcommit.FieldID: + values[i] = new(uuid.UUID) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the GitCommit fields. +func (gc *GitCommit) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case gitcommit.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + gc.ID = *value + } + case gitcommit.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + gc.CreateTime = value.Time + } + case gitcommit.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + gc.UpdateTime = value.Time + } + case gitcommit.FieldCommitHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field commit_hash", 
values[i]) + } else if value.Valid { + gc.CommitHash = value.String + } + case gitcommit.FieldBranchName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field branch_name", values[i]) + } else if value.Valid { + gc.BranchName = value.String + } + case gitcommit.FieldRepoName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field repo_name", values[i]) + } else if value.Valid { + gc.RepoName = value.String + } + case gitcommit.FieldCommitMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field commit_message", values[i]) + } else if value.Valid { + gc.CommitMessage = value.String + } + case gitcommit.FieldCommitTimestamp: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field commit_timestamp", values[i]) + } else if value.Valid { + gc.CommitTimestamp = value.Time + } + case gitcommit.FieldAuthor: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field author", values[i]) + } else if value.Valid { + gc.Author = value.String + } + case gitcommit.FieldTimestamp: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field timestamp", values[i]) + } else if value.Valid { + gc.Timestamp = value.Time + } + default: + gc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the GitCommit. +// This includes values selected through modifiers, order, etc. +func (gc *GitCommit) Value(name string) (ent.Value, error) { + return gc.selectValues.Get(name) +} + +// QueryResults queries the "results" edge of the GitCommit entity. +func (gc *GitCommit) QueryResults() *CIWorkflowResultQuery { + return NewGitCommitClient(gc.config).QueryResults(gc) +} + +// Update returns a builder for updating this GitCommit. 
+// Note that you need to call GitCommit.Unwrap() before calling this method if this GitCommit +// was returned from a transaction, and the transaction was committed or rolled back. +func (gc *GitCommit) Update() *GitCommitUpdateOne { + return NewGitCommitClient(gc.config).UpdateOne(gc) +} + +// Unwrap unwraps the GitCommit entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (gc *GitCommit) Unwrap() *GitCommit { + _tx, ok := gc.config.driver.(*txDriver) + if !ok { + panic("ent: GitCommit is not a transactional entity") + } + gc.config.driver = _tx.drv + return gc +} + +// String implements the fmt.Stringer. +func (gc *GitCommit) String() string { + var builder strings.Builder + builder.WriteString("GitCommit(") + builder.WriteString(fmt.Sprintf("id=%v, ", gc.ID)) + builder.WriteString("create_time=") + builder.WriteString(gc.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(gc.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("commit_hash=") + builder.WriteString(gc.CommitHash) + builder.WriteString(", ") + builder.WriteString("branch_name=") + builder.WriteString(gc.BranchName) + builder.WriteString(", ") + builder.WriteString("repo_name=") + builder.WriteString(gc.RepoName) + builder.WriteString(", ") + builder.WriteString("commit_message=") + builder.WriteString(gc.CommitMessage) + builder.WriteString(", ") + builder.WriteString("commit_timestamp=") + builder.WriteString(gc.CommitTimestamp.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("author=") + builder.WriteString(gc.Author) + builder.WriteString(", ") + builder.WriteString("timestamp=") + builder.WriteString(gc.Timestamp.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// GitCommits is a parsable slice of GitCommit. 
+type GitCommits []*GitCommit diff --git a/ent/gitcommit/gitcommit.go b/ent/gitcommit/gitcommit.go new file mode 100644 index 0000000..df2dbbc --- /dev/null +++ b/ent/gitcommit/gitcommit.go @@ -0,0 +1,156 @@ +// Code generated by ent, DO NOT EDIT. + +package gitcommit + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the gitcommit type in the database. + Label = "git_commit" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldCommitHash holds the string denoting the commit_hash field in the database. + FieldCommitHash = "commit_hash" + // FieldBranchName holds the string denoting the branch_name field in the database. + FieldBranchName = "branch_name" + // FieldRepoName holds the string denoting the repo_name field in the database. + FieldRepoName = "repo_name" + // FieldCommitMessage holds the string denoting the commit_message field in the database. + FieldCommitMessage = "commit_message" + // FieldCommitTimestamp holds the string denoting the commit_timestamp field in the database. + FieldCommitTimestamp = "commit_timestamp" + // FieldAuthor holds the string denoting the author field in the database. + FieldAuthor = "author" + // FieldTimestamp holds the string denoting the timestamp field in the database. + FieldTimestamp = "timestamp" + // EdgeResults holds the string denoting the results edge name in mutations. + EdgeResults = "results" + // Table holds the table name of the gitcommit in the database. + Table = "git_commits" + // ResultsTable is the table that holds the results relation/edge. 
+ ResultsTable = "ci_workflow_results" + // ResultsInverseTable is the table name for the CIWorkflowResult entity. + // It exists in this package in order to avoid circular dependency with the "ciworkflowresult" package. + ResultsInverseTable = "ci_workflow_results" + // ResultsColumn is the table column denoting the results relation/edge. + ResultsColumn = "git_commit_results" +) + +// Columns holds all SQL columns for gitcommit fields. +var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldCommitHash, + FieldBranchName, + FieldRepoName, + FieldCommitMessage, + FieldCommitTimestamp, + FieldAuthor, + FieldTimestamp, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the GitCommit queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. 
+func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByCommitHash orders the results by the commit_hash field. +func ByCommitHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCommitHash, opts...).ToFunc() +} + +// ByBranchName orders the results by the branch_name field. +func ByBranchName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBranchName, opts...).ToFunc() +} + +// ByRepoName orders the results by the repo_name field. +func ByRepoName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRepoName, opts...).ToFunc() +} + +// ByCommitMessage orders the results by the commit_message field. +func ByCommitMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCommitMessage, opts...).ToFunc() +} + +// ByCommitTimestamp orders the results by the commit_timestamp field. +func ByCommitTimestamp(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCommitTimestamp, opts...).ToFunc() +} + +// ByAuthor orders the results by the author field. +func ByAuthor(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthor, opts...).ToFunc() +} + +// ByTimestamp orders the results by the timestamp field. +func ByTimestamp(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTimestamp, opts...).ToFunc() +} + +// ByResultsCount orders the results by results count. +func ByResultsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newResultsStep(), opts...) + } +} + +// ByResults orders the results by results terms. +func ByResults(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newResultsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newResultsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ResultsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ResultsTable, ResultsColumn), + ) +} diff --git a/ent/gitcommit/where.go b/ent/gitcommit/where.go new file mode 100644 index 0000000..494acf8 --- /dev/null +++ b/ent/gitcommit/where.go @@ -0,0 +1,645 @@ +// Code generated by ent, DO NOT EDIT. + +package gitcommit + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id uuid.UUID) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ. +func UpdateTime(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldUpdateTime, v)) +} + +// CommitHash applies equality check predicate on the "commit_hash" field. It's identical to CommitHashEQ. +func CommitHash(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitHash, v)) +} + +// BranchName applies equality check predicate on the "branch_name" field. It's identical to BranchNameEQ. +func BranchName(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldBranchName, v)) +} + +// RepoName applies equality check predicate on the "repo_name" field. It's identical to RepoNameEQ. +func RepoName(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldRepoName, v)) +} + +// CommitMessage applies equality check predicate on the "commit_message" field. It's identical to CommitMessageEQ. +func CommitMessage(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitMessage, v)) +} + +// CommitTimestamp applies equality check predicate on the "commit_timestamp" field. It's identical to CommitTimestampEQ. +func CommitTimestamp(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitTimestamp, v)) +} + +// Author applies equality check predicate on the "author" field. It's identical to AuthorEQ. +func Author(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldAuthor, v)) +} + +// Timestamp applies equality check predicate on the "timestamp" field. 
It's identical to TimestampEQ. +func Timestamp(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldTimestamp, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. +func CreateTimeEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. +func CreateTimeIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. +func CreateTimeNotIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. +func CreateTimeGT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. +func UpdateTimeEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. 
+func UpdateTimeNEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. +func UpdateTimeGT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. +func UpdateTimeLT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldUpdateTime, v)) +} + +// CommitHashEQ applies the EQ predicate on the "commit_hash" field. +func CommitHashEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitHash, v)) +} + +// CommitHashNEQ applies the NEQ predicate on the "commit_hash" field. +func CommitHashNEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldCommitHash, v)) +} + +// CommitHashIn applies the In predicate on the "commit_hash" field. +func CommitHashIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldCommitHash, vs...)) +} + +// CommitHashNotIn applies the NotIn predicate on the "commit_hash" field. 
+func CommitHashNotIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldCommitHash, vs...)) +} + +// CommitHashGT applies the GT predicate on the "commit_hash" field. +func CommitHashGT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldCommitHash, v)) +} + +// CommitHashGTE applies the GTE predicate on the "commit_hash" field. +func CommitHashGTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldCommitHash, v)) +} + +// CommitHashLT applies the LT predicate on the "commit_hash" field. +func CommitHashLT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldCommitHash, v)) +} + +// CommitHashLTE applies the LTE predicate on the "commit_hash" field. +func CommitHashLTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldCommitHash, v)) +} + +// CommitHashContains applies the Contains predicate on the "commit_hash" field. +func CommitHashContains(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContains(FieldCommitHash, v)) +} + +// CommitHashHasPrefix applies the HasPrefix predicate on the "commit_hash" field. +func CommitHashHasPrefix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasPrefix(FieldCommitHash, v)) +} + +// CommitHashHasSuffix applies the HasSuffix predicate on the "commit_hash" field. +func CommitHashHasSuffix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasSuffix(FieldCommitHash, v)) +} + +// CommitHashEqualFold applies the EqualFold predicate on the "commit_hash" field. +func CommitHashEqualFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEqualFold(FieldCommitHash, v)) +} + +// CommitHashContainsFold applies the ContainsFold predicate on the "commit_hash" field. 
+func CommitHashContainsFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContainsFold(FieldCommitHash, v)) +} + +// BranchNameEQ applies the EQ predicate on the "branch_name" field. +func BranchNameEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldBranchName, v)) +} + +// BranchNameNEQ applies the NEQ predicate on the "branch_name" field. +func BranchNameNEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldBranchName, v)) +} + +// BranchNameIn applies the In predicate on the "branch_name" field. +func BranchNameIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldBranchName, vs...)) +} + +// BranchNameNotIn applies the NotIn predicate on the "branch_name" field. +func BranchNameNotIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldBranchName, vs...)) +} + +// BranchNameGT applies the GT predicate on the "branch_name" field. +func BranchNameGT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldBranchName, v)) +} + +// BranchNameGTE applies the GTE predicate on the "branch_name" field. +func BranchNameGTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldBranchName, v)) +} + +// BranchNameLT applies the LT predicate on the "branch_name" field. +func BranchNameLT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldBranchName, v)) +} + +// BranchNameLTE applies the LTE predicate on the "branch_name" field. +func BranchNameLTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldBranchName, v)) +} + +// BranchNameContains applies the Contains predicate on the "branch_name" field. +func BranchNameContains(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContains(FieldBranchName, v)) +} + +// BranchNameHasPrefix applies the HasPrefix predicate on the "branch_name" field. 
+func BranchNameHasPrefix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasPrefix(FieldBranchName, v)) +} + +// BranchNameHasSuffix applies the HasSuffix predicate on the "branch_name" field. +func BranchNameHasSuffix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasSuffix(FieldBranchName, v)) +} + +// BranchNameEqualFold applies the EqualFold predicate on the "branch_name" field. +func BranchNameEqualFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEqualFold(FieldBranchName, v)) +} + +// BranchNameContainsFold applies the ContainsFold predicate on the "branch_name" field. +func BranchNameContainsFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContainsFold(FieldBranchName, v)) +} + +// RepoNameEQ applies the EQ predicate on the "repo_name" field. +func RepoNameEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldRepoName, v)) +} + +// RepoNameNEQ applies the NEQ predicate on the "repo_name" field. +func RepoNameNEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldRepoName, v)) +} + +// RepoNameIn applies the In predicate on the "repo_name" field. +func RepoNameIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldRepoName, vs...)) +} + +// RepoNameNotIn applies the NotIn predicate on the "repo_name" field. +func RepoNameNotIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldRepoName, vs...)) +} + +// RepoNameGT applies the GT predicate on the "repo_name" field. +func RepoNameGT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldRepoName, v)) +} + +// RepoNameGTE applies the GTE predicate on the "repo_name" field. +func RepoNameGTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldRepoName, v)) +} + +// RepoNameLT applies the LT predicate on the "repo_name" field. 
+func RepoNameLT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldRepoName, v)) +} + +// RepoNameLTE applies the LTE predicate on the "repo_name" field. +func RepoNameLTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldRepoName, v)) +} + +// RepoNameContains applies the Contains predicate on the "repo_name" field. +func RepoNameContains(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContains(FieldRepoName, v)) +} + +// RepoNameHasPrefix applies the HasPrefix predicate on the "repo_name" field. +func RepoNameHasPrefix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasPrefix(FieldRepoName, v)) +} + +// RepoNameHasSuffix applies the HasSuffix predicate on the "repo_name" field. +func RepoNameHasSuffix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasSuffix(FieldRepoName, v)) +} + +// RepoNameEqualFold applies the EqualFold predicate on the "repo_name" field. +func RepoNameEqualFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEqualFold(FieldRepoName, v)) +} + +// RepoNameContainsFold applies the ContainsFold predicate on the "repo_name" field. +func RepoNameContainsFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContainsFold(FieldRepoName, v)) +} + +// CommitMessageEQ applies the EQ predicate on the "commit_message" field. +func CommitMessageEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitMessage, v)) +} + +// CommitMessageNEQ applies the NEQ predicate on the "commit_message" field. +func CommitMessageNEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldCommitMessage, v)) +} + +// CommitMessageIn applies the In predicate on the "commit_message" field. 
+func CommitMessageIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldCommitMessage, vs...)) +} + +// CommitMessageNotIn applies the NotIn predicate on the "commit_message" field. +func CommitMessageNotIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldCommitMessage, vs...)) +} + +// CommitMessageGT applies the GT predicate on the "commit_message" field. +func CommitMessageGT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldCommitMessage, v)) +} + +// CommitMessageGTE applies the GTE predicate on the "commit_message" field. +func CommitMessageGTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldCommitMessage, v)) +} + +// CommitMessageLT applies the LT predicate on the "commit_message" field. +func CommitMessageLT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldCommitMessage, v)) +} + +// CommitMessageLTE applies the LTE predicate on the "commit_message" field. +func CommitMessageLTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldCommitMessage, v)) +} + +// CommitMessageContains applies the Contains predicate on the "commit_message" field. +func CommitMessageContains(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContains(FieldCommitMessage, v)) +} + +// CommitMessageHasPrefix applies the HasPrefix predicate on the "commit_message" field. +func CommitMessageHasPrefix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasPrefix(FieldCommitMessage, v)) +} + +// CommitMessageHasSuffix applies the HasSuffix predicate on the "commit_message" field. +func CommitMessageHasSuffix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasSuffix(FieldCommitMessage, v)) +} + +// CommitMessageEqualFold applies the EqualFold predicate on the "commit_message" field. 
+func CommitMessageEqualFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEqualFold(FieldCommitMessage, v)) +} + +// CommitMessageContainsFold applies the ContainsFold predicate on the "commit_message" field. +func CommitMessageContainsFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContainsFold(FieldCommitMessage, v)) +} + +// CommitTimestampEQ applies the EQ predicate on the "commit_timestamp" field. +func CommitTimestampEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldCommitTimestamp, v)) +} + +// CommitTimestampNEQ applies the NEQ predicate on the "commit_timestamp" field. +func CommitTimestampNEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldCommitTimestamp, v)) +} + +// CommitTimestampIn applies the In predicate on the "commit_timestamp" field. +func CommitTimestampIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldCommitTimestamp, vs...)) +} + +// CommitTimestampNotIn applies the NotIn predicate on the "commit_timestamp" field. +func CommitTimestampNotIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldCommitTimestamp, vs...)) +} + +// CommitTimestampGT applies the GT predicate on the "commit_timestamp" field. +func CommitTimestampGT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldCommitTimestamp, v)) +} + +// CommitTimestampGTE applies the GTE predicate on the "commit_timestamp" field. +func CommitTimestampGTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldCommitTimestamp, v)) +} + +// CommitTimestampLT applies the LT predicate on the "commit_timestamp" field. +func CommitTimestampLT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldCommitTimestamp, v)) +} + +// CommitTimestampLTE applies the LTE predicate on the "commit_timestamp" field. 
+func CommitTimestampLTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldCommitTimestamp, v)) +} + +// AuthorEQ applies the EQ predicate on the "author" field. +func AuthorEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldAuthor, v)) +} + +// AuthorNEQ applies the NEQ predicate on the "author" field. +func AuthorNEQ(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldAuthor, v)) +} + +// AuthorIn applies the In predicate on the "author" field. +func AuthorIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldAuthor, vs...)) +} + +// AuthorNotIn applies the NotIn predicate on the "author" field. +func AuthorNotIn(vs ...string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldAuthor, vs...)) +} + +// AuthorGT applies the GT predicate on the "author" field. +func AuthorGT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldAuthor, v)) +} + +// AuthorGTE applies the GTE predicate on the "author" field. +func AuthorGTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldAuthor, v)) +} + +// AuthorLT applies the LT predicate on the "author" field. +func AuthorLT(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldAuthor, v)) +} + +// AuthorLTE applies the LTE predicate on the "author" field. +func AuthorLTE(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldAuthor, v)) +} + +// AuthorContains applies the Contains predicate on the "author" field. +func AuthorContains(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContains(FieldAuthor, v)) +} + +// AuthorHasPrefix applies the HasPrefix predicate on the "author" field. 
+func AuthorHasPrefix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasPrefix(FieldAuthor, v)) +} + +// AuthorHasSuffix applies the HasSuffix predicate on the "author" field. +func AuthorHasSuffix(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldHasSuffix(FieldAuthor, v)) +} + +// AuthorIsNil applies the IsNil predicate on the "author" field. +func AuthorIsNil() predicate.GitCommit { + return predicate.GitCommit(sql.FieldIsNull(FieldAuthor)) +} + +// AuthorNotNil applies the NotNil predicate on the "author" field. +func AuthorNotNil() predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotNull(FieldAuthor)) +} + +// AuthorEqualFold applies the EqualFold predicate on the "author" field. +func AuthorEqualFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEqualFold(FieldAuthor, v)) +} + +// AuthorContainsFold applies the ContainsFold predicate on the "author" field. +func AuthorContainsFold(v string) predicate.GitCommit { + return predicate.GitCommit(sql.FieldContainsFold(FieldAuthor, v)) +} + +// TimestampEQ applies the EQ predicate on the "timestamp" field. +func TimestampEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldEQ(FieldTimestamp, v)) +} + +// TimestampNEQ applies the NEQ predicate on the "timestamp" field. +func TimestampNEQ(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNEQ(FieldTimestamp, v)) +} + +// TimestampIn applies the In predicate on the "timestamp" field. +func TimestampIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldIn(FieldTimestamp, vs...)) +} + +// TimestampNotIn applies the NotIn predicate on the "timestamp" field. +func TimestampNotIn(vs ...time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotIn(FieldTimestamp, vs...)) +} + +// TimestampGT applies the GT predicate on the "timestamp" field. 
+func TimestampGT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGT(FieldTimestamp, v)) +} + +// TimestampGTE applies the GTE predicate on the "timestamp" field. +func TimestampGTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldGTE(FieldTimestamp, v)) +} + +// TimestampLT applies the LT predicate on the "timestamp" field. +func TimestampLT(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLT(FieldTimestamp, v)) +} + +// TimestampLTE applies the LTE predicate on the "timestamp" field. +func TimestampLTE(v time.Time) predicate.GitCommit { + return predicate.GitCommit(sql.FieldLTE(FieldTimestamp, v)) +} + +// TimestampIsNil applies the IsNil predicate on the "timestamp" field. +func TimestampIsNil() predicate.GitCommit { + return predicate.GitCommit(sql.FieldIsNull(FieldTimestamp)) +} + +// TimestampNotNil applies the NotNil predicate on the "timestamp" field. +func TimestampNotNil() predicate.GitCommit { + return predicate.GitCommit(sql.FieldNotNull(FieldTimestamp)) +} + +// HasResults applies the HasEdge predicate on the "results" edge. +func HasResults() predicate.GitCommit { + return predicate.GitCommit(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ResultsTable, ResultsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasResultsWith applies the HasEdge predicate on the "results" edge with a given conditions (other predicates). +func HasResultsWith(preds ...predicate.CIWorkflowResult) predicate.GitCommit { + return predicate.GitCommit(func(s *sql.Selector) { + step := newResultsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.GitCommit) predicate.GitCommit { + return predicate.GitCommit(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.GitCommit) predicate.GitCommit { + return predicate.GitCommit(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.GitCommit) predicate.GitCommit { + return predicate.GitCommit(sql.NotPredicates(p)) +} diff --git a/ent/gitcommit_create.go b/ent/gitcommit_create.go new file mode 100644 index 0000000..6e74e43 --- /dev/null +++ b/ent/gitcommit_create.go @@ -0,0 +1,1005 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// GitCommitCreate is the builder for creating a GitCommit entity. +type GitCommitCreate struct { + config + mutation *GitCommitMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreateTime sets the "create_time" field. +func (gcc *GitCommitCreate) SetCreateTime(t time.Time) *GitCommitCreate { + gcc.mutation.SetCreateTime(t) + return gcc +} + +// SetNillableCreateTime sets the "create_time" field if the given value is not nil. +func (gcc *GitCommitCreate) SetNillableCreateTime(t *time.Time) *GitCommitCreate { + if t != nil { + gcc.SetCreateTime(*t) + } + return gcc +} + +// SetUpdateTime sets the "update_time" field. +func (gcc *GitCommitCreate) SetUpdateTime(t time.Time) *GitCommitCreate { + gcc.mutation.SetUpdateTime(t) + return gcc +} + +// SetNillableUpdateTime sets the "update_time" field if the given value is not nil. 
+func (gcc *GitCommitCreate) SetNillableUpdateTime(t *time.Time) *GitCommitCreate { + if t != nil { + gcc.SetUpdateTime(*t) + } + return gcc +} + +// SetCommitHash sets the "commit_hash" field. +func (gcc *GitCommitCreate) SetCommitHash(s string) *GitCommitCreate { + gcc.mutation.SetCommitHash(s) + return gcc +} + +// SetBranchName sets the "branch_name" field. +func (gcc *GitCommitCreate) SetBranchName(s string) *GitCommitCreate { + gcc.mutation.SetBranchName(s) + return gcc +} + +// SetRepoName sets the "repo_name" field. +func (gcc *GitCommitCreate) SetRepoName(s string) *GitCommitCreate { + gcc.mutation.SetRepoName(s) + return gcc +} + +// SetCommitMessage sets the "commit_message" field. +func (gcc *GitCommitCreate) SetCommitMessage(s string) *GitCommitCreate { + gcc.mutation.SetCommitMessage(s) + return gcc +} + +// SetCommitTimestamp sets the "commit_timestamp" field. +func (gcc *GitCommitCreate) SetCommitTimestamp(t time.Time) *GitCommitCreate { + gcc.mutation.SetCommitTimestamp(t) + return gcc +} + +// SetAuthor sets the "author" field. +func (gcc *GitCommitCreate) SetAuthor(s string) *GitCommitCreate { + gcc.mutation.SetAuthor(s) + return gcc +} + +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (gcc *GitCommitCreate) SetNillableAuthor(s *string) *GitCommitCreate { + if s != nil { + gcc.SetAuthor(*s) + } + return gcc +} + +// SetTimestamp sets the "timestamp" field. +func (gcc *GitCommitCreate) SetTimestamp(t time.Time) *GitCommitCreate { + gcc.mutation.SetTimestamp(t) + return gcc +} + +// SetNillableTimestamp sets the "timestamp" field if the given value is not nil. +func (gcc *GitCommitCreate) SetNillableTimestamp(t *time.Time) *GitCommitCreate { + if t != nil { + gcc.SetTimestamp(*t) + } + return gcc +} + +// SetID sets the "id" field. +func (gcc *GitCommitCreate) SetID(u uuid.UUID) *GitCommitCreate { + gcc.mutation.SetID(u) + return gcc +} + +// SetNillableID sets the "id" field if the given value is not nil. 
+func (gcc *GitCommitCreate) SetNillableID(u *uuid.UUID) *GitCommitCreate { + if u != nil { + gcc.SetID(*u) + } + return gcc +} + +// AddResultIDs adds the "results" edge to the CIWorkflowResult entity by IDs. +func (gcc *GitCommitCreate) AddResultIDs(ids ...uuid.UUID) *GitCommitCreate { + gcc.mutation.AddResultIDs(ids...) + return gcc +} + +// AddResults adds the "results" edges to the CIWorkflowResult entity. +func (gcc *GitCommitCreate) AddResults(c ...*CIWorkflowResult) *GitCommitCreate { + ids := make([]uuid.UUID, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return gcc.AddResultIDs(ids...) +} + +// Mutation returns the GitCommitMutation object of the builder. +func (gcc *GitCommitCreate) Mutation() *GitCommitMutation { + return gcc.mutation +} + +// Save creates the GitCommit in the database. +func (gcc *GitCommitCreate) Save(ctx context.Context) (*GitCommit, error) { + gcc.defaults() + return withHooks(ctx, gcc.sqlSave, gcc.mutation, gcc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (gcc *GitCommitCreate) SaveX(ctx context.Context) *GitCommit { + v, err := gcc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (gcc *GitCommitCreate) Exec(ctx context.Context) error { + _, err := gcc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gcc *GitCommitCreate) ExecX(ctx context.Context) { + if err := gcc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (gcc *GitCommitCreate) defaults() { + if _, ok := gcc.mutation.CreateTime(); !ok { + v := gitcommit.DefaultCreateTime() + gcc.mutation.SetCreateTime(v) + } + if _, ok := gcc.mutation.UpdateTime(); !ok { + v := gitcommit.DefaultUpdateTime() + gcc.mutation.SetUpdateTime(v) + } + if _, ok := gcc.mutation.ID(); !ok { + v := gitcommit.DefaultID() + gcc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (gcc *GitCommitCreate) check() error { + if _, ok := gcc.mutation.CreateTime(); !ok { + return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "GitCommit.create_time"`)} + } + if _, ok := gcc.mutation.UpdateTime(); !ok { + return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "GitCommit.update_time"`)} + } + if _, ok := gcc.mutation.CommitHash(); !ok { + return &ValidationError{Name: "commit_hash", err: errors.New(`ent: missing required field "GitCommit.commit_hash"`)} + } + if _, ok := gcc.mutation.BranchName(); !ok { + return &ValidationError{Name: "branch_name", err: errors.New(`ent: missing required field "GitCommit.branch_name"`)} + } + if _, ok := gcc.mutation.RepoName(); !ok { + return &ValidationError{Name: "repo_name", err: errors.New(`ent: missing required field "GitCommit.repo_name"`)} + } + if _, ok := gcc.mutation.CommitMessage(); !ok { + return &ValidationError{Name: "commit_message", err: errors.New(`ent: missing required field "GitCommit.commit_message"`)} + } + if _, ok := gcc.mutation.CommitTimestamp(); !ok { + return &ValidationError{Name: "commit_timestamp", err: errors.New(`ent: missing required field "GitCommit.commit_timestamp"`)} + } + return nil +} + +func (gcc *GitCommitCreate) sqlSave(ctx context.Context) (*GitCommit, error) { + if err := gcc.check(); err != nil { + return nil, err + } + _node, _spec := gcc.createSpec() + if err := sqlgraph.CreateNode(ctx, gcc.driver, _spec); err != nil { + if 
sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + gcc.mutation.id = &_node.ID + gcc.mutation.done = true + return _node, nil +} + +func (gcc *GitCommitCreate) createSpec() (*GitCommit, *sqlgraph.CreateSpec) { + var ( + _node = &GitCommit{config: gcc.config} + _spec = sqlgraph.NewCreateSpec(gitcommit.Table, sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = gcc.conflict + if id, ok := gcc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := gcc.mutation.CreateTime(); ok { + _spec.SetField(gitcommit.FieldCreateTime, field.TypeTime, value) + _node.CreateTime = value + } + if value, ok := gcc.mutation.UpdateTime(); ok { + _spec.SetField(gitcommit.FieldUpdateTime, field.TypeTime, value) + _node.UpdateTime = value + } + if value, ok := gcc.mutation.CommitHash(); ok { + _spec.SetField(gitcommit.FieldCommitHash, field.TypeString, value) + _node.CommitHash = value + } + if value, ok := gcc.mutation.BranchName(); ok { + _spec.SetField(gitcommit.FieldBranchName, field.TypeString, value) + _node.BranchName = value + } + if value, ok := gcc.mutation.RepoName(); ok { + _spec.SetField(gitcommit.FieldRepoName, field.TypeString, value) + _node.RepoName = value + } + if value, ok := gcc.mutation.CommitMessage(); ok { + _spec.SetField(gitcommit.FieldCommitMessage, field.TypeString, value) + _node.CommitMessage = value + } + if value, ok := gcc.mutation.CommitTimestamp(); ok { + _spec.SetField(gitcommit.FieldCommitTimestamp, field.TypeTime, value) + _node.CommitTimestamp = value + } + if value, ok := gcc.mutation.Author(); ok { + _spec.SetField(gitcommit.FieldAuthor, field.TypeString, value) + _node.Author = value + } + if value, ok := gcc.mutation.Timestamp(); ok { + 
_spec.SetField(gitcommit.FieldTimestamp, field.TypeTime, value) + _node.Timestamp = value + } + if nodes := gcc.mutation.ResultsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.GitCommit.Create(). +// SetCreateTime(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.GitCommitUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (gcc *GitCommitCreate) OnConflict(opts ...sql.ConflictOption) *GitCommitUpsertOne { + gcc.conflict = opts + return &GitCommitUpsertOne{ + create: gcc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (gcc *GitCommitCreate) OnConflictColumns(columns ...string) *GitCommitUpsertOne { + gcc.conflict = append(gcc.conflict, sql.ConflictColumns(columns...)) + return &GitCommitUpsertOne{ + create: gcc, + } +} + +type ( + // GitCommitUpsertOne is the builder for "upsert"-ing + // one GitCommit node. + GitCommitUpsertOne struct { + create *GitCommitCreate + } + + // GitCommitUpsert is the "OnConflict" setter. + GitCommitUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdateTime sets the "update_time" field. 
+func (u *GitCommitUpsert) SetUpdateTime(v time.Time) *GitCommitUpsert { + u.Set(gitcommit.FieldUpdateTime, v) + return u +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateUpdateTime() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldUpdateTime) + return u +} + +// SetCommitHash sets the "commit_hash" field. +func (u *GitCommitUpsert) SetCommitHash(v string) *GitCommitUpsert { + u.Set(gitcommit.FieldCommitHash, v) + return u +} + +// UpdateCommitHash sets the "commit_hash" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateCommitHash() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldCommitHash) + return u +} + +// SetBranchName sets the "branch_name" field. +func (u *GitCommitUpsert) SetBranchName(v string) *GitCommitUpsert { + u.Set(gitcommit.FieldBranchName, v) + return u +} + +// UpdateBranchName sets the "branch_name" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateBranchName() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldBranchName) + return u +} + +// SetRepoName sets the "repo_name" field. +func (u *GitCommitUpsert) SetRepoName(v string) *GitCommitUpsert { + u.Set(gitcommit.FieldRepoName, v) + return u +} + +// UpdateRepoName sets the "repo_name" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateRepoName() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldRepoName) + return u +} + +// SetCommitMessage sets the "commit_message" field. +func (u *GitCommitUpsert) SetCommitMessage(v string) *GitCommitUpsert { + u.Set(gitcommit.FieldCommitMessage, v) + return u +} + +// UpdateCommitMessage sets the "commit_message" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateCommitMessage() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldCommitMessage) + return u +} + +// SetCommitTimestamp sets the "commit_timestamp" field. 
+func (u *GitCommitUpsert) SetCommitTimestamp(v time.Time) *GitCommitUpsert { + u.Set(gitcommit.FieldCommitTimestamp, v) + return u +} + +// UpdateCommitTimestamp sets the "commit_timestamp" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateCommitTimestamp() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldCommitTimestamp) + return u +} + +// SetAuthor sets the "author" field. +func (u *GitCommitUpsert) SetAuthor(v string) *GitCommitUpsert { + u.Set(gitcommit.FieldAuthor, v) + return u +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateAuthor() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldAuthor) + return u +} + +// ClearAuthor clears the value of the "author" field. +func (u *GitCommitUpsert) ClearAuthor() *GitCommitUpsert { + u.SetNull(gitcommit.FieldAuthor) + return u +} + +// SetTimestamp sets the "timestamp" field. +func (u *GitCommitUpsert) SetTimestamp(v time.Time) *GitCommitUpsert { + u.Set(gitcommit.FieldTimestamp, v) + return u +} + +// UpdateTimestamp sets the "timestamp" field to the value that was provided on create. +func (u *GitCommitUpsert) UpdateTimestamp() *GitCommitUpsert { + u.SetExcluded(gitcommit.FieldTimestamp) + return u +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (u *GitCommitUpsert) ClearTimestamp() *GitCommitUpsert { + u.SetNull(gitcommit.FieldTimestamp) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(gitcommit.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *GitCommitUpsertOne) UpdateNewValues() *GitCommitUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(gitcommit.FieldID) + } + if _, exists := u.create.mutation.CreateTime(); exists { + s.SetIgnore(gitcommit.FieldCreateTime) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *GitCommitUpsertOne) Ignore() *GitCommitUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *GitCommitUpsertOne) DoNothing() *GitCommitUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GitCommitCreate.OnConflict +// documentation for more info. +func (u *GitCommitUpsertOne) Update(set func(*GitCommitUpsert)) *GitCommitUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GitCommitUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *GitCommitUpsertOne) SetUpdateTime(v time.Time) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateUpdateTime() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetCommitHash sets the "commit_hash" field. 
+func (u *GitCommitUpsertOne) SetCommitHash(v string) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitHash(v) + }) +} + +// UpdateCommitHash sets the "commit_hash" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateCommitHash() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitHash() + }) +} + +// SetBranchName sets the "branch_name" field. +func (u *GitCommitUpsertOne) SetBranchName(v string) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetBranchName(v) + }) +} + +// UpdateBranchName sets the "branch_name" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateBranchName() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateBranchName() + }) +} + +// SetRepoName sets the "repo_name" field. +func (u *GitCommitUpsertOne) SetRepoName(v string) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetRepoName(v) + }) +} + +// UpdateRepoName sets the "repo_name" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateRepoName() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateRepoName() + }) +} + +// SetCommitMessage sets the "commit_message" field. +func (u *GitCommitUpsertOne) SetCommitMessage(v string) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitMessage(v) + }) +} + +// UpdateCommitMessage sets the "commit_message" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateCommitMessage() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitMessage() + }) +} + +// SetCommitTimestamp sets the "commit_timestamp" field. 
+func (u *GitCommitUpsertOne) SetCommitTimestamp(v time.Time) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitTimestamp(v) + }) +} + +// UpdateCommitTimestamp sets the "commit_timestamp" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateCommitTimestamp() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitTimestamp() + }) +} + +// SetAuthor sets the "author" field. +func (u *GitCommitUpsertOne) SetAuthor(v string) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetAuthor(v) + }) +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateAuthor() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateAuthor() + }) +} + +// ClearAuthor clears the value of the "author" field. +func (u *GitCommitUpsertOne) ClearAuthor() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.ClearAuthor() + }) +} + +// SetTimestamp sets the "timestamp" field. +func (u *GitCommitUpsertOne) SetTimestamp(v time.Time) *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.SetTimestamp(v) + }) +} + +// UpdateTimestamp sets the "timestamp" field to the value that was provided on create. +func (u *GitCommitUpsertOne) UpdateTimestamp() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateTimestamp() + }) +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (u *GitCommitUpsertOne) ClearTimestamp() *GitCommitUpsertOne { + return u.Update(func(s *GitCommitUpsert) { + s.ClearTimestamp() + }) +} + +// Exec executes the query. +func (u *GitCommitUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for GitCommitCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *GitCommitUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *GitCommitUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: GitCommitUpsertOne.ID is not supported by MySQL driver. Use GitCommitUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *GitCommitUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// GitCommitCreateBulk is the builder for creating many GitCommit entities in bulk. +type GitCommitCreateBulk struct { + config + err error + builders []*GitCommitCreate + conflict []sql.ConflictOption +} + +// Save creates the GitCommit entities in the database. 
+func (gccb *GitCommitCreateBulk) Save(ctx context.Context) ([]*GitCommit, error) { + if gccb.err != nil { + return nil, gccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(gccb.builders)) + nodes := make([]*GitCommit, len(gccb.builders)) + mutators := make([]Mutator, len(gccb.builders)) + for i := range gccb.builders { + func(i int, root context.Context) { + builder := gccb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*GitCommitMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, gccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = gccb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, gccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, gccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (gccb *GitCommitCreateBulk) SaveX(ctx context.Context) []*GitCommit { + v, err := gccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (gccb *GitCommitCreateBulk) Exec(ctx context.Context) error { + _, err := gccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gccb *GitCommitCreateBulk) ExecX(ctx context.Context) { + if err := gccb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.GitCommit.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.GitCommitUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (gccb *GitCommitCreateBulk) OnConflict(opts ...sql.ConflictOption) *GitCommitUpsertBulk { + gccb.conflict = opts + return &GitCommitUpsertBulk{ + create: gccb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (gccb *GitCommitCreateBulk) OnConflictColumns(columns ...string) *GitCommitUpsertBulk { + gccb.conflict = append(gccb.conflict, sql.ConflictColumns(columns...)) + return &GitCommitUpsertBulk{ + create: gccb, + } +} + +// GitCommitUpsertBulk is the builder for "upsert"-ing +// a bulk of GitCommit nodes. +type GitCommitUpsertBulk struct { + create *GitCommitCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(gitcommit.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *GitCommitUpsertBulk) UpdateNewValues() *GitCommitUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(gitcommit.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(gitcommit.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.GitCommit.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *GitCommitUpsertBulk) Ignore() *GitCommitUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *GitCommitUpsertBulk) DoNothing() *GitCommitUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GitCommitCreateBulk.OnConflict +// documentation for more info. +func (u *GitCommitUpsertBulk) Update(set func(*GitCommitUpsert)) *GitCommitUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GitCommitUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *GitCommitUpsertBulk) SetUpdateTime(v time.Time) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateUpdateTime() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetCommitHash sets the "commit_hash" field. 
+func (u *GitCommitUpsertBulk) SetCommitHash(v string) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitHash(v) + }) +} + +// UpdateCommitHash sets the "commit_hash" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateCommitHash() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitHash() + }) +} + +// SetBranchName sets the "branch_name" field. +func (u *GitCommitUpsertBulk) SetBranchName(v string) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetBranchName(v) + }) +} + +// UpdateBranchName sets the "branch_name" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateBranchName() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateBranchName() + }) +} + +// SetRepoName sets the "repo_name" field. +func (u *GitCommitUpsertBulk) SetRepoName(v string) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetRepoName(v) + }) +} + +// UpdateRepoName sets the "repo_name" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateRepoName() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateRepoName() + }) +} + +// SetCommitMessage sets the "commit_message" field. +func (u *GitCommitUpsertBulk) SetCommitMessage(v string) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitMessage(v) + }) +} + +// UpdateCommitMessage sets the "commit_message" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateCommitMessage() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitMessage() + }) +} + +// SetCommitTimestamp sets the "commit_timestamp" field. 
+func (u *GitCommitUpsertBulk) SetCommitTimestamp(v time.Time) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetCommitTimestamp(v) + }) +} + +// UpdateCommitTimestamp sets the "commit_timestamp" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateCommitTimestamp() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateCommitTimestamp() + }) +} + +// SetAuthor sets the "author" field. +func (u *GitCommitUpsertBulk) SetAuthor(v string) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetAuthor(v) + }) +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateAuthor() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateAuthor() + }) +} + +// ClearAuthor clears the value of the "author" field. +func (u *GitCommitUpsertBulk) ClearAuthor() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.ClearAuthor() + }) +} + +// SetTimestamp sets the "timestamp" field. +func (u *GitCommitUpsertBulk) SetTimestamp(v time.Time) *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.SetTimestamp(v) + }) +} + +// UpdateTimestamp sets the "timestamp" field to the value that was provided on create. +func (u *GitCommitUpsertBulk) UpdateTimestamp() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.UpdateTimestamp() + }) +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (u *GitCommitUpsertBulk) ClearTimestamp() *GitCommitUpsertBulk { + return u.Update(func(s *GitCommitUpsert) { + s.ClearTimestamp() + }) +} + +// Exec executes the query. +func (u *GitCommitUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. 
Set it on the GitCommitCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for GitCommitCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *GitCommitUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/gitcommit_delete.go b/ent/gitcommit_delete.go new file mode 100644 index 0000000..b2b821d --- /dev/null +++ b/ent/gitcommit_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/gitcommit" + "registry-backend/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// GitCommitDelete is the builder for deleting a GitCommit entity. +type GitCommitDelete struct { + config + hooks []Hook + mutation *GitCommitMutation +} + +// Where appends a list predicates to the GitCommitDelete builder. +func (gcd *GitCommitDelete) Where(ps ...predicate.GitCommit) *GitCommitDelete { + gcd.mutation.Where(ps...) + return gcd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (gcd *GitCommitDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, gcd.sqlExec, gcd.mutation, gcd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (gcd *GitCommitDelete) ExecX(ctx context.Context) int { + n, err := gcd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (gcd *GitCommitDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(gitcommit.Table, sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID)) + if ps := gcd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, gcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + gcd.mutation.done = true + return affected, err +} + +// GitCommitDeleteOne is the builder for deleting a single GitCommit entity. +type GitCommitDeleteOne struct { + gcd *GitCommitDelete +} + +// Where appends a list predicates to the GitCommitDelete builder. +func (gcdo *GitCommitDeleteOne) Where(ps ...predicate.GitCommit) *GitCommitDeleteOne { + gcdo.gcd.mutation.Where(ps...) + return gcdo +} + +// Exec executes the deletion query. +func (gcdo *GitCommitDeleteOne) Exec(ctx context.Context) error { + n, err := gcdo.gcd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{gitcommit.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (gcdo *GitCommitDeleteOne) ExecX(ctx context.Context) { + if err := gcdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/gitcommit_query.go b/ent/gitcommit_query.go new file mode 100644 index 0000000..37cac41 --- /dev/null +++ b/ent/gitcommit_query.go @@ -0,0 +1,644 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/predicate" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// GitCommitQuery is the builder for querying GitCommit entities. +type GitCommitQuery struct { + config + ctx *QueryContext + order []gitcommit.OrderOption + inters []Interceptor + predicates []predicate.GitCommit + withResults *CIWorkflowResultQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the GitCommitQuery builder. +func (gcq *GitCommitQuery) Where(ps ...predicate.GitCommit) *GitCommitQuery { + gcq.predicates = append(gcq.predicates, ps...) + return gcq +} + +// Limit the number of records to be returned by this query. +func (gcq *GitCommitQuery) Limit(limit int) *GitCommitQuery { + gcq.ctx.Limit = &limit + return gcq +} + +// Offset to start from. +func (gcq *GitCommitQuery) Offset(offset int) *GitCommitQuery { + gcq.ctx.Offset = &offset + return gcq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (gcq *GitCommitQuery) Unique(unique bool) *GitCommitQuery { + gcq.ctx.Unique = &unique + return gcq +} + +// Order specifies how the records should be ordered. +func (gcq *GitCommitQuery) Order(o ...gitcommit.OrderOption) *GitCommitQuery { + gcq.order = append(gcq.order, o...) + return gcq +} + +// QueryResults chains the current query on the "results" edge. 
+func (gcq *GitCommitQuery) QueryResults() *CIWorkflowResultQuery { + query := (&CIWorkflowResultClient{config: gcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := gcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := gcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(gitcommit.Table, gitcommit.FieldID, selector), + sqlgraph.To(ciworkflowresult.Table, ciworkflowresult.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, gitcommit.ResultsTable, gitcommit.ResultsColumn), + ) + fromU = sqlgraph.SetNeighbors(gcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first GitCommit entity from the query. +// Returns a *NotFoundError when no GitCommit was found. +func (gcq *GitCommitQuery) First(ctx context.Context) (*GitCommit, error) { + nodes, err := gcq.Limit(1).All(setContextOp(ctx, gcq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{gitcommit.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (gcq *GitCommitQuery) FirstX(ctx context.Context) *GitCommit { + node, err := gcq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first GitCommit ID from the query. +// Returns a *NotFoundError when no GitCommit ID was found. +func (gcq *GitCommitQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = gcq.Limit(1).IDs(setContextOp(ctx, gcq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{gitcommit.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (gcq *GitCommitQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := gcq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single GitCommit entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one GitCommit entity is found. +// Returns a *NotFoundError when no GitCommit entities are found. +func (gcq *GitCommitQuery) Only(ctx context.Context) (*GitCommit, error) { + nodes, err := gcq.Limit(2).All(setContextOp(ctx, gcq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{gitcommit.Label} + default: + return nil, &NotSingularError{gitcommit.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (gcq *GitCommitQuery) OnlyX(ctx context.Context) *GitCommit { + node, err := gcq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only GitCommit ID in the query. +// Returns a *NotSingularError when more than one GitCommit ID is found. +// Returns a *NotFoundError when no entities are found. +func (gcq *GitCommitQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = gcq.Limit(2).IDs(setContextOp(ctx, gcq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{gitcommit.Label} + default: + err = &NotSingularError{gitcommit.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (gcq *GitCommitQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := gcq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of GitCommits. 
+func (gcq *GitCommitQuery) All(ctx context.Context) ([]*GitCommit, error) { + ctx = setContextOp(ctx, gcq.ctx, "All") + if err := gcq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*GitCommit, *GitCommitQuery]() + return withInterceptors[[]*GitCommit](ctx, gcq, qr, gcq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (gcq *GitCommitQuery) AllX(ctx context.Context) []*GitCommit { + nodes, err := gcq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of GitCommit IDs. +func (gcq *GitCommitQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if gcq.ctx.Unique == nil && gcq.path != nil { + gcq.Unique(true) + } + ctx = setContextOp(ctx, gcq.ctx, "IDs") + if err = gcq.Select(gitcommit.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (gcq *GitCommitQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := gcq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (gcq *GitCommitQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, gcq.ctx, "Count") + if err := gcq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, gcq, querierCount[*GitCommitQuery](), gcq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (gcq *GitCommitQuery) CountX(ctx context.Context) int { + count, err := gcq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (gcq *GitCommitQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, gcq.ctx, "Exist") + switch _, err := gcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (gcq *GitCommitQuery) ExistX(ctx context.Context) bool { + exist, err := gcq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the GitCommitQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (gcq *GitCommitQuery) Clone() *GitCommitQuery { + if gcq == nil { + return nil + } + return &GitCommitQuery{ + config: gcq.config, + ctx: gcq.ctx.Clone(), + order: append([]gitcommit.OrderOption{}, gcq.order...), + inters: append([]Interceptor{}, gcq.inters...), + predicates: append([]predicate.GitCommit{}, gcq.predicates...), + withResults: gcq.withResults.Clone(), + // clone intermediate query. + sql: gcq.sql.Clone(), + path: gcq.path, + } +} + +// WithResults tells the query-builder to eager-load the nodes that are connected to +// the "results" edge. The optional arguments are used to configure the query builder of the edge. +func (gcq *GitCommitQuery) WithResults(opts ...func(*CIWorkflowResultQuery)) *GitCommitQuery { + query := (&CIWorkflowResultClient{config: gcq.config}).Query() + for _, opt := range opts { + opt(query) + } + gcq.withResults = query + return gcq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.GitCommit.Query(). +// GroupBy(gitcommit.FieldCreateTime). 
+// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (gcq *GitCommitQuery) GroupBy(field string, fields ...string) *GitCommitGroupBy { + gcq.ctx.Fields = append([]string{field}, fields...) + grbuild := &GitCommitGroupBy{build: gcq} + grbuild.flds = &gcq.ctx.Fields + grbuild.label = gitcommit.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.GitCommit.Query(). +// Select(gitcommit.FieldCreateTime). +// Scan(ctx, &v) +func (gcq *GitCommitQuery) Select(fields ...string) *GitCommitSelect { + gcq.ctx.Fields = append(gcq.ctx.Fields, fields...) + sbuild := &GitCommitSelect{GitCommitQuery: gcq} + sbuild.label = gitcommit.Label + sbuild.flds, sbuild.scan = &gcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a GitCommitSelect configured with the given aggregations. +func (gcq *GitCommitQuery) Aggregate(fns ...AggregateFunc) *GitCommitSelect { + return gcq.Select().Aggregate(fns...) 
+} + +func (gcq *GitCommitQuery) prepareQuery(ctx context.Context) error { + for _, inter := range gcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, gcq); err != nil { + return err + } + } + } + for _, f := range gcq.ctx.Fields { + if !gitcommit.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if gcq.path != nil { + prev, err := gcq.path(ctx) + if err != nil { + return err + } + gcq.sql = prev + } + return nil +} + +func (gcq *GitCommitQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*GitCommit, error) { + var ( + nodes = []*GitCommit{} + _spec = gcq.querySpec() + loadedTypes = [1]bool{ + gcq.withResults != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*GitCommit).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &GitCommit{config: gcq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(gcq.modifiers) > 0 { + _spec.Modifiers = gcq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, gcq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := gcq.withResults; query != nil { + if err := gcq.loadResults(ctx, query, nodes, + func(n *GitCommit) { n.Edges.Results = []*CIWorkflowResult{} }, + func(n *GitCommit, e *CIWorkflowResult) { n.Edges.Results = append(n.Edges.Results, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (gcq *GitCommitQuery) loadResults(ctx context.Context, query *CIWorkflowResultQuery, nodes []*GitCommit, init func(*GitCommit), assign func(*GitCommit, *CIWorkflowResult)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := 
make(map[uuid.UUID]*GitCommit) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.CIWorkflowResult(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(gitcommit.ResultsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.git_commit_results + if fk == nil { + return fmt.Errorf(`foreign-key "git_commit_results" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "git_commit_results" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (gcq *GitCommitQuery) sqlCount(ctx context.Context) (int, error) { + _spec := gcq.querySpec() + if len(gcq.modifiers) > 0 { + _spec.Modifiers = gcq.modifiers + } + _spec.Node.Columns = gcq.ctx.Fields + if len(gcq.ctx.Fields) > 0 { + _spec.Unique = gcq.ctx.Unique != nil && *gcq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, gcq.driver, _spec) +} + +func (gcq *GitCommitQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(gitcommit.Table, gitcommit.Columns, sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID)) + _spec.From = gcq.sql + if unique := gcq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if gcq.path != nil { + _spec.Unique = true + } + if fields := gcq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, gitcommit.FieldID) + for i := range fields { + if fields[i] != gitcommit.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := gcq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := gcq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := 
gcq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := gcq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (gcq *GitCommitQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(gcq.driver.Dialect()) + t1 := builder.Table(gitcommit.Table) + columns := gcq.ctx.Fields + if len(columns) == 0 { + columns = gitcommit.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if gcq.sql != nil { + selector = gcq.sql + selector.Select(selector.Columns(columns...)...) + } + if gcq.ctx.Unique != nil && *gcq.ctx.Unique { + selector.Distinct() + } + for _, m := range gcq.modifiers { + m(selector) + } + for _, p := range gcq.predicates { + p(selector) + } + for _, p := range gcq.order { + p(selector) + } + if offset := gcq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := gcq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (gcq *GitCommitQuery) ForUpdate(opts ...sql.LockOption) *GitCommitQuery { + if gcq.driver.Dialect() == dialect.Postgres { + gcq.Unique(false) + } + gcq.modifiers = append(gcq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return gcq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (gcq *GitCommitQuery) ForShare(opts ...sql.LockOption) *GitCommitQuery { + if gcq.driver.Dialect() == dialect.Postgres { + gcq.Unique(false) + } + gcq.modifiers = append(gcq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return gcq +} + +// GitCommitGroupBy is the group-by builder for GitCommit entities. +type GitCommitGroupBy struct { + selector + build *GitCommitQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (gcgb *GitCommitGroupBy) Aggregate(fns ...AggregateFunc) *GitCommitGroupBy { + gcgb.fns = append(gcgb.fns, fns...) + return gcgb +} + +// Scan applies the selector query and scans the result into the given value. +func (gcgb *GitCommitGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, gcgb.build.ctx, "GroupBy") + if err := gcgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GitCommitQuery, *GitCommitGroupBy](ctx, gcgb.build, gcgb, gcgb.build.inters, v) +} + +func (gcgb *GitCommitGroupBy) sqlScan(ctx context.Context, root *GitCommitQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(gcgb.fns)) + for _, fn := range gcgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*gcgb.flds)+len(gcgb.fns)) + for _, f := range *gcgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*gcgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := gcgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// GitCommitSelect is the builder for selecting fields of GitCommit entities. 
+type GitCommitSelect struct { + *GitCommitQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (gcs *GitCommitSelect) Aggregate(fns ...AggregateFunc) *GitCommitSelect { + gcs.fns = append(gcs.fns, fns...) + return gcs +} + +// Scan applies the selector query and scans the result into the given value. +func (gcs *GitCommitSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, gcs.ctx, "Select") + if err := gcs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GitCommitQuery, *GitCommitSelect](ctx, gcs.GitCommitQuery, gcs, gcs.inters, v) +} + +func (gcs *GitCommitSelect) sqlScan(ctx context.Context, root *GitCommitQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(gcs.fns)) + for _, fn := range gcs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*gcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := gcs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/gitcommit_update.go b/ent/gitcommit_update.go new file mode 100644 index 0000000..bb347be --- /dev/null +++ b/ent/gitcommit_update.go @@ -0,0 +1,650 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// GitCommitUpdate is the builder for updating GitCommit entities. 
+type GitCommitUpdate struct { + config + hooks []Hook + mutation *GitCommitMutation +} + +// Where appends a list predicates to the GitCommitUpdate builder. +func (gcu *GitCommitUpdate) Where(ps ...predicate.GitCommit) *GitCommitUpdate { + gcu.mutation.Where(ps...) + return gcu +} + +// SetUpdateTime sets the "update_time" field. +func (gcu *GitCommitUpdate) SetUpdateTime(t time.Time) *GitCommitUpdate { + gcu.mutation.SetUpdateTime(t) + return gcu +} + +// SetCommitHash sets the "commit_hash" field. +func (gcu *GitCommitUpdate) SetCommitHash(s string) *GitCommitUpdate { + gcu.mutation.SetCommitHash(s) + return gcu +} + +// SetNillableCommitHash sets the "commit_hash" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableCommitHash(s *string) *GitCommitUpdate { + if s != nil { + gcu.SetCommitHash(*s) + } + return gcu +} + +// SetBranchName sets the "branch_name" field. +func (gcu *GitCommitUpdate) SetBranchName(s string) *GitCommitUpdate { + gcu.mutation.SetBranchName(s) + return gcu +} + +// SetNillableBranchName sets the "branch_name" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableBranchName(s *string) *GitCommitUpdate { + if s != nil { + gcu.SetBranchName(*s) + } + return gcu +} + +// SetRepoName sets the "repo_name" field. +func (gcu *GitCommitUpdate) SetRepoName(s string) *GitCommitUpdate { + gcu.mutation.SetRepoName(s) + return gcu +} + +// SetNillableRepoName sets the "repo_name" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableRepoName(s *string) *GitCommitUpdate { + if s != nil { + gcu.SetRepoName(*s) + } + return gcu +} + +// SetCommitMessage sets the "commit_message" field. +func (gcu *GitCommitUpdate) SetCommitMessage(s string) *GitCommitUpdate { + gcu.mutation.SetCommitMessage(s) + return gcu +} + +// SetNillableCommitMessage sets the "commit_message" field if the given value is not nil. 
+func (gcu *GitCommitUpdate) SetNillableCommitMessage(s *string) *GitCommitUpdate { + if s != nil { + gcu.SetCommitMessage(*s) + } + return gcu +} + +// SetCommitTimestamp sets the "commit_timestamp" field. +func (gcu *GitCommitUpdate) SetCommitTimestamp(t time.Time) *GitCommitUpdate { + gcu.mutation.SetCommitTimestamp(t) + return gcu +} + +// SetNillableCommitTimestamp sets the "commit_timestamp" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableCommitTimestamp(t *time.Time) *GitCommitUpdate { + if t != nil { + gcu.SetCommitTimestamp(*t) + } + return gcu +} + +// SetAuthor sets the "author" field. +func (gcu *GitCommitUpdate) SetAuthor(s string) *GitCommitUpdate { + gcu.mutation.SetAuthor(s) + return gcu +} + +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableAuthor(s *string) *GitCommitUpdate { + if s != nil { + gcu.SetAuthor(*s) + } + return gcu +} + +// ClearAuthor clears the value of the "author" field. +func (gcu *GitCommitUpdate) ClearAuthor() *GitCommitUpdate { + gcu.mutation.ClearAuthor() + return gcu +} + +// SetTimestamp sets the "timestamp" field. +func (gcu *GitCommitUpdate) SetTimestamp(t time.Time) *GitCommitUpdate { + gcu.mutation.SetTimestamp(t) + return gcu +} + +// SetNillableTimestamp sets the "timestamp" field if the given value is not nil. +func (gcu *GitCommitUpdate) SetNillableTimestamp(t *time.Time) *GitCommitUpdate { + if t != nil { + gcu.SetTimestamp(*t) + } + return gcu +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (gcu *GitCommitUpdate) ClearTimestamp() *GitCommitUpdate { + gcu.mutation.ClearTimestamp() + return gcu +} + +// AddResultIDs adds the "results" edge to the CIWorkflowResult entity by IDs. +func (gcu *GitCommitUpdate) AddResultIDs(ids ...uuid.UUID) *GitCommitUpdate { + gcu.mutation.AddResultIDs(ids...) + return gcu +} + +// AddResults adds the "results" edges to the CIWorkflowResult entity. 
+func (gcu *GitCommitUpdate) AddResults(c ...*CIWorkflowResult) *GitCommitUpdate { + ids := make([]uuid.UUID, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return gcu.AddResultIDs(ids...) +} + +// Mutation returns the GitCommitMutation object of the builder. +func (gcu *GitCommitUpdate) Mutation() *GitCommitMutation { + return gcu.mutation +} + +// ClearResults clears all "results" edges to the CIWorkflowResult entity. +func (gcu *GitCommitUpdate) ClearResults() *GitCommitUpdate { + gcu.mutation.ClearResults() + return gcu +} + +// RemoveResultIDs removes the "results" edge to CIWorkflowResult entities by IDs. +func (gcu *GitCommitUpdate) RemoveResultIDs(ids ...uuid.UUID) *GitCommitUpdate { + gcu.mutation.RemoveResultIDs(ids...) + return gcu +} + +// RemoveResults removes "results" edges to CIWorkflowResult entities. +func (gcu *GitCommitUpdate) RemoveResults(c ...*CIWorkflowResult) *GitCommitUpdate { + ids := make([]uuid.UUID, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return gcu.RemoveResultIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (gcu *GitCommitUpdate) Save(ctx context.Context) (int, error) { + gcu.defaults() + return withHooks(ctx, gcu.sqlSave, gcu.mutation, gcu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (gcu *GitCommitUpdate) SaveX(ctx context.Context) int { + affected, err := gcu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (gcu *GitCommitUpdate) Exec(ctx context.Context) error { + _, err := gcu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (gcu *GitCommitUpdate) ExecX(ctx context.Context) { + if err := gcu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (gcu *GitCommitUpdate) defaults() { + if _, ok := gcu.mutation.UpdateTime(); !ok { + v := gitcommit.UpdateDefaultUpdateTime() + gcu.mutation.SetUpdateTime(v) + } +} + +func (gcu *GitCommitUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(gitcommit.Table, gitcommit.Columns, sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID)) + if ps := gcu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := gcu.mutation.UpdateTime(); ok { + _spec.SetField(gitcommit.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := gcu.mutation.CommitHash(); ok { + _spec.SetField(gitcommit.FieldCommitHash, field.TypeString, value) + } + if value, ok := gcu.mutation.BranchName(); ok { + _spec.SetField(gitcommit.FieldBranchName, field.TypeString, value) + } + if value, ok := gcu.mutation.RepoName(); ok { + _spec.SetField(gitcommit.FieldRepoName, field.TypeString, value) + } + if value, ok := gcu.mutation.CommitMessage(); ok { + _spec.SetField(gitcommit.FieldCommitMessage, field.TypeString, value) + } + if value, ok := gcu.mutation.CommitTimestamp(); ok { + _spec.SetField(gitcommit.FieldCommitTimestamp, field.TypeTime, value) + } + if value, ok := gcu.mutation.Author(); ok { + _spec.SetField(gitcommit.FieldAuthor, field.TypeString, value) + } + if gcu.mutation.AuthorCleared() { + _spec.ClearField(gitcommit.FieldAuthor, field.TypeString) + } + if value, ok := gcu.mutation.Timestamp(); ok { + _spec.SetField(gitcommit.FieldTimestamp, field.TypeTime, value) + } + if gcu.mutation.TimestampCleared() { + _spec.ClearField(gitcommit.FieldTimestamp, field.TypeTime) + } + if gcu.mutation.ResultsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gcu.mutation.RemovedResultsIDs(); len(nodes) > 0 && !gcu.mutation.ResultsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gcu.mutation.ResultsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, gcu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{gitcommit.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + gcu.mutation.done = true + return n, nil +} + +// GitCommitUpdateOne is the builder for updating a single GitCommit entity. +type GitCommitUpdateOne struct { + config + fields []string + hooks []Hook + mutation *GitCommitMutation +} + +// SetUpdateTime sets the "update_time" field. +func (gcuo *GitCommitUpdateOne) SetUpdateTime(t time.Time) *GitCommitUpdateOne { + gcuo.mutation.SetUpdateTime(t) + return gcuo +} + +// SetCommitHash sets the "commit_hash" field. 
+func (gcuo *GitCommitUpdateOne) SetCommitHash(s string) *GitCommitUpdateOne { + gcuo.mutation.SetCommitHash(s) + return gcuo +} + +// SetNillableCommitHash sets the "commit_hash" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableCommitHash(s *string) *GitCommitUpdateOne { + if s != nil { + gcuo.SetCommitHash(*s) + } + return gcuo +} + +// SetBranchName sets the "branch_name" field. +func (gcuo *GitCommitUpdateOne) SetBranchName(s string) *GitCommitUpdateOne { + gcuo.mutation.SetBranchName(s) + return gcuo +} + +// SetNillableBranchName sets the "branch_name" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableBranchName(s *string) *GitCommitUpdateOne { + if s != nil { + gcuo.SetBranchName(*s) + } + return gcuo +} + +// SetRepoName sets the "repo_name" field. +func (gcuo *GitCommitUpdateOne) SetRepoName(s string) *GitCommitUpdateOne { + gcuo.mutation.SetRepoName(s) + return gcuo +} + +// SetNillableRepoName sets the "repo_name" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableRepoName(s *string) *GitCommitUpdateOne { + if s != nil { + gcuo.SetRepoName(*s) + } + return gcuo +} + +// SetCommitMessage sets the "commit_message" field. +func (gcuo *GitCommitUpdateOne) SetCommitMessage(s string) *GitCommitUpdateOne { + gcuo.mutation.SetCommitMessage(s) + return gcuo +} + +// SetNillableCommitMessage sets the "commit_message" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableCommitMessage(s *string) *GitCommitUpdateOne { + if s != nil { + gcuo.SetCommitMessage(*s) + } + return gcuo +} + +// SetCommitTimestamp sets the "commit_timestamp" field. +func (gcuo *GitCommitUpdateOne) SetCommitTimestamp(t time.Time) *GitCommitUpdateOne { + gcuo.mutation.SetCommitTimestamp(t) + return gcuo +} + +// SetNillableCommitTimestamp sets the "commit_timestamp" field if the given value is not nil. 
+func (gcuo *GitCommitUpdateOne) SetNillableCommitTimestamp(t *time.Time) *GitCommitUpdateOne { + if t != nil { + gcuo.SetCommitTimestamp(*t) + } + return gcuo +} + +// SetAuthor sets the "author" field. +func (gcuo *GitCommitUpdateOne) SetAuthor(s string) *GitCommitUpdateOne { + gcuo.mutation.SetAuthor(s) + return gcuo +} + +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableAuthor(s *string) *GitCommitUpdateOne { + if s != nil { + gcuo.SetAuthor(*s) + } + return gcuo +} + +// ClearAuthor clears the value of the "author" field. +func (gcuo *GitCommitUpdateOne) ClearAuthor() *GitCommitUpdateOne { + gcuo.mutation.ClearAuthor() + return gcuo +} + +// SetTimestamp sets the "timestamp" field. +func (gcuo *GitCommitUpdateOne) SetTimestamp(t time.Time) *GitCommitUpdateOne { + gcuo.mutation.SetTimestamp(t) + return gcuo +} + +// SetNillableTimestamp sets the "timestamp" field if the given value is not nil. +func (gcuo *GitCommitUpdateOne) SetNillableTimestamp(t *time.Time) *GitCommitUpdateOne { + if t != nil { + gcuo.SetTimestamp(*t) + } + return gcuo +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (gcuo *GitCommitUpdateOne) ClearTimestamp() *GitCommitUpdateOne { + gcuo.mutation.ClearTimestamp() + return gcuo +} + +// AddResultIDs adds the "results" edge to the CIWorkflowResult entity by IDs. +func (gcuo *GitCommitUpdateOne) AddResultIDs(ids ...uuid.UUID) *GitCommitUpdateOne { + gcuo.mutation.AddResultIDs(ids...) + return gcuo +} + +// AddResults adds the "results" edges to the CIWorkflowResult entity. +func (gcuo *GitCommitUpdateOne) AddResults(c ...*CIWorkflowResult) *GitCommitUpdateOne { + ids := make([]uuid.UUID, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return gcuo.AddResultIDs(ids...) +} + +// Mutation returns the GitCommitMutation object of the builder. 
+func (gcuo *GitCommitUpdateOne) Mutation() *GitCommitMutation { + return gcuo.mutation +} + +// ClearResults clears all "results" edges to the CIWorkflowResult entity. +func (gcuo *GitCommitUpdateOne) ClearResults() *GitCommitUpdateOne { + gcuo.mutation.ClearResults() + return gcuo +} + +// RemoveResultIDs removes the "results" edge to CIWorkflowResult entities by IDs. +func (gcuo *GitCommitUpdateOne) RemoveResultIDs(ids ...uuid.UUID) *GitCommitUpdateOne { + gcuo.mutation.RemoveResultIDs(ids...) + return gcuo +} + +// RemoveResults removes "results" edges to CIWorkflowResult entities. +func (gcuo *GitCommitUpdateOne) RemoveResults(c ...*CIWorkflowResult) *GitCommitUpdateOne { + ids := make([]uuid.UUID, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return gcuo.RemoveResultIDs(ids...) +} + +// Where appends a list predicates to the GitCommitUpdate builder. +func (gcuo *GitCommitUpdateOne) Where(ps ...predicate.GitCommit) *GitCommitUpdateOne { + gcuo.mutation.Where(ps...) + return gcuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (gcuo *GitCommitUpdateOne) Select(field string, fields ...string) *GitCommitUpdateOne { + gcuo.fields = append([]string{field}, fields...) + return gcuo +} + +// Save executes the query and returns the updated GitCommit entity. +func (gcuo *GitCommitUpdateOne) Save(ctx context.Context) (*GitCommit, error) { + gcuo.defaults() + return withHooks(ctx, gcuo.sqlSave, gcuo.mutation, gcuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (gcuo *GitCommitUpdateOne) SaveX(ctx context.Context) *GitCommit { + node, err := gcuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (gcuo *GitCommitUpdateOne) Exec(ctx context.Context) error { + _, err := gcuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (gcuo *GitCommitUpdateOne) ExecX(ctx context.Context) { + if err := gcuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (gcuo *GitCommitUpdateOne) defaults() { + if _, ok := gcuo.mutation.UpdateTime(); !ok { + v := gitcommit.UpdateDefaultUpdateTime() + gcuo.mutation.SetUpdateTime(v) + } +} + +func (gcuo *GitCommitUpdateOne) sqlSave(ctx context.Context) (_node *GitCommit, err error) { + _spec := sqlgraph.NewUpdateSpec(gitcommit.Table, gitcommit.Columns, sqlgraph.NewFieldSpec(gitcommit.FieldID, field.TypeUUID)) + id, ok := gcuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "GitCommit.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := gcuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, gitcommit.FieldID) + for _, f := range fields { + if !gitcommit.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != gitcommit.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := gcuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := gcuo.mutation.UpdateTime(); ok { + _spec.SetField(gitcommit.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := gcuo.mutation.CommitHash(); ok { + _spec.SetField(gitcommit.FieldCommitHash, field.TypeString, value) + } + if value, ok := gcuo.mutation.BranchName(); ok { + _spec.SetField(gitcommit.FieldBranchName, field.TypeString, value) + } + if value, ok := gcuo.mutation.RepoName(); ok { + _spec.SetField(gitcommit.FieldRepoName, field.TypeString, value) + } + if value, ok := gcuo.mutation.CommitMessage(); ok { + _spec.SetField(gitcommit.FieldCommitMessage, field.TypeString, value) + } + if value, ok := 
gcuo.mutation.CommitTimestamp(); ok { + _spec.SetField(gitcommit.FieldCommitTimestamp, field.TypeTime, value) + } + if value, ok := gcuo.mutation.Author(); ok { + _spec.SetField(gitcommit.FieldAuthor, field.TypeString, value) + } + if gcuo.mutation.AuthorCleared() { + _spec.ClearField(gitcommit.FieldAuthor, field.TypeString) + } + if value, ok := gcuo.mutation.Timestamp(); ok { + _spec.SetField(gitcommit.FieldTimestamp, field.TypeTime, value) + } + if gcuo.mutation.TimestampCleared() { + _spec.ClearField(gitcommit.FieldTimestamp, field.TypeTime) + } + if gcuo.mutation.ResultsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gcuo.mutation.RemovedResultsIDs(); len(nodes) > 0 && !gcuo.mutation.ResultsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gcuo.mutation.ResultsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: gitcommit.ResultsTable, + Columns: []string{gitcommit.ResultsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(ciworkflowresult.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &GitCommit{config: gcuo.config} + _spec.Assign = 
_node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, gcuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{gitcommit.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + gcuo.mutation.done = true + return _node, nil +} diff --git a/ent/hook/hook.go b/ent/hook/hook.go new file mode 100644 index 0000000..fda7f47 --- /dev/null +++ b/ent/hook/hook.go @@ -0,0 +1,294 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + "registry-backend/ent" +) + +// The CIWorkflowResultFunc type is an adapter to allow the use of ordinary +// function as CIWorkflowResult mutator. +type CIWorkflowResultFunc func(context.Context, *ent.CIWorkflowResultMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f CIWorkflowResultFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.CIWorkflowResultMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CIWorkflowResultMutation", m) +} + +// The GitCommitFunc type is an adapter to allow the use of ordinary +// function as GitCommit mutator. +type GitCommitFunc func(context.Context, *ent.GitCommitMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f GitCommitFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.GitCommitMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GitCommitMutation", m) +} + +// The NodeFunc type is an adapter to allow the use of ordinary +// function as Node mutator. +type NodeFunc func(context.Context, *ent.NodeMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f NodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.NodeMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NodeMutation", m) +} + +// The NodeVersionFunc type is an adapter to allow the use of ordinary +// function as NodeVersion mutator. +type NodeVersionFunc func(context.Context, *ent.NodeVersionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f NodeVersionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.NodeVersionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NodeVersionMutation", m) +} + +// The PersonalAccessTokenFunc type is an adapter to allow the use of ordinary +// function as PersonalAccessToken mutator. +type PersonalAccessTokenFunc func(context.Context, *ent.PersonalAccessTokenMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PersonalAccessTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PersonalAccessTokenMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PersonalAccessTokenMutation", m) +} + +// The PublisherFunc type is an adapter to allow the use of ordinary +// function as Publisher mutator. +type PublisherFunc func(context.Context, *ent.PublisherMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PublisherFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PublisherMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PublisherMutation", m) +} + +// The PublisherPermissionFunc type is an adapter to allow the use of ordinary +// function as PublisherPermission mutator. 
+type PublisherPermissionFunc func(context.Context, *ent.PublisherPermissionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PublisherPermissionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PublisherPermissionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PublisherPermissionMutation", m) +} + +// The StorageFileFunc type is an adapter to allow the use of ordinary +// function as StorageFile mutator. +type StorageFileFunc func(context.Context, *ent.StorageFileMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f StorageFileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.StorageFileMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.StorageFileMutation", m) +} + +// The UserFunc type is an adapter to allow the use of ordinary +// function as User mutator. +type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. 
+func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. 
+// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. 
+func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/ent/migrate/migrate.go b/ent/migrate/migrate.go new file mode 100644 index 0000000..1956a6b --- /dev/null +++ b/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. 
+type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/ent/migrate/schema.go b/ent/migrate/schema.go new file mode 100644 index 0000000..87c9449 --- /dev/null +++ b/ent/migrate/schema.go @@ -0,0 +1,272 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // CiWorkflowResultsColumns holds the columns for the "ci_workflow_results" table. 
+ CiWorkflowResultsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID, Unique: true}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "operating_system", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "gpu_type", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "pytorch_version", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "workflow_name", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "run_id", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "status", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "start_time", Type: field.TypeInt64, Nullable: true}, + {Name: "end_time", Type: field.TypeInt64, Nullable: true}, + {Name: "ci_workflow_result_storage_file", Type: field.TypeUUID, Nullable: true}, + {Name: "git_commit_results", Type: field.TypeUUID, Nullable: true}, + } + // CiWorkflowResultsTable holds the schema information for the "ci_workflow_results" table. + CiWorkflowResultsTable = &schema.Table{ + Name: "ci_workflow_results", + Columns: CiWorkflowResultsColumns, + PrimaryKey: []*schema.Column{CiWorkflowResultsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "ci_workflow_results_storage_files_storage_file", + Columns: []*schema.Column{CiWorkflowResultsColumns[11]}, + RefColumns: []*schema.Column{StorageFilesColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "ci_workflow_results_git_commits_results", + Columns: []*schema.Column{CiWorkflowResultsColumns[12]}, + RefColumns: []*schema.Column{GitCommitsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // GitCommitsColumns holds the columns for the "git_commits" table. 
+ GitCommitsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "commit_hash", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "branch_name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "repo_name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "commit_message", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "commit_timestamp", Type: field.TypeTime}, + {Name: "author", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "timestamp", Type: field.TypeTime, Nullable: true}, + } + // GitCommitsTable holds the schema information for the "git_commits" table. + GitCommitsTable = &schema.Table{ + Name: "git_commits", + Columns: GitCommitsColumns, + PrimaryKey: []*schema.Column{GitCommitsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "gitcommit_repo_name_commit_hash", + Unique: true, + Columns: []*schema.Column{GitCommitsColumns[5], GitCommitsColumns[3]}, + }, + }, + } + // NodesColumns holds the columns for the "nodes" table. 
+ NodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "description", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "author", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "license", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "repository_url", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "icon_url", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "tags", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "publisher_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + } + // NodesTable holds the schema information for the "nodes" table. + NodesTable = &schema.Table{ + Name: "nodes", + Columns: NodesColumns, + PrimaryKey: []*schema.Column{NodesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "nodes_publishers_nodes", + Columns: []*schema.Column{NodesColumns[10]}, + RefColumns: []*schema.Column{PublishersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + } + // NodeVersionsColumns holds the columns for the "node_versions" table. 
+ NodeVersionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "version", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "changelog", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "pip_dependencies", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "deprecated", Type: field.TypeBool, Default: false}, + {Name: "node_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "node_version_storage_file", Type: field.TypeUUID, Nullable: true}, + } + // NodeVersionsTable holds the schema information for the "node_versions" table. + NodeVersionsTable = &schema.Table{ + Name: "node_versions", + Columns: NodeVersionsColumns, + PrimaryKey: []*schema.Column{NodeVersionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "node_versions_nodes_versions", + Columns: []*schema.Column{NodeVersionsColumns[7]}, + RefColumns: []*schema.Column{NodesColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "node_versions_storage_files_storage_file", + Columns: []*schema.Column{NodeVersionsColumns[8]}, + RefColumns: []*schema.Column{StorageFilesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "nodeversion_node_id_version", + Unique: true, + Columns: []*schema.Column{NodeVersionsColumns[7], NodeVersionsColumns[3]}, + }, + }, + } + // PersonalAccessTokensColumns holds the columns for the "personal_access_tokens" table. 
+ PersonalAccessTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "description", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "token", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "publisher_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + } + // PersonalAccessTokensTable holds the schema information for the "personal_access_tokens" table. + PersonalAccessTokensTable = &schema.Table{ + Name: "personal_access_tokens", + Columns: PersonalAccessTokensColumns, + PrimaryKey: []*schema.Column{PersonalAccessTokensColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "personal_access_tokens_publishers_personal_access_tokens", + Columns: []*schema.Column{PersonalAccessTokensColumns[6]}, + RefColumns: []*schema.Column{PublishersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "personalaccesstoken_token", + Unique: true, + Columns: []*schema.Column{PersonalAccessTokensColumns[5]}, + }, + }, + } + // PublishersColumns holds the columns for the "publishers" table. 
+ PublishersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "description", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "website", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "support_email", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "source_code_repo", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "logo_url", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + } + // PublishersTable holds the schema information for the "publishers" table. + PublishersTable = &schema.Table{ + Name: "publishers", + Columns: PublishersColumns, + PrimaryKey: []*schema.Column{PublishersColumns[0]}, + } + // PublisherPermissionsColumns holds the columns for the "publisher_permissions" table. + PublisherPermissionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "permission", Type: field.TypeEnum, Enums: []string{"owner", "member"}}, + {Name: "publisher_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "user_id", Type: field.TypeString}, + } + // PublisherPermissionsTable holds the schema information for the "publisher_permissions" table. 
+ PublisherPermissionsTable = &schema.Table{ + Name: "publisher_permissions", + Columns: PublisherPermissionsColumns, + PrimaryKey: []*schema.Column{PublisherPermissionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "publisher_permissions_publishers_publisher_permissions", + Columns: []*schema.Column{PublisherPermissionsColumns[2]}, + RefColumns: []*schema.Column{PublishersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "publisher_permissions_users_publisher_permissions", + Columns: []*schema.Column{PublisherPermissionsColumns[3]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + } + // StorageFilesColumns holds the columns for the "storage_files" table. + StorageFilesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID, Unique: true}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "bucket_name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "object_name", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "file_path", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "file_type", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "file_url", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + } + // StorageFilesTable holds the schema information for the "storage_files" table. + StorageFilesTable = &schema.Table{ + Name: "storage_files", + Columns: StorageFilesColumns, + PrimaryKey: []*schema.Column{StorageFilesColumns[0]}, + } + // UsersColumns holds the columns for the "users" table. 
+ UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString}, + {Name: "create_time", Type: field.TypeTime}, + {Name: "update_time", Type: field.TypeTime}, + {Name: "email", Type: field.TypeString, Nullable: true}, + {Name: "name", Type: field.TypeString, Nullable: true}, + {Name: "is_approved", Type: field.TypeBool, Default: false}, + {Name: "is_admin", Type: field.TypeBool, Default: false}, + } + // UsersTable holds the schema information for the "users" table. + UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + CiWorkflowResultsTable, + GitCommitsTable, + NodesTable, + NodeVersionsTable, + PersonalAccessTokensTable, + PublishersTable, + PublisherPermissionsTable, + StorageFilesTable, + UsersTable, + } +) + +func init() { + CiWorkflowResultsTable.ForeignKeys[0].RefTable = StorageFilesTable + CiWorkflowResultsTable.ForeignKeys[1].RefTable = GitCommitsTable + NodesTable.ForeignKeys[0].RefTable = PublishersTable + NodeVersionsTable.ForeignKeys[0].RefTable = NodesTable + NodeVersionsTable.ForeignKeys[1].RefTable = StorageFilesTable + PersonalAccessTokensTable.ForeignKeys[0].RefTable = PublishersTable + PublisherPermissionsTable.ForeignKeys[0].RefTable = PublishersTable + PublisherPermissionsTable.ForeignKeys[1].RefTable = UsersTable +} diff --git a/ent/mutation.go b/ent/mutation.go new file mode 100644 index 0000000..8a60a91 --- /dev/null +++ b/ent/mutation.go @@ -0,0 +1,7625 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/schema" + "registry-backend/ent/storagefile" + "registry-backend/ent/user" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeCIWorkflowResult = "CIWorkflowResult" + TypeGitCommit = "GitCommit" + TypeNode = "Node" + TypeNodeVersion = "NodeVersion" + TypePersonalAccessToken = "PersonalAccessToken" + TypePublisher = "Publisher" + TypePublisherPermission = "PublisherPermission" + TypeStorageFile = "StorageFile" + TypeUser = "User" +) + +// CIWorkflowResultMutation represents an operation that mutates the CIWorkflowResult nodes in the graph. +type CIWorkflowResultMutation struct { + config + op Op + typ string + id *uuid.UUID + create_time *time.Time + update_time *time.Time + operating_system *string + gpu_type *string + pytorch_version *string + workflow_name *string + run_id *string + status *string + start_time *int64 + addstart_time *int64 + end_time *int64 + addend_time *int64 + clearedFields map[string]struct{} + gitcommit *uuid.UUID + clearedgitcommit bool + storage_file *uuid.UUID + clearedstorage_file bool + done bool + oldValue func(context.Context) (*CIWorkflowResult, error) + predicates []predicate.CIWorkflowResult +} + +var _ ent.Mutation = (*CIWorkflowResultMutation)(nil) + +// ciworkflowresultOption allows management of the mutation configuration using functional options. 
+type ciworkflowresultOption func(*CIWorkflowResultMutation) + +// newCIWorkflowResultMutation creates new mutation for the CIWorkflowResult entity. +func newCIWorkflowResultMutation(c config, op Op, opts ...ciworkflowresultOption) *CIWorkflowResultMutation { + m := &CIWorkflowResultMutation{ + config: c, + op: op, + typ: TypeCIWorkflowResult, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withCIWorkflowResultID sets the ID field of the mutation. +func withCIWorkflowResultID(id uuid.UUID) ciworkflowresultOption { + return func(m *CIWorkflowResultMutation) { + var ( + err error + once sync.Once + value *CIWorkflowResult + ) + m.oldValue = func(ctx context.Context) (*CIWorkflowResult, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().CIWorkflowResult.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withCIWorkflowResult sets the old CIWorkflowResult of the mutation. +func withCIWorkflowResult(node *CIWorkflowResult) ciworkflowresultOption { + return func(m *CIWorkflowResultMutation) { + m.oldValue = func(context.Context) (*CIWorkflowResult, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m CIWorkflowResultMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m CIWorkflowResultMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. 
Note that this
+// operation is only accepted on creation of CIWorkflowResult entities.
+func (m *CIWorkflowResultMutation) SetID(id uuid.UUID) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *CIWorkflowResultMutation) ID() (id uuid.UUID, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *CIWorkflowResultMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []uuid.UUID{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().CIWorkflowResult.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreateTime sets the "create_time" field.
+func (m *CIWorkflowResultMutation) SetCreateTime(t time.Time) {
+	m.create_time = &t
+}
+
+// CreateTime returns the value of the "create_time" field in the mutation.
+func (m *CIWorkflowResultMutation) CreateTime() (r time.Time, exists bool) {
+	v := m.create_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreateTime returns the old "create_time" field's value of the CIWorkflowResult entity.
+// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *CIWorkflowResultMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *CIWorkflowResultMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. +func (m *CIWorkflowResultMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. +func (m *CIWorkflowResultMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CIWorkflowResultMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. 
+func (m *CIWorkflowResultMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetOperatingSystem sets the "operating_system" field. +func (m *CIWorkflowResultMutation) SetOperatingSystem(s string) { + m.operating_system = &s +} + +// OperatingSystem returns the value of the "operating_system" field in the mutation. +func (m *CIWorkflowResultMutation) OperatingSystem() (r string, exists bool) { + v := m.operating_system + if v == nil { + return + } + return *v, true +} + +// OldOperatingSystem returns the old "operating_system" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CIWorkflowResultMutation) OldOperatingSystem(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOperatingSystem is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOperatingSystem requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOperatingSystem: %w", err) + } + return oldValue.OperatingSystem, nil +} + +// ResetOperatingSystem resets all changes to the "operating_system" field. +func (m *CIWorkflowResultMutation) ResetOperatingSystem() { + m.operating_system = nil +} + +// SetGpuType sets the "gpu_type" field. +func (m *CIWorkflowResultMutation) SetGpuType(s string) { + m.gpu_type = &s +} + +// GpuType returns the value of the "gpu_type" field in the mutation. +func (m *CIWorkflowResultMutation) GpuType() (r string, exists bool) { + v := m.gpu_type + if v == nil { + return + } + return *v, true +} + +// OldGpuType returns the old "gpu_type" field's value of the CIWorkflowResult entity. 
+// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CIWorkflowResultMutation) OldGpuType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGpuType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGpuType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGpuType: %w", err) + } + return oldValue.GpuType, nil +} + +// ClearGpuType clears the value of the "gpu_type" field. +func (m *CIWorkflowResultMutation) ClearGpuType() { + m.gpu_type = nil + m.clearedFields[ciworkflowresult.FieldGpuType] = struct{}{} +} + +// GpuTypeCleared returns if the "gpu_type" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) GpuTypeCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldGpuType] + return ok +} + +// ResetGpuType resets all changes to the "gpu_type" field. +func (m *CIWorkflowResultMutation) ResetGpuType() { + m.gpu_type = nil + delete(m.clearedFields, ciworkflowresult.FieldGpuType) +} + +// SetPytorchVersion sets the "pytorch_version" field. +func (m *CIWorkflowResultMutation) SetPytorchVersion(s string) { + m.pytorch_version = &s +} + +// PytorchVersion returns the value of the "pytorch_version" field in the mutation. +func (m *CIWorkflowResultMutation) PytorchVersion() (r string, exists bool) { + v := m.pytorch_version + if v == nil { + return + } + return *v, true +} + +// OldPytorchVersion returns the old "pytorch_version" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CIWorkflowResultMutation) OldPytorchVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPytorchVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPytorchVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPytorchVersion: %w", err) + } + return oldValue.PytorchVersion, nil +} + +// ClearPytorchVersion clears the value of the "pytorch_version" field. +func (m *CIWorkflowResultMutation) ClearPytorchVersion() { + m.pytorch_version = nil + m.clearedFields[ciworkflowresult.FieldPytorchVersion] = struct{}{} +} + +// PytorchVersionCleared returns if the "pytorch_version" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) PytorchVersionCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldPytorchVersion] + return ok +} + +// ResetPytorchVersion resets all changes to the "pytorch_version" field. +func (m *CIWorkflowResultMutation) ResetPytorchVersion() { + m.pytorch_version = nil + delete(m.clearedFields, ciworkflowresult.FieldPytorchVersion) +} + +// SetWorkflowName sets the "workflow_name" field. +func (m *CIWorkflowResultMutation) SetWorkflowName(s string) { + m.workflow_name = &s +} + +// WorkflowName returns the value of the "workflow_name" field in the mutation. +func (m *CIWorkflowResultMutation) WorkflowName() (r string, exists bool) { + v := m.workflow_name + if v == nil { + return + } + return *v, true +} + +// OldWorkflowName returns the old "workflow_name" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CIWorkflowResultMutation) OldWorkflowName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWorkflowName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWorkflowName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWorkflowName: %w", err) + } + return oldValue.WorkflowName, nil +} + +// ClearWorkflowName clears the value of the "workflow_name" field. +func (m *CIWorkflowResultMutation) ClearWorkflowName() { + m.workflow_name = nil + m.clearedFields[ciworkflowresult.FieldWorkflowName] = struct{}{} +} + +// WorkflowNameCleared returns if the "workflow_name" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) WorkflowNameCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldWorkflowName] + return ok +} + +// ResetWorkflowName resets all changes to the "workflow_name" field. +func (m *CIWorkflowResultMutation) ResetWorkflowName() { + m.workflow_name = nil + delete(m.clearedFields, ciworkflowresult.FieldWorkflowName) +} + +// SetRunID sets the "run_id" field. +func (m *CIWorkflowResultMutation) SetRunID(s string) { + m.run_id = &s +} + +// RunID returns the value of the "run_id" field in the mutation. +func (m *CIWorkflowResultMutation) RunID() (r string, exists bool) { + v := m.run_id + if v == nil { + return + } + return *v, true +} + +// OldRunID returns the old "run_id" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CIWorkflowResultMutation) OldRunID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRunID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRunID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRunID: %w", err) + } + return oldValue.RunID, nil +} + +// ClearRunID clears the value of the "run_id" field. +func (m *CIWorkflowResultMutation) ClearRunID() { + m.run_id = nil + m.clearedFields[ciworkflowresult.FieldRunID] = struct{}{} +} + +// RunIDCleared returns if the "run_id" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) RunIDCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldRunID] + return ok +} + +// ResetRunID resets all changes to the "run_id" field. +func (m *CIWorkflowResultMutation) ResetRunID() { + m.run_id = nil + delete(m.clearedFields, ciworkflowresult.FieldRunID) +} + +// SetStatus sets the "status" field. +func (m *CIWorkflowResultMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *CIWorkflowResultMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CIWorkflowResultMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ClearStatus clears the value of the "status" field. +func (m *CIWorkflowResultMutation) ClearStatus() { + m.status = nil + m.clearedFields[ciworkflowresult.FieldStatus] = struct{}{} +} + +// StatusCleared returns if the "status" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) StatusCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldStatus] + return ok +} + +// ResetStatus resets all changes to the "status" field. +func (m *CIWorkflowResultMutation) ResetStatus() { + m.status = nil + delete(m.clearedFields, ciworkflowresult.FieldStatus) +} + +// SetStartTime sets the "start_time" field. +func (m *CIWorkflowResultMutation) SetStartTime(i int64) { + m.start_time = &i + m.addstart_time = nil +} + +// StartTime returns the value of the "start_time" field in the mutation. +func (m *CIWorkflowResultMutation) StartTime() (r int64, exists bool) { + v := m.start_time + if v == nil { + return + } + return *v, true +} + +// OldStartTime returns the old "start_time" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CIWorkflowResultMutation) OldStartTime(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartTime: %w", err) + } + return oldValue.StartTime, nil +} + +// AddStartTime adds i to the "start_time" field. +func (m *CIWorkflowResultMutation) AddStartTime(i int64) { + if m.addstart_time != nil { + *m.addstart_time += i + } else { + m.addstart_time = &i + } +} + +// AddedStartTime returns the value that was added to the "start_time" field in this mutation. +func (m *CIWorkflowResultMutation) AddedStartTime() (r int64, exists bool) { + v := m.addstart_time + if v == nil { + return + } + return *v, true +} + +// ClearStartTime clears the value of the "start_time" field. +func (m *CIWorkflowResultMutation) ClearStartTime() { + m.start_time = nil + m.addstart_time = nil + m.clearedFields[ciworkflowresult.FieldStartTime] = struct{}{} +} + +// StartTimeCleared returns if the "start_time" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) StartTimeCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldStartTime] + return ok +} + +// ResetStartTime resets all changes to the "start_time" field. +func (m *CIWorkflowResultMutation) ResetStartTime() { + m.start_time = nil + m.addstart_time = nil + delete(m.clearedFields, ciworkflowresult.FieldStartTime) +} + +// SetEndTime sets the "end_time" field. +func (m *CIWorkflowResultMutation) SetEndTime(i int64) { + m.end_time = &i + m.addend_time = nil +} + +// EndTime returns the value of the "end_time" field in the mutation. 
+func (m *CIWorkflowResultMutation) EndTime() (r int64, exists bool) { + v := m.end_time + if v == nil { + return + } + return *v, true +} + +// OldEndTime returns the old "end_time" field's value of the CIWorkflowResult entity. +// If the CIWorkflowResult object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CIWorkflowResultMutation) OldEndTime(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndTime: %w", err) + } + return oldValue.EndTime, nil +} + +// AddEndTime adds i to the "end_time" field. +func (m *CIWorkflowResultMutation) AddEndTime(i int64) { + if m.addend_time != nil { + *m.addend_time += i + } else { + m.addend_time = &i + } +} + +// AddedEndTime returns the value that was added to the "end_time" field in this mutation. +func (m *CIWorkflowResultMutation) AddedEndTime() (r int64, exists bool) { + v := m.addend_time + if v == nil { + return + } + return *v, true +} + +// ClearEndTime clears the value of the "end_time" field. +func (m *CIWorkflowResultMutation) ClearEndTime() { + m.end_time = nil + m.addend_time = nil + m.clearedFields[ciworkflowresult.FieldEndTime] = struct{}{} +} + +// EndTimeCleared returns if the "end_time" field was cleared in this mutation. +func (m *CIWorkflowResultMutation) EndTimeCleared() bool { + _, ok := m.clearedFields[ciworkflowresult.FieldEndTime] + return ok +} + +// ResetEndTime resets all changes to the "end_time" field. 
+func (m *CIWorkflowResultMutation) ResetEndTime() { + m.end_time = nil + m.addend_time = nil + delete(m.clearedFields, ciworkflowresult.FieldEndTime) +} + +// SetGitcommitID sets the "gitcommit" edge to the GitCommit entity by id. +func (m *CIWorkflowResultMutation) SetGitcommitID(id uuid.UUID) { + m.gitcommit = &id +} + +// ClearGitcommit clears the "gitcommit" edge to the GitCommit entity. +func (m *CIWorkflowResultMutation) ClearGitcommit() { + m.clearedgitcommit = true +} + +// GitcommitCleared reports if the "gitcommit" edge to the GitCommit entity was cleared. +func (m *CIWorkflowResultMutation) GitcommitCleared() bool { + return m.clearedgitcommit +} + +// GitcommitID returns the "gitcommit" edge ID in the mutation. +func (m *CIWorkflowResultMutation) GitcommitID() (id uuid.UUID, exists bool) { + if m.gitcommit != nil { + return *m.gitcommit, true + } + return +} + +// GitcommitIDs returns the "gitcommit" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GitcommitID instead. It exists only for internal usage by the builders. +func (m *CIWorkflowResultMutation) GitcommitIDs() (ids []uuid.UUID) { + if id := m.gitcommit; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGitcommit resets all changes to the "gitcommit" edge. +func (m *CIWorkflowResultMutation) ResetGitcommit() { + m.gitcommit = nil + m.clearedgitcommit = false +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by id. +func (m *CIWorkflowResultMutation) SetStorageFileID(id uuid.UUID) { + m.storage_file = &id +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. +func (m *CIWorkflowResultMutation) ClearStorageFile() { + m.clearedstorage_file = true +} + +// StorageFileCleared reports if the "storage_file" edge to the StorageFile entity was cleared. 
+func (m *CIWorkflowResultMutation) StorageFileCleared() bool { + return m.clearedstorage_file +} + +// StorageFileID returns the "storage_file" edge ID in the mutation. +func (m *CIWorkflowResultMutation) StorageFileID() (id uuid.UUID, exists bool) { + if m.storage_file != nil { + return *m.storage_file, true + } + return +} + +// StorageFileIDs returns the "storage_file" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// StorageFileID instead. It exists only for internal usage by the builders. +func (m *CIWorkflowResultMutation) StorageFileIDs() (ids []uuid.UUID) { + if id := m.storage_file; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetStorageFile resets all changes to the "storage_file" edge. +func (m *CIWorkflowResultMutation) ResetStorageFile() { + m.storage_file = nil + m.clearedstorage_file = false +} + +// Where appends a list predicates to the CIWorkflowResultMutation builder. +func (m *CIWorkflowResultMutation) Where(ps ...predicate.CIWorkflowResult) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the CIWorkflowResultMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *CIWorkflowResultMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.CIWorkflowResult, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *CIWorkflowResultMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *CIWorkflowResultMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (CIWorkflowResult). +func (m *CIWorkflowResultMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. 
Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *CIWorkflowResultMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.create_time != nil { + fields = append(fields, ciworkflowresult.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, ciworkflowresult.FieldUpdateTime) + } + if m.operating_system != nil { + fields = append(fields, ciworkflowresult.FieldOperatingSystem) + } + if m.gpu_type != nil { + fields = append(fields, ciworkflowresult.FieldGpuType) + } + if m.pytorch_version != nil { + fields = append(fields, ciworkflowresult.FieldPytorchVersion) + } + if m.workflow_name != nil { + fields = append(fields, ciworkflowresult.FieldWorkflowName) + } + if m.run_id != nil { + fields = append(fields, ciworkflowresult.FieldRunID) + } + if m.status != nil { + fields = append(fields, ciworkflowresult.FieldStatus) + } + if m.start_time != nil { + fields = append(fields, ciworkflowresult.FieldStartTime) + } + if m.end_time != nil { + fields = append(fields, ciworkflowresult.FieldEndTime) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *CIWorkflowResultMutation) Field(name string) (ent.Value, bool) { + switch name { + case ciworkflowresult.FieldCreateTime: + return m.CreateTime() + case ciworkflowresult.FieldUpdateTime: + return m.UpdateTime() + case ciworkflowresult.FieldOperatingSystem: + return m.OperatingSystem() + case ciworkflowresult.FieldGpuType: + return m.GpuType() + case ciworkflowresult.FieldPytorchVersion: + return m.PytorchVersion() + case ciworkflowresult.FieldWorkflowName: + return m.WorkflowName() + case ciworkflowresult.FieldRunID: + return m.RunID() + case ciworkflowresult.FieldStatus: + return m.Status() + case ciworkflowresult.FieldStartTime: + return m.StartTime() + case ciworkflowresult.FieldEndTime: + return m.EndTime() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *CIWorkflowResultMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case ciworkflowresult.FieldCreateTime: + return m.OldCreateTime(ctx) + case ciworkflowresult.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case ciworkflowresult.FieldOperatingSystem: + return m.OldOperatingSystem(ctx) + case ciworkflowresult.FieldGpuType: + return m.OldGpuType(ctx) + case ciworkflowresult.FieldPytorchVersion: + return m.OldPytorchVersion(ctx) + case ciworkflowresult.FieldWorkflowName: + return m.OldWorkflowName(ctx) + case ciworkflowresult.FieldRunID: + return m.OldRunID(ctx) + case ciworkflowresult.FieldStatus: + return m.OldStatus(ctx) + case ciworkflowresult.FieldStartTime: + return m.OldStartTime(ctx) + case ciworkflowresult.FieldEndTime: + return m.OldEndTime(ctx) + } + return nil, fmt.Errorf("unknown CIWorkflowResult field %s", name) +} + +// SetField sets the value of a field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *CIWorkflowResultMutation) SetField(name string, value ent.Value) error { + switch name { + case ciworkflowresult.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case ciworkflowresult.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case ciworkflowresult.FieldOperatingSystem: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOperatingSystem(v) + return nil + case ciworkflowresult.FieldGpuType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGpuType(v) + return nil + case ciworkflowresult.FieldPytorchVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPytorchVersion(v) + return nil + case ciworkflowresult.FieldWorkflowName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWorkflowName(v) + return nil + case ciworkflowresult.FieldRunID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRunID(v) + return nil + case ciworkflowresult.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case ciworkflowresult.FieldStartTime: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartTime(v) + return nil + case ciworkflowresult.FieldEndTime: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", 
value, name) + } + m.SetEndTime(v) + return nil + } + return fmt.Errorf("unknown CIWorkflowResult field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *CIWorkflowResultMutation) AddedFields() []string { + var fields []string + if m.addstart_time != nil { + fields = append(fields, ciworkflowresult.FieldStartTime) + } + if m.addend_time != nil { + fields = append(fields, ciworkflowresult.FieldEndTime) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *CIWorkflowResultMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case ciworkflowresult.FieldStartTime: + return m.AddedStartTime() + case ciworkflowresult.FieldEndTime: + return m.AddedEndTime() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *CIWorkflowResultMutation) AddField(name string, value ent.Value) error { + switch name { + case ciworkflowresult.FieldStartTime: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartTime(v) + return nil + case ciworkflowresult.FieldEndTime: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndTime(v) + return nil + } + return fmt.Errorf("unknown CIWorkflowResult numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *CIWorkflowResultMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(ciworkflowresult.FieldGpuType) { + fields = append(fields, ciworkflowresult.FieldGpuType) + } + if m.FieldCleared(ciworkflowresult.FieldPytorchVersion) { + fields = append(fields, ciworkflowresult.FieldPytorchVersion) + } + if m.FieldCleared(ciworkflowresult.FieldWorkflowName) { + fields = append(fields, ciworkflowresult.FieldWorkflowName) + } + if m.FieldCleared(ciworkflowresult.FieldRunID) { + fields = append(fields, ciworkflowresult.FieldRunID) + } + if m.FieldCleared(ciworkflowresult.FieldStatus) { + fields = append(fields, ciworkflowresult.FieldStatus) + } + if m.FieldCleared(ciworkflowresult.FieldStartTime) { + fields = append(fields, ciworkflowresult.FieldStartTime) + } + if m.FieldCleared(ciworkflowresult.FieldEndTime) { + fields = append(fields, ciworkflowresult.FieldEndTime) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *CIWorkflowResultMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *CIWorkflowResultMutation) ClearField(name string) error { + switch name { + case ciworkflowresult.FieldGpuType: + m.ClearGpuType() + return nil + case ciworkflowresult.FieldPytorchVersion: + m.ClearPytorchVersion() + return nil + case ciworkflowresult.FieldWorkflowName: + m.ClearWorkflowName() + return nil + case ciworkflowresult.FieldRunID: + m.ClearRunID() + return nil + case ciworkflowresult.FieldStatus: + m.ClearStatus() + return nil + case ciworkflowresult.FieldStartTime: + m.ClearStartTime() + return nil + case ciworkflowresult.FieldEndTime: + m.ClearEndTime() + return nil + } + return fmt.Errorf("unknown CIWorkflowResult nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *CIWorkflowResultMutation) ResetField(name string) error { + switch name { + case ciworkflowresult.FieldCreateTime: + m.ResetCreateTime() + return nil + case ciworkflowresult.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case ciworkflowresult.FieldOperatingSystem: + m.ResetOperatingSystem() + return nil + case ciworkflowresult.FieldGpuType: + m.ResetGpuType() + return nil + case ciworkflowresult.FieldPytorchVersion: + m.ResetPytorchVersion() + return nil + case ciworkflowresult.FieldWorkflowName: + m.ResetWorkflowName() + return nil + case ciworkflowresult.FieldRunID: + m.ResetRunID() + return nil + case ciworkflowresult.FieldStatus: + m.ResetStatus() + return nil + case ciworkflowresult.FieldStartTime: + m.ResetStartTime() + return nil + case ciworkflowresult.FieldEndTime: + m.ResetEndTime() + return nil + } + return fmt.Errorf("unknown CIWorkflowResult field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *CIWorkflowResultMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.gitcommit != nil { + edges = append(edges, ciworkflowresult.EdgeGitcommit) + } + if m.storage_file != nil { + edges = append(edges, ciworkflowresult.EdgeStorageFile) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *CIWorkflowResultMutation) AddedIDs(name string) []ent.Value { + switch name { + case ciworkflowresult.EdgeGitcommit: + if id := m.gitcommit; id != nil { + return []ent.Value{*id} + } + case ciworkflowresult.EdgeStorageFile: + if id := m.storage_file; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *CIWorkflowResultMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *CIWorkflowResultMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *CIWorkflowResultMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedgitcommit { + edges = append(edges, ciworkflowresult.EdgeGitcommit) + } + if m.clearedstorage_file { + edges = append(edges, ciworkflowresult.EdgeStorageFile) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *CIWorkflowResultMutation) EdgeCleared(name string) bool { + switch name { + case ciworkflowresult.EdgeGitcommit: + return m.clearedgitcommit + case ciworkflowresult.EdgeStorageFile: + return m.clearedstorage_file + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *CIWorkflowResultMutation) ClearEdge(name string) error { + switch name { + case ciworkflowresult.EdgeGitcommit: + m.ClearGitcommit() + return nil + case ciworkflowresult.EdgeStorageFile: + m.ClearStorageFile() + return nil + } + return fmt.Errorf("unknown CIWorkflowResult unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *CIWorkflowResultMutation) ResetEdge(name string) error { + switch name { + case ciworkflowresult.EdgeGitcommit: + m.ResetGitcommit() + return nil + case ciworkflowresult.EdgeStorageFile: + m.ResetStorageFile() + return nil + } + return fmt.Errorf("unknown CIWorkflowResult edge %s", name) +} + +// GitCommitMutation represents an operation that mutates the GitCommit nodes in the graph. +type GitCommitMutation struct { + config + op Op + typ string + id *uuid.UUID + create_time *time.Time + update_time *time.Time + commit_hash *string + branch_name *string + repo_name *string + commit_message *string + commit_timestamp *time.Time + author *string + timestamp *time.Time + clearedFields map[string]struct{} + results map[uuid.UUID]struct{} + removedresults map[uuid.UUID]struct{} + clearedresults bool + done bool + oldValue func(context.Context) (*GitCommit, error) + predicates []predicate.GitCommit +} + +var _ ent.Mutation = (*GitCommitMutation)(nil) + +// gitcommitOption allows management of the mutation configuration using functional options. +type gitcommitOption func(*GitCommitMutation) + +// newGitCommitMutation creates new mutation for the GitCommit entity. +func newGitCommitMutation(c config, op Op, opts ...gitcommitOption) *GitCommitMutation { + m := &GitCommitMutation{ + config: c, + op: op, + typ: TypeGitCommit, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withGitCommitID sets the ID field of the mutation. 
+func withGitCommitID(id uuid.UUID) gitcommitOption { + return func(m *GitCommitMutation) { + var ( + err error + once sync.Once + value *GitCommit + ) + m.oldValue = func(ctx context.Context) (*GitCommit, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().GitCommit.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withGitCommit sets the old GitCommit of the mutation. +func withGitCommit(node *GitCommit) gitcommitOption { + return func(m *GitCommitMutation) { + m.oldValue = func(context.Context) (*GitCommit, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m GitCommitMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m GitCommitMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of GitCommit entities. +func (m *GitCommitMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *GitCommitMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *GitCommitMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().GitCommit.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreateTime sets the "create_time" field. +func (m *GitCommitMutation) SetCreateTime(t time.Time) { + m.create_time = &t +} + +// CreateTime returns the value of the "create_time" field in the mutation. +func (m *GitCommitMutation) CreateTime() (r time.Time, exists bool) { + v := m.create_time + if v == nil { + return + } + return *v, true +} + +// OldCreateTime returns the old "create_time" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *GitCommitMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. 
+func (m *GitCommitMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. +func (m *GitCommitMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *GitCommitMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetCommitHash sets the "commit_hash" field. +func (m *GitCommitMutation) SetCommitHash(s string) { + m.commit_hash = &s +} + +// CommitHash returns the value of the "commit_hash" field in the mutation. +func (m *GitCommitMutation) CommitHash() (r string, exists bool) { + v := m.commit_hash + if v == nil { + return + } + return *v, true +} + +// OldCommitHash returns the old "commit_hash" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GitCommitMutation) OldCommitHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCommitHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCommitHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCommitHash: %w", err) + } + return oldValue.CommitHash, nil +} + +// ResetCommitHash resets all changes to the "commit_hash" field. +func (m *GitCommitMutation) ResetCommitHash() { + m.commit_hash = nil +} + +// SetBranchName sets the "branch_name" field. +func (m *GitCommitMutation) SetBranchName(s string) { + m.branch_name = &s +} + +// BranchName returns the value of the "branch_name" field in the mutation. +func (m *GitCommitMutation) BranchName() (r string, exists bool) { + v := m.branch_name + if v == nil { + return + } + return *v, true +} + +// OldBranchName returns the old "branch_name" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldBranchName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBranchName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBranchName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBranchName: %w", err) + } + return oldValue.BranchName, nil +} + +// ResetBranchName resets all changes to the "branch_name" field. +func (m *GitCommitMutation) ResetBranchName() { + m.branch_name = nil +} + +// SetRepoName sets the "repo_name" field. 
+func (m *GitCommitMutation) SetRepoName(s string) { + m.repo_name = &s +} + +// RepoName returns the value of the "repo_name" field in the mutation. +func (m *GitCommitMutation) RepoName() (r string, exists bool) { + v := m.repo_name + if v == nil { + return + } + return *v, true +} + +// OldRepoName returns the old "repo_name" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldRepoName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRepoName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRepoName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRepoName: %w", err) + } + return oldValue.RepoName, nil +} + +// ResetRepoName resets all changes to the "repo_name" field. +func (m *GitCommitMutation) ResetRepoName() { + m.repo_name = nil +} + +// SetCommitMessage sets the "commit_message" field. +func (m *GitCommitMutation) SetCommitMessage(s string) { + m.commit_message = &s +} + +// CommitMessage returns the value of the "commit_message" field in the mutation. +func (m *GitCommitMutation) CommitMessage() (r string, exists bool) { + v := m.commit_message + if v == nil { + return + } + return *v, true +} + +// OldCommitMessage returns the old "commit_message" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GitCommitMutation) OldCommitMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCommitMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCommitMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCommitMessage: %w", err) + } + return oldValue.CommitMessage, nil +} + +// ResetCommitMessage resets all changes to the "commit_message" field. +func (m *GitCommitMutation) ResetCommitMessage() { + m.commit_message = nil +} + +// SetCommitTimestamp sets the "commit_timestamp" field. +func (m *GitCommitMutation) SetCommitTimestamp(t time.Time) { + m.commit_timestamp = &t +} + +// CommitTimestamp returns the value of the "commit_timestamp" field in the mutation. +func (m *GitCommitMutation) CommitTimestamp() (r time.Time, exists bool) { + v := m.commit_timestamp + if v == nil { + return + } + return *v, true +} + +// OldCommitTimestamp returns the old "commit_timestamp" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldCommitTimestamp(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCommitTimestamp is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCommitTimestamp requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCommitTimestamp: %w", err) + } + return oldValue.CommitTimestamp, nil +} + +// ResetCommitTimestamp resets all changes to the "commit_timestamp" field. 
+func (m *GitCommitMutation) ResetCommitTimestamp() { + m.commit_timestamp = nil +} + +// SetAuthor sets the "author" field. +func (m *GitCommitMutation) SetAuthor(s string) { + m.author = &s +} + +// Author returns the value of the "author" field in the mutation. +func (m *GitCommitMutation) Author() (r string, exists bool) { + v := m.author + if v == nil { + return + } + return *v, true +} + +// OldAuthor returns the old "author" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldAuthor(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAuthor is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAuthor requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAuthor: %w", err) + } + return oldValue.Author, nil +} + +// ClearAuthor clears the value of the "author" field. +func (m *GitCommitMutation) ClearAuthor() { + m.author = nil + m.clearedFields[gitcommit.FieldAuthor] = struct{}{} +} + +// AuthorCleared returns if the "author" field was cleared in this mutation. +func (m *GitCommitMutation) AuthorCleared() bool { + _, ok := m.clearedFields[gitcommit.FieldAuthor] + return ok +} + +// ResetAuthor resets all changes to the "author" field. +func (m *GitCommitMutation) ResetAuthor() { + m.author = nil + delete(m.clearedFields, gitcommit.FieldAuthor) +} + +// SetTimestamp sets the "timestamp" field. +func (m *GitCommitMutation) SetTimestamp(t time.Time) { + m.timestamp = &t +} + +// Timestamp returns the value of the "timestamp" field in the mutation. 
+func (m *GitCommitMutation) Timestamp() (r time.Time, exists bool) { + v := m.timestamp + if v == nil { + return + } + return *v, true +} + +// OldTimestamp returns the old "timestamp" field's value of the GitCommit entity. +// If the GitCommit object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GitCommitMutation) OldTimestamp(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTimestamp is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTimestamp requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTimestamp: %w", err) + } + return oldValue.Timestamp, nil +} + +// ClearTimestamp clears the value of the "timestamp" field. +func (m *GitCommitMutation) ClearTimestamp() { + m.timestamp = nil + m.clearedFields[gitcommit.FieldTimestamp] = struct{}{} +} + +// TimestampCleared returns if the "timestamp" field was cleared in this mutation. +func (m *GitCommitMutation) TimestampCleared() bool { + _, ok := m.clearedFields[gitcommit.FieldTimestamp] + return ok +} + +// ResetTimestamp resets all changes to the "timestamp" field. +func (m *GitCommitMutation) ResetTimestamp() { + m.timestamp = nil + delete(m.clearedFields, gitcommit.FieldTimestamp) +} + +// AddResultIDs adds the "results" edge to the CIWorkflowResult entity by ids. +func (m *GitCommitMutation) AddResultIDs(ids ...uuid.UUID) { + if m.results == nil { + m.results = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.results[ids[i]] = struct{}{} + } +} + +// ClearResults clears the "results" edge to the CIWorkflowResult entity. 
+func (m *GitCommitMutation) ClearResults() { + m.clearedresults = true +} + +// ResultsCleared reports if the "results" edge to the CIWorkflowResult entity was cleared. +func (m *GitCommitMutation) ResultsCleared() bool { + return m.clearedresults +} + +// RemoveResultIDs removes the "results" edge to the CIWorkflowResult entity by IDs. +func (m *GitCommitMutation) RemoveResultIDs(ids ...uuid.UUID) { + if m.removedresults == nil { + m.removedresults = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.results, ids[i]) + m.removedresults[ids[i]] = struct{}{} + } +} + +// RemovedResults returns the removed IDs of the "results" edge to the CIWorkflowResult entity. +func (m *GitCommitMutation) RemovedResultsIDs() (ids []uuid.UUID) { + for id := range m.removedresults { + ids = append(ids, id) + } + return +} + +// ResultsIDs returns the "results" edge IDs in the mutation. +func (m *GitCommitMutation) ResultsIDs() (ids []uuid.UUID) { + for id := range m.results { + ids = append(ids, id) + } + return +} + +// ResetResults resets all changes to the "results" edge. +func (m *GitCommitMutation) ResetResults() { + m.results = nil + m.clearedresults = false + m.removedresults = nil +} + +// Where appends a list predicates to the GitCommitMutation builder. +func (m *GitCommitMutation) Where(ps ...predicate.GitCommit) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the GitCommitMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *GitCommitMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.GitCommit, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *GitCommitMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. 
+func (m *GitCommitMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (GitCommit). +func (m *GitCommitMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *GitCommitMutation) Fields() []string { + fields := make([]string, 0, 9) + if m.create_time != nil { + fields = append(fields, gitcommit.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, gitcommit.FieldUpdateTime) + } + if m.commit_hash != nil { + fields = append(fields, gitcommit.FieldCommitHash) + } + if m.branch_name != nil { + fields = append(fields, gitcommit.FieldBranchName) + } + if m.repo_name != nil { + fields = append(fields, gitcommit.FieldRepoName) + } + if m.commit_message != nil { + fields = append(fields, gitcommit.FieldCommitMessage) + } + if m.commit_timestamp != nil { + fields = append(fields, gitcommit.FieldCommitTimestamp) + } + if m.author != nil { + fields = append(fields, gitcommit.FieldAuthor) + } + if m.timestamp != nil { + fields = append(fields, gitcommit.FieldTimestamp) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *GitCommitMutation) Field(name string) (ent.Value, bool) { + switch name { + case gitcommit.FieldCreateTime: + return m.CreateTime() + case gitcommit.FieldUpdateTime: + return m.UpdateTime() + case gitcommit.FieldCommitHash: + return m.CommitHash() + case gitcommit.FieldBranchName: + return m.BranchName() + case gitcommit.FieldRepoName: + return m.RepoName() + case gitcommit.FieldCommitMessage: + return m.CommitMessage() + case gitcommit.FieldCommitTimestamp: + return m.CommitTimestamp() + case gitcommit.FieldAuthor: + return m.Author() + case gitcommit.FieldTimestamp: + return m.Timestamp() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *GitCommitMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case gitcommit.FieldCreateTime: + return m.OldCreateTime(ctx) + case gitcommit.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case gitcommit.FieldCommitHash: + return m.OldCommitHash(ctx) + case gitcommit.FieldBranchName: + return m.OldBranchName(ctx) + case gitcommit.FieldRepoName: + return m.OldRepoName(ctx) + case gitcommit.FieldCommitMessage: + return m.OldCommitMessage(ctx) + case gitcommit.FieldCommitTimestamp: + return m.OldCommitTimestamp(ctx) + case gitcommit.FieldAuthor: + return m.OldAuthor(ctx) + case gitcommit.FieldTimestamp: + return m.OldTimestamp(ctx) + } + return nil, fmt.Errorf("unknown GitCommit field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *GitCommitMutation) SetField(name string, value ent.Value) error { + switch name { + case gitcommit.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case gitcommit.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case gitcommit.FieldCommitHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCommitHash(v) + return nil + case gitcommit.FieldBranchName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBranchName(v) + return nil + case gitcommit.FieldRepoName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRepoName(v) + return nil + case gitcommit.FieldCommitMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCommitMessage(v) + return nil + case gitcommit.FieldCommitTimestamp: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCommitTimestamp(v) + return nil + case gitcommit.FieldAuthor: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAuthor(v) + return nil + case gitcommit.FieldTimestamp: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTimestamp(v) + return nil + } + return fmt.Errorf("unknown GitCommit field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *GitCommitMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *GitCommitMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *GitCommitMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown GitCommit numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *GitCommitMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(gitcommit.FieldAuthor) { + fields = append(fields, gitcommit.FieldAuthor) + } + if m.FieldCleared(gitcommit.FieldTimestamp) { + fields = append(fields, gitcommit.FieldTimestamp) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *GitCommitMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *GitCommitMutation) ClearField(name string) error { + switch name { + case gitcommit.FieldAuthor: + m.ClearAuthor() + return nil + case gitcommit.FieldTimestamp: + m.ClearTimestamp() + return nil + } + return fmt.Errorf("unknown GitCommit nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *GitCommitMutation) ResetField(name string) error { + switch name { + case gitcommit.FieldCreateTime: + m.ResetCreateTime() + return nil + case gitcommit.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case gitcommit.FieldCommitHash: + m.ResetCommitHash() + return nil + case gitcommit.FieldBranchName: + m.ResetBranchName() + return nil + case gitcommit.FieldRepoName: + m.ResetRepoName() + return nil + case gitcommit.FieldCommitMessage: + m.ResetCommitMessage() + return nil + case gitcommit.FieldCommitTimestamp: + m.ResetCommitTimestamp() + return nil + case gitcommit.FieldAuthor: + m.ResetAuthor() + return nil + case gitcommit.FieldTimestamp: + m.ResetTimestamp() + return nil + } + return fmt.Errorf("unknown GitCommit field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *GitCommitMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.results != nil { + edges = append(edges, gitcommit.EdgeResults) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *GitCommitMutation) AddedIDs(name string) []ent.Value { + switch name { + case gitcommit.EdgeResults: + ids := make([]ent.Value, 0, len(m.results)) + for id := range m.results { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *GitCommitMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedresults != nil { + edges = append(edges, gitcommit.EdgeResults) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *GitCommitMutation) RemovedIDs(name string) []ent.Value { + switch name { + case gitcommit.EdgeResults: + ids := make([]ent.Value, 0, len(m.removedresults)) + for id := range m.removedresults { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *GitCommitMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedresults { + edges = append(edges, gitcommit.EdgeResults) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *GitCommitMutation) EdgeCleared(name string) bool { + switch name { + case gitcommit.EdgeResults: + return m.clearedresults + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *GitCommitMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown GitCommit unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *GitCommitMutation) ResetEdge(name string) error { + switch name { + case gitcommit.EdgeResults: + m.ResetResults() + return nil + } + return fmt.Errorf("unknown GitCommit edge %s", name) +} + +// NodeMutation represents an operation that mutates the Node nodes in the graph. 
+type NodeMutation struct { + config + op Op + typ string + id *string + create_time *time.Time + update_time *time.Time + name *string + description *string + author *string + license *string + repository_url *string + icon_url *string + tags *[]string + appendtags []string + clearedFields map[string]struct{} + publisher *string + clearedpublisher bool + versions map[uuid.UUID]struct{} + removedversions map[uuid.UUID]struct{} + clearedversions bool + done bool + oldValue func(context.Context) (*Node, error) + predicates []predicate.Node +} + +var _ ent.Mutation = (*NodeMutation)(nil) + +// nodeOption allows management of the mutation configuration using functional options. +type nodeOption func(*NodeMutation) + +// newNodeMutation creates new mutation for the Node entity. +func newNodeMutation(c config, op Op, opts ...nodeOption) *NodeMutation { + m := &NodeMutation{ + config: c, + op: op, + typ: TypeNode, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withNodeID sets the ID field of the mutation. +func withNodeID(id string) nodeOption { + return func(m *NodeMutation) { + var ( + err error + once sync.Once + value *Node + ) + m.oldValue = func(ctx context.Context) (*Node, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Node.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withNode sets the old Node of the mutation. +func withNode(node *Node) nodeOption { + return func(m *NodeMutation) { + m.oldValue = func(context.Context) (*Node, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m NodeMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m NodeMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Node entities. +func (m *NodeMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *NodeMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *NodeMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Node.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreateTime sets the "create_time" field. +func (m *NodeMutation) SetCreateTime(t time.Time) { + m.create_time = &t +} + +// CreateTime returns the value of the "create_time" field in the mutation. 
+func (m *NodeMutation) CreateTime() (r time.Time, exists bool) { + v := m.create_time + if v == nil { + return + } + return *v, true +} + +// OldCreateTime returns the old "create_time" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *NodeMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. +func (m *NodeMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. +func (m *NodeMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *NodeMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetPublisherID sets the "publisher_id" field. +func (m *NodeMutation) SetPublisherID(s string) { + m.publisher = &s +} + +// PublisherID returns the value of the "publisher_id" field in the mutation. +func (m *NodeMutation) PublisherID() (r string, exists bool) { + v := m.publisher + if v == nil { + return + } + return *v, true +} + +// OldPublisherID returns the old "publisher_id" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldPublisherID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPublisherID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPublisherID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPublisherID: %w", err) + } + return oldValue.PublisherID, nil +} + +// ResetPublisherID resets all changes to the "publisher_id" field. +func (m *NodeMutation) ResetPublisherID() { + m.publisher = nil +} + +// SetName sets the "name" field. 
+func (m *NodeMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *NodeMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *NodeMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *NodeMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *NodeMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *NodeMutation) ClearDescription() { + m.description = nil + m.clearedFields[node.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *NodeMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[node.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *NodeMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, node.FieldDescription) +} + +// SetAuthor sets the "author" field. +func (m *NodeMutation) SetAuthor(s string) { + m.author = &s +} + +// Author returns the value of the "author" field in the mutation. +func (m *NodeMutation) Author() (r string, exists bool) { + v := m.author + if v == nil { + return + } + return *v, true +} + +// OldAuthor returns the old "author" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeMutation) OldAuthor(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAuthor is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAuthor requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAuthor: %w", err) + } + return oldValue.Author, nil +} + +// ClearAuthor clears the value of the "author" field. +func (m *NodeMutation) ClearAuthor() { + m.author = nil + m.clearedFields[node.FieldAuthor] = struct{}{} +} + +// AuthorCleared returns if the "author" field was cleared in this mutation. +func (m *NodeMutation) AuthorCleared() bool { + _, ok := m.clearedFields[node.FieldAuthor] + return ok +} + +// ResetAuthor resets all changes to the "author" field. +func (m *NodeMutation) ResetAuthor() { + m.author = nil + delete(m.clearedFields, node.FieldAuthor) +} + +// SetLicense sets the "license" field. +func (m *NodeMutation) SetLicense(s string) { + m.license = &s +} + +// License returns the value of the "license" field in the mutation. +func (m *NodeMutation) License() (r string, exists bool) { + v := m.license + if v == nil { + return + } + return *v, true +} + +// OldLicense returns the old "license" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeMutation) OldLicense(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLicense is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLicense requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLicense: %w", err) + } + return oldValue.License, nil +} + +// ResetLicense resets all changes to the "license" field. +func (m *NodeMutation) ResetLicense() { + m.license = nil +} + +// SetRepositoryURL sets the "repository_url" field. +func (m *NodeMutation) SetRepositoryURL(s string) { + m.repository_url = &s +} + +// RepositoryURL returns the value of the "repository_url" field in the mutation. +func (m *NodeMutation) RepositoryURL() (r string, exists bool) { + v := m.repository_url + if v == nil { + return + } + return *v, true +} + +// OldRepositoryURL returns the old "repository_url" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldRepositoryURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRepositoryURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRepositoryURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRepositoryURL: %w", err) + } + return oldValue.RepositoryURL, nil +} + +// ResetRepositoryURL resets all changes to the "repository_url" field. +func (m *NodeMutation) ResetRepositoryURL() { + m.repository_url = nil +} + +// SetIconURL sets the "icon_url" field. 
+func (m *NodeMutation) SetIconURL(s string) { + m.icon_url = &s +} + +// IconURL returns the value of the "icon_url" field in the mutation. +func (m *NodeMutation) IconURL() (r string, exists bool) { + v := m.icon_url + if v == nil { + return + } + return *v, true +} + +// OldIconURL returns the old "icon_url" field's value of the Node entity. +// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldIconURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIconURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIconURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIconURL: %w", err) + } + return oldValue.IconURL, nil +} + +// ClearIconURL clears the value of the "icon_url" field. +func (m *NodeMutation) ClearIconURL() { + m.icon_url = nil + m.clearedFields[node.FieldIconURL] = struct{}{} +} + +// IconURLCleared returns if the "icon_url" field was cleared in this mutation. +func (m *NodeMutation) IconURLCleared() bool { + _, ok := m.clearedFields[node.FieldIconURL] + return ok +} + +// ResetIconURL resets all changes to the "icon_url" field. +func (m *NodeMutation) ResetIconURL() { + m.icon_url = nil + delete(m.clearedFields, node.FieldIconURL) +} + +// SetTags sets the "tags" field. +func (m *NodeMutation) SetTags(s []string) { + m.tags = &s + m.appendtags = nil +} + +// Tags returns the value of the "tags" field in the mutation. +func (m *NodeMutation) Tags() (r []string, exists bool) { + v := m.tags + if v == nil { + return + } + return *v, true +} + +// OldTags returns the old "tags" field's value of the Node entity. 
+// If the Node object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeMutation) OldTags(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTags: %w", err) + } + return oldValue.Tags, nil +} + +// AppendTags adds s to the "tags" field. +func (m *NodeMutation) AppendTags(s []string) { + m.appendtags = append(m.appendtags, s...) +} + +// AppendedTags returns the list of values that were appended to the "tags" field in this mutation. +func (m *NodeMutation) AppendedTags() ([]string, bool) { + if len(m.appendtags) == 0 { + return nil, false + } + return m.appendtags, true +} + +// ResetTags resets all changes to the "tags" field. +func (m *NodeMutation) ResetTags() { + m.tags = nil + m.appendtags = nil +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. +func (m *NodeMutation) ClearPublisher() { + m.clearedpublisher = true + m.clearedFields[node.FieldPublisherID] = struct{}{} +} + +// PublisherCleared reports if the "publisher" edge to the Publisher entity was cleared. +func (m *NodeMutation) PublisherCleared() bool { + return m.clearedpublisher +} + +// PublisherIDs returns the "publisher" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PublisherID instead. It exists only for internal usage by the builders. +func (m *NodeMutation) PublisherIDs() (ids []string) { + if id := m.publisher; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetPublisher resets all changes to the "publisher" edge. 
+func (m *NodeMutation) ResetPublisher() {
+	m.publisher = nil
+	m.clearedpublisher = false
+}
+
+// AddVersionIDs adds the "versions" edge to the NodeVersion entity by ids.
+func (m *NodeMutation) AddVersionIDs(ids ...uuid.UUID) {
+	if m.versions == nil {
+		m.versions = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		m.versions[ids[i]] = struct{}{}
+	}
+}
+
+// ClearVersions clears the "versions" edge to the NodeVersion entity.
+func (m *NodeMutation) ClearVersions() {
+	m.clearedversions = true
+}
+
+// VersionsCleared reports if the "versions" edge to the NodeVersion entity was cleared.
+func (m *NodeMutation) VersionsCleared() bool {
+	return m.clearedversions
+}
+
+// RemoveVersionIDs removes the "versions" edge to the NodeVersion entity by IDs.
+func (m *NodeMutation) RemoveVersionIDs(ids ...uuid.UUID) {
+	if m.removedversions == nil {
+		m.removedversions = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		delete(m.versions, ids[i])
+		m.removedversions[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedVersionsIDs returns the removed IDs of the "versions" edge to the NodeVersion entity.
+func (m *NodeMutation) RemovedVersionsIDs() (ids []uuid.UUID) {
+	for id := range m.removedversions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// VersionsIDs returns the "versions" edge IDs in the mutation.
+func (m *NodeMutation) VersionsIDs() (ids []uuid.UUID) {
+	for id := range m.versions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetVersions resets all changes to the "versions" edge.
+func (m *NodeMutation) ResetVersions() {
+	m.versions = nil
+	m.clearedversions = false
+	m.removedversions = nil
+}
+
+// Where appends a list of predicates to the NodeMutation builder.
+func (m *NodeMutation) Where(ps ...predicate.Node) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the NodeMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *NodeMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Node, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *NodeMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *NodeMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Node). +func (m *NodeMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *NodeMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.create_time != nil { + fields = append(fields, node.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, node.FieldUpdateTime) + } + if m.publisher != nil { + fields = append(fields, node.FieldPublisherID) + } + if m.name != nil { + fields = append(fields, node.FieldName) + } + if m.description != nil { + fields = append(fields, node.FieldDescription) + } + if m.author != nil { + fields = append(fields, node.FieldAuthor) + } + if m.license != nil { + fields = append(fields, node.FieldLicense) + } + if m.repository_url != nil { + fields = append(fields, node.FieldRepositoryURL) + } + if m.icon_url != nil { + fields = append(fields, node.FieldIconURL) + } + if m.tags != nil { + fields = append(fields, node.FieldTags) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *NodeMutation) Field(name string) (ent.Value, bool) { + switch name { + case node.FieldCreateTime: + return m.CreateTime() + case node.FieldUpdateTime: + return m.UpdateTime() + case node.FieldPublisherID: + return m.PublisherID() + case node.FieldName: + return m.Name() + case node.FieldDescription: + return m.Description() + case node.FieldAuthor: + return m.Author() + case node.FieldLicense: + return m.License() + case node.FieldRepositoryURL: + return m.RepositoryURL() + case node.FieldIconURL: + return m.IconURL() + case node.FieldTags: + return m.Tags() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *NodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case node.FieldCreateTime: + return m.OldCreateTime(ctx) + case node.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case node.FieldPublisherID: + return m.OldPublisherID(ctx) + case node.FieldName: + return m.OldName(ctx) + case node.FieldDescription: + return m.OldDescription(ctx) + case node.FieldAuthor: + return m.OldAuthor(ctx) + case node.FieldLicense: + return m.OldLicense(ctx) + case node.FieldRepositoryURL: + return m.OldRepositoryURL(ctx) + case node.FieldIconURL: + return m.OldIconURL(ctx) + case node.FieldTags: + return m.OldTags(ctx) + } + return nil, fmt.Errorf("unknown Node field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *NodeMutation) SetField(name string, value ent.Value) error { + switch name { + case node.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case node.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case node.FieldPublisherID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublisherID(v) + return nil + case node.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case node.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case node.FieldAuthor: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAuthor(v) + return nil + case node.FieldLicense: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLicense(v) + return nil + case node.FieldRepositoryURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRepositoryURL(v) + return nil + case node.FieldIconURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIconURL(v) + return nil + case node.FieldTags: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTags(v) + return nil + } + return fmt.Errorf("unknown Node field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *NodeMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *NodeMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NodeMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Node numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *NodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(node.FieldDescription) { + fields = append(fields, node.FieldDescription) + } + if m.FieldCleared(node.FieldAuthor) { + fields = append(fields, node.FieldAuthor) + } + if m.FieldCleared(node.FieldIconURL) { + fields = append(fields, node.FieldIconURL) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *NodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *NodeMutation) ClearField(name string) error { + switch name { + case node.FieldDescription: + m.ClearDescription() + return nil + case node.FieldAuthor: + m.ClearAuthor() + return nil + case node.FieldIconURL: + m.ClearIconURL() + return nil + } + return fmt.Errorf("unknown Node nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *NodeMutation) ResetField(name string) error { + switch name { + case node.FieldCreateTime: + m.ResetCreateTime() + return nil + case node.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case node.FieldPublisherID: + m.ResetPublisherID() + return nil + case node.FieldName: + m.ResetName() + return nil + case node.FieldDescription: + m.ResetDescription() + return nil + case node.FieldAuthor: + m.ResetAuthor() + return nil + case node.FieldLicense: + m.ResetLicense() + return nil + case node.FieldRepositoryURL: + m.ResetRepositoryURL() + return nil + case node.FieldIconURL: + m.ResetIconURL() + return nil + case node.FieldTags: + m.ResetTags() + return nil + } + return fmt.Errorf("unknown Node field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *NodeMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.publisher != nil { + edges = append(edges, node.EdgePublisher) + } + if m.versions != nil { + edges = append(edges, node.EdgeVersions) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *NodeMutation) AddedIDs(name string) []ent.Value { + switch name { + case node.EdgePublisher: + if id := m.publisher; id != nil { + return []ent.Value{*id} + } + case node.EdgeVersions: + ids := make([]ent.Value, 0, len(m.versions)) + for id := range m.versions { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *NodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + if m.removedversions != nil { + edges = append(edges, node.EdgeVersions) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *NodeMutation) RemovedIDs(name string) []ent.Value { + switch name { + case node.EdgeVersions: + ids := make([]ent.Value, 0, len(m.removedversions)) + for id := range m.removedversions { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *NodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedpublisher { + edges = append(edges, node.EdgePublisher) + } + if m.clearedversions { + edges = append(edges, node.EdgeVersions) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *NodeMutation) EdgeCleared(name string) bool { + switch name { + case node.EdgePublisher: + return m.clearedpublisher + case node.EdgeVersions: + return m.clearedversions + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *NodeMutation) ClearEdge(name string) error { + switch name { + case node.EdgePublisher: + m.ClearPublisher() + return nil + } + return fmt.Errorf("unknown Node unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *NodeMutation) ResetEdge(name string) error { + switch name { + case node.EdgePublisher: + m.ResetPublisher() + return nil + case node.EdgeVersions: + m.ResetVersions() + return nil + } + return fmt.Errorf("unknown Node edge %s", name) +} + +// NodeVersionMutation represents an operation that mutates the NodeVersion nodes in the graph. 
+type NodeVersionMutation struct { + config + op Op + typ string + id *uuid.UUID + create_time *time.Time + update_time *time.Time + version *string + changelog *string + pip_dependencies *[]string + appendpip_dependencies []string + deprecated *bool + clearedFields map[string]struct{} + node *string + clearednode bool + storage_file *uuid.UUID + clearedstorage_file bool + done bool + oldValue func(context.Context) (*NodeVersion, error) + predicates []predicate.NodeVersion +} + +var _ ent.Mutation = (*NodeVersionMutation)(nil) + +// nodeversionOption allows management of the mutation configuration using functional options. +type nodeversionOption func(*NodeVersionMutation) + +// newNodeVersionMutation creates new mutation for the NodeVersion entity. +func newNodeVersionMutation(c config, op Op, opts ...nodeversionOption) *NodeVersionMutation { + m := &NodeVersionMutation{ + config: c, + op: op, + typ: TypeNodeVersion, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withNodeVersionID sets the ID field of the mutation. +func withNodeVersionID(id uuid.UUID) nodeversionOption { + return func(m *NodeVersionMutation) { + var ( + err error + once sync.Once + value *NodeVersion + ) + m.oldValue = func(ctx context.Context) (*NodeVersion, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().NodeVersion.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withNodeVersion sets the old NodeVersion of the mutation. +func withNodeVersion(node *NodeVersion) nodeversionOption { + return func(m *NodeVersionMutation) { + m.oldValue = func(context.Context) (*NodeVersion, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m NodeVersionMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m NodeVersionMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of NodeVersion entities.
+func (m *NodeVersionMutation) SetID(id uuid.UUID) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *NodeVersionMutation) ID() (id uuid.UUID, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *NodeVersionMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []uuid.UUID{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().NodeVersion.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreateTime sets the "create_time" field.
+func (m *NodeVersionMutation) SetCreateTime(t time.Time) {
+	m.create_time = &t
+}
+
+// CreateTime returns the value of the "create_time" field in the mutation.
+func (m *NodeVersionMutation) CreateTime() (r time.Time, exists bool) { + v := m.create_time + if v == nil { + return + } + return *v, true +} + +// OldCreateTime returns the old "create_time" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeVersionMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *NodeVersionMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. +func (m *NodeVersionMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. +func (m *NodeVersionMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeVersionMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *NodeVersionMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetNodeID sets the "node_id" field. +func (m *NodeVersionMutation) SetNodeID(s string) { + m.node = &s +} + +// NodeID returns the value of the "node_id" field in the mutation. +func (m *NodeVersionMutation) NodeID() (r string, exists bool) { + v := m.node + if v == nil { + return + } + return *v, true +} + +// OldNodeID returns the old "node_id" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeVersionMutation) OldNodeID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNodeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNodeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNodeID: %w", err) + } + return oldValue.NodeID, nil +} + +// ResetNodeID resets all changes to the "node_id" field. +func (m *NodeVersionMutation) ResetNodeID() { + m.node = nil +} + +// SetVersion sets the "version" field. 
+func (m *NodeVersionMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the value of the "version" field in the mutation. +func (m *NodeVersionMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old "version" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NodeVersionMutation) OldVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVersion: %w", err) + } + return oldValue.Version, nil +} + +// ResetVersion resets all changes to the "version" field. +func (m *NodeVersionMutation) ResetVersion() { + m.version = nil +} + +// SetChangelog sets the "changelog" field. +func (m *NodeVersionMutation) SetChangelog(s string) { + m.changelog = &s +} + +// Changelog returns the value of the "changelog" field in the mutation. +func (m *NodeVersionMutation) Changelog() (r string, exists bool) { + v := m.changelog + if v == nil { + return + } + return *v, true +} + +// OldChangelog returns the old "changelog" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeVersionMutation) OldChangelog(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldChangelog is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldChangelog requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldChangelog: %w", err) + } + return oldValue.Changelog, nil +} + +// ClearChangelog clears the value of the "changelog" field. +func (m *NodeVersionMutation) ClearChangelog() { + m.changelog = nil + m.clearedFields[nodeversion.FieldChangelog] = struct{}{} +} + +// ChangelogCleared returns if the "changelog" field was cleared in this mutation. +func (m *NodeVersionMutation) ChangelogCleared() bool { + _, ok := m.clearedFields[nodeversion.FieldChangelog] + return ok +} + +// ResetChangelog resets all changes to the "changelog" field. +func (m *NodeVersionMutation) ResetChangelog() { + m.changelog = nil + delete(m.clearedFields, nodeversion.FieldChangelog) +} + +// SetPipDependencies sets the "pip_dependencies" field. +func (m *NodeVersionMutation) SetPipDependencies(s []string) { + m.pip_dependencies = &s + m.appendpip_dependencies = nil +} + +// PipDependencies returns the value of the "pip_dependencies" field in the mutation. +func (m *NodeVersionMutation) PipDependencies() (r []string, exists bool) { + v := m.pip_dependencies + if v == nil { + return + } + return *v, true +} + +// OldPipDependencies returns the old "pip_dependencies" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeVersionMutation) OldPipDependencies(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPipDependencies is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPipDependencies requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPipDependencies: %w", err) + } + return oldValue.PipDependencies, nil +} + +// AppendPipDependencies adds s to the "pip_dependencies" field. +func (m *NodeVersionMutation) AppendPipDependencies(s []string) { + m.appendpip_dependencies = append(m.appendpip_dependencies, s...) +} + +// AppendedPipDependencies returns the list of values that were appended to the "pip_dependencies" field in this mutation. +func (m *NodeVersionMutation) AppendedPipDependencies() ([]string, bool) { + if len(m.appendpip_dependencies) == 0 { + return nil, false + } + return m.appendpip_dependencies, true +} + +// ResetPipDependencies resets all changes to the "pip_dependencies" field. +func (m *NodeVersionMutation) ResetPipDependencies() { + m.pip_dependencies = nil + m.appendpip_dependencies = nil +} + +// SetDeprecated sets the "deprecated" field. +func (m *NodeVersionMutation) SetDeprecated(b bool) { + m.deprecated = &b +} + +// Deprecated returns the value of the "deprecated" field in the mutation. +func (m *NodeVersionMutation) Deprecated() (r bool, exists bool) { + v := m.deprecated + if v == nil { + return + } + return *v, true +} + +// OldDeprecated returns the old "deprecated" field's value of the NodeVersion entity. +// If the NodeVersion object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NodeVersionMutation) OldDeprecated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeprecated is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeprecated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeprecated: %w", err) + } + return oldValue.Deprecated, nil +} + +// ResetDeprecated resets all changes to the "deprecated" field. +func (m *NodeVersionMutation) ResetDeprecated() { + m.deprecated = nil +} + +// ClearNode clears the "node" edge to the Node entity. +func (m *NodeVersionMutation) ClearNode() { + m.clearednode = true + m.clearedFields[nodeversion.FieldNodeID] = struct{}{} +} + +// NodeCleared reports if the "node" edge to the Node entity was cleared. +func (m *NodeVersionMutation) NodeCleared() bool { + return m.clearednode +} + +// NodeIDs returns the "node" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// NodeID instead. It exists only for internal usage by the builders. +func (m *NodeVersionMutation) NodeIDs() (ids []string) { + if id := m.node; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetNode resets all changes to the "node" edge. +func (m *NodeVersionMutation) ResetNode() { + m.node = nil + m.clearednode = false +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by id. +func (m *NodeVersionMutation) SetStorageFileID(id uuid.UUID) { + m.storage_file = &id +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. +func (m *NodeVersionMutation) ClearStorageFile() { + m.clearedstorage_file = true +} + +// StorageFileCleared reports if the "storage_file" edge to the StorageFile entity was cleared. 
+func (m *NodeVersionMutation) StorageFileCleared() bool {
+	return m.clearedstorage_file
+}
+
+// StorageFileID returns the "storage_file" edge ID in the mutation.
+func (m *NodeVersionMutation) StorageFileID() (id uuid.UUID, exists bool) {
+	if m.storage_file != nil {
+		return *m.storage_file, true
+	}
+	return
+}
+
+// StorageFileIDs returns the "storage_file" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// StorageFileID instead. It exists only for internal usage by the builders.
+func (m *NodeVersionMutation) StorageFileIDs() (ids []uuid.UUID) {
+	if id := m.storage_file; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetStorageFile resets all changes to the "storage_file" edge.
+func (m *NodeVersionMutation) ResetStorageFile() {
+	m.storage_file = nil
+	m.clearedstorage_file = false
+}
+
+// Where appends a list predicates to the NodeVersionMutation builder.
+func (m *NodeVersionMutation) Where(ps ...predicate.NodeVersion) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the NodeVersionMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *NodeVersionMutation) WhereP(ps ...func(*sql.Selector)) {
+	// Adapt each raw *sql.Selector predicate to the typed predicate.NodeVersion
+	// form expected by Where; the function signatures are identical, so a plain
+	// conversion-by-assignment suffices.
+	p := make([]predicate.NodeVersion, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *NodeVersionMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *NodeVersionMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (NodeVersion).
+func (m *NodeVersionMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *NodeVersionMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.create_time != nil { + fields = append(fields, nodeversion.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, nodeversion.FieldUpdateTime) + } + if m.node != nil { + fields = append(fields, nodeversion.FieldNodeID) + } + if m.version != nil { + fields = append(fields, nodeversion.FieldVersion) + } + if m.changelog != nil { + fields = append(fields, nodeversion.FieldChangelog) + } + if m.pip_dependencies != nil { + fields = append(fields, nodeversion.FieldPipDependencies) + } + if m.deprecated != nil { + fields = append(fields, nodeversion.FieldDeprecated) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *NodeVersionMutation) Field(name string) (ent.Value, bool) { + switch name { + case nodeversion.FieldCreateTime: + return m.CreateTime() + case nodeversion.FieldUpdateTime: + return m.UpdateTime() + case nodeversion.FieldNodeID: + return m.NodeID() + case nodeversion.FieldVersion: + return m.Version() + case nodeversion.FieldChangelog: + return m.Changelog() + case nodeversion.FieldPipDependencies: + return m.PipDependencies() + case nodeversion.FieldDeprecated: + return m.Deprecated() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *NodeVersionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case nodeversion.FieldCreateTime: + return m.OldCreateTime(ctx) + case nodeversion.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case nodeversion.FieldNodeID: + return m.OldNodeID(ctx) + case nodeversion.FieldVersion: + return m.OldVersion(ctx) + case nodeversion.FieldChangelog: + return m.OldChangelog(ctx) + case nodeversion.FieldPipDependencies: + return m.OldPipDependencies(ctx) + case nodeversion.FieldDeprecated: + return m.OldDeprecated(ctx) + } + return nil, fmt.Errorf("unknown NodeVersion field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NodeVersionMutation) SetField(name string, value ent.Value) error { + switch name { + case nodeversion.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case nodeversion.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case nodeversion.FieldNodeID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNodeID(v) + return nil + case nodeversion.FieldVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVersion(v) + return nil + case nodeversion.FieldChangelog: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetChangelog(v) + return nil + case nodeversion.FieldPipDependencies: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPipDependencies(v) + return nil + case 
nodeversion.FieldDeprecated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeprecated(v) + return nil + } + return fmt.Errorf("unknown NodeVersion field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *NodeVersionMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *NodeVersionMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NodeVersionMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown NodeVersion numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *NodeVersionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(nodeversion.FieldChangelog) { + fields = append(fields, nodeversion.FieldChangelog) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *NodeVersionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *NodeVersionMutation) ClearField(name string) error {
+	// Changelog is the only nullable NodeVersion field (it is the only field
+	// reported by ClearedFields), so it is the only one that can be cleared.
+	switch name {
+	case nodeversion.FieldChangelog:
+		m.ClearChangelog()
+		return nil
+	}
+	return fmt.Errorf("unknown NodeVersion nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *NodeVersionMutation) ResetField(name string) error {
+	switch name {
+	case nodeversion.FieldCreateTime:
+		m.ResetCreateTime()
+		return nil
+	case nodeversion.FieldUpdateTime:
+		m.ResetUpdateTime()
+		return nil
+	case nodeversion.FieldNodeID:
+		m.ResetNodeID()
+		return nil
+	case nodeversion.FieldVersion:
+		m.ResetVersion()
+		return nil
+	case nodeversion.FieldChangelog:
+		m.ResetChangelog()
+		return nil
+	case nodeversion.FieldPipDependencies:
+		m.ResetPipDependencies()
+		return nil
+	case nodeversion.FieldDeprecated:
+		m.ResetDeprecated()
+		return nil
+	}
+	return fmt.Errorf("unknown NodeVersion field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *NodeVersionMutation) AddedEdges() []string {
+	edges := make([]string, 0, 2)
+	if m.node != nil {
+		edges = append(edges, nodeversion.EdgeNode)
+	}
+	if m.storage_file != nil {
+		edges = append(edges, nodeversion.EdgeStorageFile)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *NodeVersionMutation) AddedIDs(name string) []ent.Value {
+	// Both edges are unique (single-ID), so at most one value is returned per edge.
+	switch name {
+	case nodeversion.EdgeNode:
+		if id := m.node; id != nil {
+			return []ent.Value{*id}
+		}
+	case nodeversion.EdgeStorageFile:
+		if id := m.storage_file; id != nil {
+			return []ent.Value{*id}
+		}
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *NodeVersionMutation) RemovedEdges() []string {
+	// Both NodeVersion edges ("node", "storage_file") are unique (single-ID)
+	// edges: they can be cleared but never have individual IDs removed, so the
+	// removed set is always empty.
+	edges := make([]string, 0, 2)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *NodeVersionMutation) RemovedIDs(name string) []ent.Value {
+	// See RemovedEdges: unique edges are cleared, not removed, so there are
+	// never removed IDs for any edge name.
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *NodeVersionMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 2)
+	if m.clearednode {
+		edges = append(edges, nodeversion.EdgeNode)
+	}
+	if m.clearedstorage_file {
+		edges = append(edges, nodeversion.EdgeStorageFile)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *NodeVersionMutation) EdgeCleared(name string) bool {
+	switch name {
+	case nodeversion.EdgeNode:
+		return m.clearednode
+	case nodeversion.EdgeStorageFile:
+		return m.clearedstorage_file
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *NodeVersionMutation) ClearEdge(name string) error {
+	switch name {
+	case nodeversion.EdgeNode:
+		m.ClearNode()
+		return nil
+	case nodeversion.EdgeStorageFile:
+		m.ClearStorageFile()
+		return nil
+	}
+	return fmt.Errorf("unknown NodeVersion unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *NodeVersionMutation) ResetEdge(name string) error {
+	switch name {
+	case nodeversion.EdgeNode:
+		m.ResetNode()
+		return nil
+	case nodeversion.EdgeStorageFile:
+		m.ResetStorageFile()
+		return nil
+	}
+	return fmt.Errorf("unknown NodeVersion edge %s", name)
+}
+
+// PersonalAccessTokenMutation represents an operation that mutates the PersonalAccessToken nodes in the graph.
+type PersonalAccessTokenMutation struct { + config + op Op + typ string + id *uuid.UUID + create_time *time.Time + update_time *time.Time + name *string + description *string + token *string + clearedFields map[string]struct{} + publisher *string + clearedpublisher bool + done bool + oldValue func(context.Context) (*PersonalAccessToken, error) + predicates []predicate.PersonalAccessToken +} + +var _ ent.Mutation = (*PersonalAccessTokenMutation)(nil) + +// personalaccesstokenOption allows management of the mutation configuration using functional options. +type personalaccesstokenOption func(*PersonalAccessTokenMutation) + +// newPersonalAccessTokenMutation creates new mutation for the PersonalAccessToken entity. +func newPersonalAccessTokenMutation(c config, op Op, opts ...personalaccesstokenOption) *PersonalAccessTokenMutation { + m := &PersonalAccessTokenMutation{ + config: c, + op: op, + typ: TypePersonalAccessToken, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPersonalAccessTokenID sets the ID field of the mutation. +func withPersonalAccessTokenID(id uuid.UUID) personalaccesstokenOption { + return func(m *PersonalAccessTokenMutation) { + var ( + err error + once sync.Once + value *PersonalAccessToken + ) + m.oldValue = func(ctx context.Context) (*PersonalAccessToken, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().PersonalAccessToken.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPersonalAccessToken sets the old PersonalAccessToken of the mutation. +func withPersonalAccessToken(node *PersonalAccessToken) personalaccesstokenOption { + return func(m *PersonalAccessTokenMutation) { + m.oldValue = func(context.Context) (*PersonalAccessToken, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. 
If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PersonalAccessTokenMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PersonalAccessTokenMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of PersonalAccessToken entities. +func (m *PersonalAccessTokenMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PersonalAccessTokenMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PersonalAccessTokenMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().PersonalAccessToken.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreateTime sets the "create_time" field. 
+func (m *PersonalAccessTokenMutation) SetCreateTime(t time.Time) {
+	m.create_time = &t
+}
+
+// CreateTime returns the value of the "create_time" field in the mutation.
+func (m *PersonalAccessTokenMutation) CreateTime() (r time.Time, exists bool) {
+	v := m.create_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreateTime returns the old "create_time" field's value of the PersonalAccessToken entity.
+// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PersonalAccessTokenMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreateTime is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreateTime requires an ID field in the mutation")
+	}
+	// m.oldValue lazily loads the pre-mutation entity and memoizes it via
+	// sync.Once (see withPersonalAccessTokenID), so repeated Old* calls
+	// issue at most one query.
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreateTime: %w", err)
+	}
+	return oldValue.CreateTime, nil
+}
+
+// ResetCreateTime resets all changes to the "create_time" field.
+func (m *PersonalAccessTokenMutation) ResetCreateTime() {
+	m.create_time = nil
+}
+
+// SetUpdateTime sets the "update_time" field.
+func (m *PersonalAccessTokenMutation) SetUpdateTime(t time.Time) {
+	m.update_time = &t
+}
+
+// UpdateTime returns the value of the "update_time" field in the mutation.
+func (m *PersonalAccessTokenMutation) UpdateTime() (r time.Time, exists bool) {
+	v := m.update_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdateTime returns the old "update_time" field's value of the PersonalAccessToken entity.
+// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PersonalAccessTokenMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *PersonalAccessTokenMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetName sets the "name" field. +func (m *PersonalAccessTokenMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *PersonalAccessTokenMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the PersonalAccessToken entity. +// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PersonalAccessTokenMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *PersonalAccessTokenMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. 
+func (m *PersonalAccessTokenMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *PersonalAccessTokenMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the PersonalAccessToken entity. +// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PersonalAccessTokenMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ResetDescription resets all changes to the "description" field. +func (m *PersonalAccessTokenMutation) ResetDescription() { + m.description = nil +} + +// SetPublisherID sets the "publisher_id" field. +func (m *PersonalAccessTokenMutation) SetPublisherID(s string) { + m.publisher = &s +} + +// PublisherID returns the value of the "publisher_id" field in the mutation. +func (m *PersonalAccessTokenMutation) PublisherID() (r string, exists bool) { + v := m.publisher + if v == nil { + return + } + return *v, true +} + +// OldPublisherID returns the old "publisher_id" field's value of the PersonalAccessToken entity. +// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PersonalAccessTokenMutation) OldPublisherID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPublisherID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPublisherID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPublisherID: %w", err) + } + return oldValue.PublisherID, nil +} + +// ResetPublisherID resets all changes to the "publisher_id" field. +func (m *PersonalAccessTokenMutation) ResetPublisherID() { + m.publisher = nil +} + +// SetToken sets the "token" field. +func (m *PersonalAccessTokenMutation) SetToken(s string) { + m.token = &s +} + +// Token returns the value of the "token" field in the mutation. +func (m *PersonalAccessTokenMutation) Token() (r string, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the PersonalAccessToken entity. +// If the PersonalAccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PersonalAccessTokenMutation) OldToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil +} + +// ResetToken resets all changes to the "token" field. +func (m *PersonalAccessTokenMutation) ResetToken() { + m.token = nil +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. 
+func (m *PersonalAccessTokenMutation) ClearPublisher() { + m.clearedpublisher = true + m.clearedFields[personalaccesstoken.FieldPublisherID] = struct{}{} +} + +// PublisherCleared reports if the "publisher" edge to the Publisher entity was cleared. +func (m *PersonalAccessTokenMutation) PublisherCleared() bool { + return m.clearedpublisher +} + +// PublisherIDs returns the "publisher" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PublisherID instead. It exists only for internal usage by the builders. +func (m *PersonalAccessTokenMutation) PublisherIDs() (ids []string) { + if id := m.publisher; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetPublisher resets all changes to the "publisher" edge. +func (m *PersonalAccessTokenMutation) ResetPublisher() { + m.publisher = nil + m.clearedpublisher = false +} + +// Where appends a list predicates to the PersonalAccessTokenMutation builder. +func (m *PersonalAccessTokenMutation) Where(ps ...predicate.PersonalAccessToken) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PersonalAccessTokenMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PersonalAccessTokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PersonalAccessToken, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PersonalAccessTokenMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PersonalAccessTokenMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PersonalAccessToken). +func (m *PersonalAccessTokenMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. 
Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PersonalAccessTokenMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.create_time != nil { + fields = append(fields, personalaccesstoken.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, personalaccesstoken.FieldUpdateTime) + } + if m.name != nil { + fields = append(fields, personalaccesstoken.FieldName) + } + if m.description != nil { + fields = append(fields, personalaccesstoken.FieldDescription) + } + if m.publisher != nil { + fields = append(fields, personalaccesstoken.FieldPublisherID) + } + if m.token != nil { + fields = append(fields, personalaccesstoken.FieldToken) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PersonalAccessTokenMutation) Field(name string) (ent.Value, bool) { + switch name { + case personalaccesstoken.FieldCreateTime: + return m.CreateTime() + case personalaccesstoken.FieldUpdateTime: + return m.UpdateTime() + case personalaccesstoken.FieldName: + return m.Name() + case personalaccesstoken.FieldDescription: + return m.Description() + case personalaccesstoken.FieldPublisherID: + return m.PublisherID() + case personalaccesstoken.FieldToken: + return m.Token() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *PersonalAccessTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case personalaccesstoken.FieldCreateTime: + return m.OldCreateTime(ctx) + case personalaccesstoken.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case personalaccesstoken.FieldName: + return m.OldName(ctx) + case personalaccesstoken.FieldDescription: + return m.OldDescription(ctx) + case personalaccesstoken.FieldPublisherID: + return m.OldPublisherID(ctx) + case personalaccesstoken.FieldToken: + return m.OldToken(ctx) + } + return nil, fmt.Errorf("unknown PersonalAccessToken field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PersonalAccessTokenMutation) SetField(name string, value ent.Value) error { + switch name { + case personalaccesstoken.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case personalaccesstoken.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case personalaccesstoken.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case personalaccesstoken.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case personalaccesstoken.FieldPublisherID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublisherID(v) + return nil + case personalaccesstoken.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + 
return nil + } + return fmt.Errorf("unknown PersonalAccessToken field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PersonalAccessTokenMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PersonalAccessTokenMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PersonalAccessTokenMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown PersonalAccessToken numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PersonalAccessTokenMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PersonalAccessTokenMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PersonalAccessTokenMutation) ClearField(name string) error { + return fmt.Errorf("unknown PersonalAccessToken nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *PersonalAccessTokenMutation) ResetField(name string) error { + switch name { + case personalaccesstoken.FieldCreateTime: + m.ResetCreateTime() + return nil + case personalaccesstoken.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case personalaccesstoken.FieldName: + m.ResetName() + return nil + case personalaccesstoken.FieldDescription: + m.ResetDescription() + return nil + case personalaccesstoken.FieldPublisherID: + m.ResetPublisherID() + return nil + case personalaccesstoken.FieldToken: + m.ResetToken() + return nil + } + return fmt.Errorf("unknown PersonalAccessToken field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PersonalAccessTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.publisher != nil { + edges = append(edges, personalaccesstoken.EdgePublisher) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PersonalAccessTokenMutation) AddedIDs(name string) []ent.Value { + switch name { + case personalaccesstoken.EdgePublisher: + if id := m.publisher; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PersonalAccessTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PersonalAccessTokenMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *PersonalAccessTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedpublisher { + edges = append(edges, personalaccesstoken.EdgePublisher) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PersonalAccessTokenMutation) EdgeCleared(name string) bool { + switch name { + case personalaccesstoken.EdgePublisher: + return m.clearedpublisher + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PersonalAccessTokenMutation) ClearEdge(name string) error { + switch name { + case personalaccesstoken.EdgePublisher: + m.ClearPublisher() + return nil + } + return fmt.Errorf("unknown PersonalAccessToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PersonalAccessTokenMutation) ResetEdge(name string) error { + switch name { + case personalaccesstoken.EdgePublisher: + m.ResetPublisher() + return nil + } + return fmt.Errorf("unknown PersonalAccessToken edge %s", name) +} + +// PublisherMutation represents an operation that mutates the Publisher nodes in the graph. 
+type PublisherMutation struct { + config + op Op + typ string + id *string + create_time *time.Time + update_time *time.Time + name *string + description *string + website *string + support_email *string + source_code_repo *string + logo_url *string + clearedFields map[string]struct{} + publisher_permissions map[int]struct{} + removedpublisher_permissions map[int]struct{} + clearedpublisher_permissions bool + nodes map[string]struct{} + removednodes map[string]struct{} + clearednodes bool + personal_access_tokens map[uuid.UUID]struct{} + removedpersonal_access_tokens map[uuid.UUID]struct{} + clearedpersonal_access_tokens bool + done bool + oldValue func(context.Context) (*Publisher, error) + predicates []predicate.Publisher +} + +var _ ent.Mutation = (*PublisherMutation)(nil) + +// publisherOption allows management of the mutation configuration using functional options. +type publisherOption func(*PublisherMutation) + +// newPublisherMutation creates new mutation for the Publisher entity. +func newPublisherMutation(c config, op Op, opts ...publisherOption) *PublisherMutation { + m := &PublisherMutation{ + config: c, + op: op, + typ: TypePublisher, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPublisherID sets the ID field of the mutation. +func withPublisherID(id string) publisherOption { + return func(m *PublisherMutation) { + var ( + err error + once sync.Once + value *Publisher + ) + m.oldValue = func(ctx context.Context) (*Publisher, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Publisher.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPublisher sets the old Publisher of the mutation. 
+func withPublisher(node *Publisher) publisherOption { + return func(m *PublisherMutation) { + m.oldValue = func(context.Context) (*Publisher, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PublisherMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PublisherMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Publisher entities. +func (m *PublisherMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PublisherMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *PublisherMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Publisher.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreateTime sets the "create_time" field. +func (m *PublisherMutation) SetCreateTime(t time.Time) { + m.create_time = &t +} + +// CreateTime returns the value of the "create_time" field in the mutation. +func (m *PublisherMutation) CreateTime() (r time.Time, exists bool) { + v := m.create_time + if v == nil { + return + } + return *v, true +} + +// OldCreateTime returns the old "create_time" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *PublisherMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. +func (m *PublisherMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. 
+func (m *PublisherMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *PublisherMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetName sets the "name" field. +func (m *PublisherMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *PublisherMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PublisherMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *PublisherMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *PublisherMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *PublisherMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *PublisherMutation) ClearDescription() { + m.description = nil + m.clearedFields[publisher.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. 
+func (m *PublisherMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[publisher.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *PublisherMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, publisher.FieldDescription) +} + +// SetWebsite sets the "website" field. +func (m *PublisherMutation) SetWebsite(s string) { + m.website = &s +} + +// Website returns the value of the "website" field in the mutation. +func (m *PublisherMutation) Website() (r string, exists bool) { + v := m.website + if v == nil { + return + } + return *v, true +} + +// OldWebsite returns the old "website" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldWebsite(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWebsite is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWebsite requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWebsite: %w", err) + } + return oldValue.Website, nil +} + +// ClearWebsite clears the value of the "website" field. +func (m *PublisherMutation) ClearWebsite() { + m.website = nil + m.clearedFields[publisher.FieldWebsite] = struct{}{} +} + +// WebsiteCleared returns if the "website" field was cleared in this mutation. +func (m *PublisherMutation) WebsiteCleared() bool { + _, ok := m.clearedFields[publisher.FieldWebsite] + return ok +} + +// ResetWebsite resets all changes to the "website" field. 
+func (m *PublisherMutation) ResetWebsite() { + m.website = nil + delete(m.clearedFields, publisher.FieldWebsite) +} + +// SetSupportEmail sets the "support_email" field. +func (m *PublisherMutation) SetSupportEmail(s string) { + m.support_email = &s +} + +// SupportEmail returns the value of the "support_email" field in the mutation. +func (m *PublisherMutation) SupportEmail() (r string, exists bool) { + v := m.support_email + if v == nil { + return + } + return *v, true +} + +// OldSupportEmail returns the old "support_email" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldSupportEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSupportEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSupportEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSupportEmail: %w", err) + } + return oldValue.SupportEmail, nil +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (m *PublisherMutation) ClearSupportEmail() { + m.support_email = nil + m.clearedFields[publisher.FieldSupportEmail] = struct{}{} +} + +// SupportEmailCleared returns if the "support_email" field was cleared in this mutation. +func (m *PublisherMutation) SupportEmailCleared() bool { + _, ok := m.clearedFields[publisher.FieldSupportEmail] + return ok +} + +// ResetSupportEmail resets all changes to the "support_email" field. +func (m *PublisherMutation) ResetSupportEmail() { + m.support_email = nil + delete(m.clearedFields, publisher.FieldSupportEmail) +} + +// SetSourceCodeRepo sets the "source_code_repo" field. 
+func (m *PublisherMutation) SetSourceCodeRepo(s string) { + m.source_code_repo = &s +} + +// SourceCodeRepo returns the value of the "source_code_repo" field in the mutation. +func (m *PublisherMutation) SourceCodeRepo() (r string, exists bool) { + v := m.source_code_repo + if v == nil { + return + } + return *v, true +} + +// OldSourceCodeRepo returns the old "source_code_repo" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldSourceCodeRepo(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceCodeRepo is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceCodeRepo requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceCodeRepo: %w", err) + } + return oldValue.SourceCodeRepo, nil +} + +// ClearSourceCodeRepo clears the value of the "source_code_repo" field. +func (m *PublisherMutation) ClearSourceCodeRepo() { + m.source_code_repo = nil + m.clearedFields[publisher.FieldSourceCodeRepo] = struct{}{} +} + +// SourceCodeRepoCleared returns if the "source_code_repo" field was cleared in this mutation. +func (m *PublisherMutation) SourceCodeRepoCleared() bool { + _, ok := m.clearedFields[publisher.FieldSourceCodeRepo] + return ok +} + +// ResetSourceCodeRepo resets all changes to the "source_code_repo" field. +func (m *PublisherMutation) ResetSourceCodeRepo() { + m.source_code_repo = nil + delete(m.clearedFields, publisher.FieldSourceCodeRepo) +} + +// SetLogoURL sets the "logo_url" field. +func (m *PublisherMutation) SetLogoURL(s string) { + m.logo_url = &s +} + +// LogoURL returns the value of the "logo_url" field in the mutation. 
+func (m *PublisherMutation) LogoURL() (r string, exists bool) { + v := m.logo_url + if v == nil { + return + } + return *v, true +} + +// OldLogoURL returns the old "logo_url" field's value of the Publisher entity. +// If the Publisher object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PublisherMutation) OldLogoURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLogoURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLogoURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLogoURL: %w", err) + } + return oldValue.LogoURL, nil +} + +// ClearLogoURL clears the value of the "logo_url" field. +func (m *PublisherMutation) ClearLogoURL() { + m.logo_url = nil + m.clearedFields[publisher.FieldLogoURL] = struct{}{} +} + +// LogoURLCleared returns if the "logo_url" field was cleared in this mutation. +func (m *PublisherMutation) LogoURLCleared() bool { + _, ok := m.clearedFields[publisher.FieldLogoURL] + return ok +} + +// ResetLogoURL resets all changes to the "logo_url" field. +func (m *PublisherMutation) ResetLogoURL() { + m.logo_url = nil + delete(m.clearedFields, publisher.FieldLogoURL) +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by ids. +func (m *PublisherMutation) AddPublisherPermissionIDs(ids ...int) { + if m.publisher_permissions == nil { + m.publisher_permissions = make(map[int]struct{}) + } + for i := range ids { + m.publisher_permissions[ids[i]] = struct{}{} + } +} + +// ClearPublisherPermissions clears the "publisher_permissions" edge to the PublisherPermission entity. 
+func (m *PublisherMutation) ClearPublisherPermissions() {
+	m.clearedpublisher_permissions = true
+}
+
+// PublisherPermissionsCleared reports if the "publisher_permissions" edge to the PublisherPermission entity was cleared.
+func (m *PublisherMutation) PublisherPermissionsCleared() bool {
+	return m.clearedpublisher_permissions
+}
+
+// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to the PublisherPermission entity by IDs.
+func (m *PublisherMutation) RemovePublisherPermissionIDs(ids ...int) {
+	if m.removedpublisher_permissions == nil {
+		m.removedpublisher_permissions = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.publisher_permissions, ids[i])
+		m.removedpublisher_permissions[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPublisherPermissionsIDs returns the removed IDs of the "publisher_permissions" edge to the PublisherPermission entity.
+// NOTE(review): godoc previously began with "RemovedPublisherPermissions", which does not match the
+// declared name; Go doc comments must start with the identifier they document. This mismatch comes
+// from the ent template — consider fixing upstream so regeneration keeps it.
+func (m *PublisherMutation) RemovedPublisherPermissionsIDs() (ids []int) {
+	for id := range m.removedpublisher_permissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PublisherPermissionsIDs returns the "publisher_permissions" edge IDs in the mutation.
+func (m *PublisherMutation) PublisherPermissionsIDs() (ids []int) {
+	for id := range m.publisher_permissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPublisherPermissions resets all changes to the "publisher_permissions" edge.
+func (m *PublisherMutation) ResetPublisherPermissions() {
+	m.publisher_permissions = nil
+	m.clearedpublisher_permissions = false
+	m.removedpublisher_permissions = nil
+}
+
+// AddNodeIDs adds the "nodes" edge to the Node entity by ids.
+func (m *PublisherMutation) AddNodeIDs(ids ...string) {
+	if m.nodes == nil {
+		m.nodes = make(map[string]struct{})
+	}
+	for i := range ids {
+		m.nodes[ids[i]] = struct{}{}
+	}
+}
+
+// ClearNodes clears the "nodes" edge to the Node entity.
+func (m *PublisherMutation) ClearNodes() {
+	m.clearednodes = true
+}
+
+// NodesCleared reports if the "nodes" edge to the Node entity was cleared.
+func (m *PublisherMutation) NodesCleared() bool {
+	return m.clearednodes
+}
+
+// RemoveNodeIDs removes the "nodes" edge to the Node entity by IDs.
+func (m *PublisherMutation) RemoveNodeIDs(ids ...string) {
+	if m.removednodes == nil {
+		m.removednodes = make(map[string]struct{})
+	}
+	for i := range ids {
+		delete(m.nodes, ids[i])
+		m.removednodes[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedNodesIDs returns the removed IDs of the "nodes" edge to the Node entity.
+// NOTE(review): godoc previously began with "RemovedNodes", which does not match the declared
+// name; Go doc comments must start with the identifier they document (ent template quirk).
+func (m *PublisherMutation) RemovedNodesIDs() (ids []string) {
+	for id := range m.removednodes {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// NodesIDs returns the "nodes" edge IDs in the mutation.
+func (m *PublisherMutation) NodesIDs() (ids []string) {
+	for id := range m.nodes {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetNodes resets all changes to the "nodes" edge.
+func (m *PublisherMutation) ResetNodes() {
+	m.nodes = nil
+	m.clearednodes = false
+	m.removednodes = nil
+}
+
+// AddPersonalAccessTokenIDs adds the "personal_access_tokens" edge to the PersonalAccessToken entity by ids.
+func (m *PublisherMutation) AddPersonalAccessTokenIDs(ids ...uuid.UUID) {
+	if m.personal_access_tokens == nil {
+		m.personal_access_tokens = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		m.personal_access_tokens[ids[i]] = struct{}{}
+	}
+}
+
+// ClearPersonalAccessTokens clears the "personal_access_tokens" edge to the PersonalAccessToken entity.
+func (m *PublisherMutation) ClearPersonalAccessTokens() {
+	m.clearedpersonal_access_tokens = true
+}
+
+// PersonalAccessTokensCleared reports if the "personal_access_tokens" edge to the PersonalAccessToken entity was cleared.
+func (m *PublisherMutation) PersonalAccessTokensCleared() bool {
+	return m.clearedpersonal_access_tokens
+}
+
+// RemovePersonalAccessTokenIDs removes the "personal_access_tokens" edge to the PersonalAccessToken entity by IDs.
+func (m *PublisherMutation) RemovePersonalAccessTokenIDs(ids ...uuid.UUID) {
+	if m.removedpersonal_access_tokens == nil {
+		m.removedpersonal_access_tokens = make(map[uuid.UUID]struct{})
+	}
+	for i := range ids {
+		delete(m.personal_access_tokens, ids[i])
+		m.removedpersonal_access_tokens[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPersonalAccessTokensIDs returns the removed IDs of the "personal_access_tokens" edge to the PersonalAccessToken entity.
+// NOTE(review): godoc previously began with "RemovedPersonalAccessTokens", which does not match the
+// declared name; Go doc comments must start with the identifier they document (ent template quirk).
+func (m *PublisherMutation) RemovedPersonalAccessTokensIDs() (ids []uuid.UUID) {
+	for id := range m.removedpersonal_access_tokens {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PersonalAccessTokensIDs returns the "personal_access_tokens" edge IDs in the mutation.
+func (m *PublisherMutation) PersonalAccessTokensIDs() (ids []uuid.UUID) {
+	for id := range m.personal_access_tokens {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPersonalAccessTokens resets all changes to the "personal_access_tokens" edge.
+func (m *PublisherMutation) ResetPersonalAccessTokens() {
+	m.personal_access_tokens = nil
+	m.clearedpersonal_access_tokens = false
+	m.removedpersonal_access_tokens = nil
+}
+
+// Where appends a list predicates to the PublisherMutation builder.
+func (m *PublisherMutation) Where(ps ...predicate.Publisher) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the PublisherMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *PublisherMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Publisher, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *PublisherMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PublisherMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Publisher). +func (m *PublisherMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PublisherMutation) Fields() []string { + fields := make([]string, 0, 8) + if m.create_time != nil { + fields = append(fields, publisher.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, publisher.FieldUpdateTime) + } + if m.name != nil { + fields = append(fields, publisher.FieldName) + } + if m.description != nil { + fields = append(fields, publisher.FieldDescription) + } + if m.website != nil { + fields = append(fields, publisher.FieldWebsite) + } + if m.support_email != nil { + fields = append(fields, publisher.FieldSupportEmail) + } + if m.source_code_repo != nil { + fields = append(fields, publisher.FieldSourceCodeRepo) + } + if m.logo_url != nil { + fields = append(fields, publisher.FieldLogoURL) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *PublisherMutation) Field(name string) (ent.Value, bool) { + switch name { + case publisher.FieldCreateTime: + return m.CreateTime() + case publisher.FieldUpdateTime: + return m.UpdateTime() + case publisher.FieldName: + return m.Name() + case publisher.FieldDescription: + return m.Description() + case publisher.FieldWebsite: + return m.Website() + case publisher.FieldSupportEmail: + return m.SupportEmail() + case publisher.FieldSourceCodeRepo: + return m.SourceCodeRepo() + case publisher.FieldLogoURL: + return m.LogoURL() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PublisherMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case publisher.FieldCreateTime: + return m.OldCreateTime(ctx) + case publisher.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case publisher.FieldName: + return m.OldName(ctx) + case publisher.FieldDescription: + return m.OldDescription(ctx) + case publisher.FieldWebsite: + return m.OldWebsite(ctx) + case publisher.FieldSupportEmail: + return m.OldSupportEmail(ctx) + case publisher.FieldSourceCodeRepo: + return m.OldSourceCodeRepo(ctx) + case publisher.FieldLogoURL: + return m.OldLogoURL(ctx) + } + return nil, fmt.Errorf("unknown Publisher field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *PublisherMutation) SetField(name string, value ent.Value) error { + switch name { + case publisher.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case publisher.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case publisher.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case publisher.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case publisher.FieldWebsite: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWebsite(v) + return nil + case publisher.FieldSupportEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSupportEmail(v) + return nil + case publisher.FieldSourceCodeRepo: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceCodeRepo(v) + return nil + case publisher.FieldLogoURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLogoURL(v) + return nil + } + return fmt.Errorf("unknown Publisher field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PublisherMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
+func (m *PublisherMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PublisherMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Publisher numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PublisherMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(publisher.FieldDescription) { + fields = append(fields, publisher.FieldDescription) + } + if m.FieldCleared(publisher.FieldWebsite) { + fields = append(fields, publisher.FieldWebsite) + } + if m.FieldCleared(publisher.FieldSupportEmail) { + fields = append(fields, publisher.FieldSupportEmail) + } + if m.FieldCleared(publisher.FieldSourceCodeRepo) { + fields = append(fields, publisher.FieldSourceCodeRepo) + } + if m.FieldCleared(publisher.FieldLogoURL) { + fields = append(fields, publisher.FieldLogoURL) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PublisherMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *PublisherMutation) ClearField(name string) error { + switch name { + case publisher.FieldDescription: + m.ClearDescription() + return nil + case publisher.FieldWebsite: + m.ClearWebsite() + return nil + case publisher.FieldSupportEmail: + m.ClearSupportEmail() + return nil + case publisher.FieldSourceCodeRepo: + m.ClearSourceCodeRepo() + return nil + case publisher.FieldLogoURL: + m.ClearLogoURL() + return nil + } + return fmt.Errorf("unknown Publisher nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PublisherMutation) ResetField(name string) error { + switch name { + case publisher.FieldCreateTime: + m.ResetCreateTime() + return nil + case publisher.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case publisher.FieldName: + m.ResetName() + return nil + case publisher.FieldDescription: + m.ResetDescription() + return nil + case publisher.FieldWebsite: + m.ResetWebsite() + return nil + case publisher.FieldSupportEmail: + m.ResetSupportEmail() + return nil + case publisher.FieldSourceCodeRepo: + m.ResetSourceCodeRepo() + return nil + case publisher.FieldLogoURL: + m.ResetLogoURL() + return nil + } + return fmt.Errorf("unknown Publisher field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PublisherMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.publisher_permissions != nil { + edges = append(edges, publisher.EdgePublisherPermissions) + } + if m.nodes != nil { + edges = append(edges, publisher.EdgeNodes) + } + if m.personal_access_tokens != nil { + edges = append(edges, publisher.EdgePersonalAccessTokens) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *PublisherMutation) AddedIDs(name string) []ent.Value { + switch name { + case publisher.EdgePublisherPermissions: + ids := make([]ent.Value, 0, len(m.publisher_permissions)) + for id := range m.publisher_permissions { + ids = append(ids, id) + } + return ids + case publisher.EdgeNodes: + ids := make([]ent.Value, 0, len(m.nodes)) + for id := range m.nodes { + ids = append(ids, id) + } + return ids + case publisher.EdgePersonalAccessTokens: + ids := make([]ent.Value, 0, len(m.personal_access_tokens)) + for id := range m.personal_access_tokens { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PublisherMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedpublisher_permissions != nil { + edges = append(edges, publisher.EdgePublisherPermissions) + } + if m.removednodes != nil { + edges = append(edges, publisher.EdgeNodes) + } + if m.removedpersonal_access_tokens != nil { + edges = append(edges, publisher.EdgePersonalAccessTokens) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PublisherMutation) RemovedIDs(name string) []ent.Value { + switch name { + case publisher.EdgePublisherPermissions: + ids := make([]ent.Value, 0, len(m.removedpublisher_permissions)) + for id := range m.removedpublisher_permissions { + ids = append(ids, id) + } + return ids + case publisher.EdgeNodes: + ids := make([]ent.Value, 0, len(m.removednodes)) + for id := range m.removednodes { + ids = append(ids, id) + } + return ids + case publisher.EdgePersonalAccessTokens: + ids := make([]ent.Value, 0, len(m.removedpersonal_access_tokens)) + for id := range m.removedpersonal_access_tokens { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *PublisherMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedpublisher_permissions { + edges = append(edges, publisher.EdgePublisherPermissions) + } + if m.clearednodes { + edges = append(edges, publisher.EdgeNodes) + } + if m.clearedpersonal_access_tokens { + edges = append(edges, publisher.EdgePersonalAccessTokens) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PublisherMutation) EdgeCleared(name string) bool { + switch name { + case publisher.EdgePublisherPermissions: + return m.clearedpublisher_permissions + case publisher.EdgeNodes: + return m.clearednodes + case publisher.EdgePersonalAccessTokens: + return m.clearedpersonal_access_tokens + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PublisherMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Publisher unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PublisherMutation) ResetEdge(name string) error { + switch name { + case publisher.EdgePublisherPermissions: + m.ResetPublisherPermissions() + return nil + case publisher.EdgeNodes: + m.ResetNodes() + return nil + case publisher.EdgePersonalAccessTokens: + m.ResetPersonalAccessTokens() + return nil + } + return fmt.Errorf("unknown Publisher edge %s", name) +} + +// PublisherPermissionMutation represents an operation that mutates the PublisherPermission nodes in the graph. 
+type PublisherPermissionMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	permission    *schema.PublisherPermissionType
+	clearedFields map[string]struct{}
+	// user and publisher double as both the "user_id"/"publisher_id" field
+	// values and the unique edge targets (see SetUserID/UserIDs below).
+	user             *string
+	cleareduser      bool
+	publisher        *string
+	clearedpublisher bool
+	done             bool
+	oldValue         func(context.Context) (*PublisherPermission, error)
+	predicates       []predicate.PublisherPermission
+}
+
+var _ ent.Mutation = (*PublisherPermissionMutation)(nil)
+
+// publisherpermissionOption allows management of the mutation configuration using functional options.
+type publisherpermissionOption func(*PublisherPermissionMutation)
+
+// newPublisherPermissionMutation creates new mutation for the PublisherPermission entity.
+func newPublisherPermissionMutation(c config, op Op, opts ...publisherpermissionOption) *PublisherPermissionMutation {
+	m := &PublisherPermissionMutation{
+		config:        c,
+		op:            op,
+		typ:           TypePublisherPermission,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withPublisherPermissionID sets the ID field of the mutation.
+func withPublisherPermissionID(id int) publisherpermissionOption {
+	return func(m *PublisherPermissionMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *PublisherPermission
+		)
+		m.oldValue = func(ctx context.Context) (*PublisherPermission, error) {
+			// Fetch the pre-mutation entity lazily and at most once;
+			// refuse once the mutation has completed (m.done).
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().PublisherPermission.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withPublisherPermission sets the old PublisherPermission of the mutation.
+func withPublisherPermission(node *PublisherPermission) publisherpermissionOption {
+	return func(m *PublisherPermissionMutation) {
+		m.oldValue = func(context.Context) (*PublisherPermission, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m PublisherPermissionMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m PublisherPermissionMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *PublisherPermissionMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *PublisherPermissionMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		// No concrete ID on a one-row op: fall through to the predicate query.
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().PublisherPermission.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetPermission sets the "permission" field.
+func (m *PublisherPermissionMutation) SetPermission(spt schema.PublisherPermissionType) {
+	m.permission = &spt
+}
+
+// Permission returns the value of the "permission" field in the mutation.
+func (m *PublisherPermissionMutation) Permission() (r schema.PublisherPermissionType, exists bool) {
+	v := m.permission
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldPermission returns the old "permission" field's value of the PublisherPermission entity.
+// If the PublisherPermission object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PublisherPermissionMutation) OldPermission(ctx context.Context) (v schema.PublisherPermissionType, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldPermission is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldPermission requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldPermission: %w", err)
+	}
+	return oldValue.Permission, nil
+}
+
+// ResetPermission resets all changes to the "permission" field.
+func (m *PublisherPermissionMutation) ResetPermission() {
+	m.permission = nil
+}
+
+// SetUserID sets the "user_id" field.
+func (m *PublisherPermissionMutation) SetUserID(s string) {
+	m.user = &s
+}
+
+// UserID returns the value of the "user_id" field in the mutation.
+func (m *PublisherPermissionMutation) UserID() (r string, exists bool) {
+	v := m.user
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUserID returns the old "user_id" field's value of the PublisherPermission entity.
+// If the PublisherPermission object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PublisherPermissionMutation) OldUserID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUserID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUserID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUserID: %w", err)
+	}
+	return oldValue.UserID, nil
+}
+
+// ResetUserID resets all changes to the "user_id" field.
+func (m *PublisherPermissionMutation) ResetUserID() {
+	m.user = nil
+}
+
+// SetPublisherID sets the "publisher_id" field.
+func (m *PublisherPermissionMutation) SetPublisherID(s string) {
+	m.publisher = &s
+}
+
+// PublisherID returns the value of the "publisher_id" field in the mutation.
+func (m *PublisherPermissionMutation) PublisherID() (r string, exists bool) {
+	v := m.publisher
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldPublisherID returns the old "publisher_id" field's value of the PublisherPermission entity.
+// If the PublisherPermission object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PublisherPermissionMutation) OldPublisherID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldPublisherID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldPublisherID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldPublisherID: %w", err)
+	}
+	return oldValue.PublisherID, nil
+}
+
+// ResetPublisherID resets all changes to the "publisher_id" field.
+func (m *PublisherPermissionMutation) ResetPublisherID() {
+	m.publisher = nil
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (m *PublisherPermissionMutation) ClearUser() {
+	m.cleareduser = true
+	// Clearing the edge also marks the backing FK field as cleared.
+	m.clearedFields[publisherpermission.FieldUserID] = struct{}{}
+}
+
+// UserCleared reports if the "user" edge to the User entity was cleared.
+func (m *PublisherPermissionMutation) UserCleared() bool {
+	return m.cleareduser
+}
+
+// UserIDs returns the "user" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// UserID instead. It exists only for internal usage by the builders.
+func (m *PublisherPermissionMutation) UserIDs() (ids []string) {
+	if id := m.user; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetUser resets all changes to the "user" edge.
+func (m *PublisherPermissionMutation) ResetUser() {
+	m.user = nil
+	m.cleareduser = false
+}
+
+// ClearPublisher clears the "publisher" edge to the Publisher entity.
+func (m *PublisherPermissionMutation) ClearPublisher() {
+	m.clearedpublisher = true
+	m.clearedFields[publisherpermission.FieldPublisherID] = struct{}{}
+}
+
+// PublisherCleared reports if the "publisher" edge to the Publisher entity was cleared.
+func (m *PublisherPermissionMutation) PublisherCleared() bool {
+	return m.clearedpublisher
+}
+
+// PublisherIDs returns the "publisher" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// PublisherID instead. It exists only for internal usage by the builders.
+func (m *PublisherPermissionMutation) PublisherIDs() (ids []string) {
+	if id := m.publisher; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetPublisher resets all changes to the "publisher" edge.
+func (m *PublisherPermissionMutation) ResetPublisher() {
+	m.publisher = nil
+	m.clearedpublisher = false
+}
+
+// Where appends a list predicates to the PublisherPermissionMutation builder.
+func (m *PublisherPermissionMutation) Where(ps ...predicate.PublisherPermission) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the PublisherPermissionMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *PublisherPermissionMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.PublisherPermission, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *PublisherPermissionMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *PublisherPermissionMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (PublisherPermission).
+func (m *PublisherPermissionMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *PublisherPermissionMutation) Fields() []string {
+	fields := make([]string, 0, 3)
+	if m.permission != nil {
+		fields = append(fields, publisherpermission.FieldPermission)
+	}
+	if m.user != nil {
+		fields = append(fields, publisherpermission.FieldUserID)
+	}
+	if m.publisher != nil {
+		fields = append(fields, publisherpermission.FieldPublisherID)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *PublisherPermissionMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case publisherpermission.FieldPermission:
+		return m.Permission()
+	case publisherpermission.FieldUserID:
+		return m.UserID()
+	case publisherpermission.FieldPublisherID:
+		return m.PublisherID()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *PublisherPermissionMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case publisherpermission.FieldPermission:
+		return m.OldPermission(ctx)
+	case publisherpermission.FieldUserID:
+		return m.OldUserID(ctx)
+	case publisherpermission.FieldPublisherID:
+		return m.OldPublisherID(ctx)
+	}
+	return nil, fmt.Errorf("unknown PublisherPermission field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *PublisherPermissionMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case publisherpermission.FieldPermission:
+		v, ok := value.(schema.PublisherPermissionType)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetPermission(v)
+		return nil
+	case publisherpermission.FieldUserID:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUserID(v)
+		return nil
+	case publisherpermission.FieldPublisherID:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetPublisherID(v)
+		return nil
+	}
+	return fmt.Errorf("unknown PublisherPermission field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *PublisherPermissionMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *PublisherPermissionMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *PublisherPermissionMutation) AddField(name string, value ent.Value) error {
+	// PublisherPermission has no numeric fields; the empty switch is generated.
+	switch name {
+	}
+	return fmt.Errorf("unknown PublisherPermission numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *PublisherPermissionMutation) ClearedFields() []string {
+	return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *PublisherPermissionMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *PublisherPermissionMutation) ClearField(name string) error {
+	return fmt.Errorf("unknown PublisherPermission nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *PublisherPermissionMutation) ResetField(name string) error {
+	switch name {
+	case publisherpermission.FieldPermission:
+		m.ResetPermission()
+		return nil
+	case publisherpermission.FieldUserID:
+		m.ResetUserID()
+		return nil
+	case publisherpermission.FieldPublisherID:
+		m.ResetPublisherID()
+		return nil
+	}
+	return fmt.Errorf("unknown PublisherPermission field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *PublisherPermissionMutation) AddedEdges() []string {
+	edges := make([]string, 0, 2)
+	if m.user != nil {
+		edges = append(edges, publisherpermission.EdgeUser)
+	}
+	if m.publisher != nil {
+		edges = append(edges, publisherpermission.EdgePublisher)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *PublisherPermissionMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case publisherpermission.EdgeUser:
+		if id := m.user; id != nil {
+			return []ent.Value{*id}
+		}
+	case publisherpermission.EdgePublisher:
+		if id := m.publisher; id != nil {
+			return []ent.Value{*id}
+		}
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *PublisherPermissionMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 2)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *PublisherPermissionMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *PublisherPermissionMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 2)
+	if m.cleareduser {
+		edges = append(edges, publisherpermission.EdgeUser)
+	}
+	if m.clearedpublisher {
+		edges = append(edges, publisherpermission.EdgePublisher)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *PublisherPermissionMutation) EdgeCleared(name string) bool {
+	switch name {
+	case publisherpermission.EdgeUser:
+		return m.cleareduser
+	case publisherpermission.EdgePublisher:
+		return m.clearedpublisher
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *PublisherPermissionMutation) ClearEdge(name string) error {
+	switch name {
+	case publisherpermission.EdgeUser:
+		m.ClearUser()
+		return nil
+	case publisherpermission.EdgePublisher:
+		m.ClearPublisher()
+		return nil
+	}
+	return fmt.Errorf("unknown PublisherPermission unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *PublisherPermissionMutation) ResetEdge(name string) error {
+	switch name {
+	case publisherpermission.EdgeUser:
+		m.ResetUser()
+		return nil
+	case publisherpermission.EdgePublisher:
+		m.ResetPublisher()
+		return nil
+	}
+	return fmt.Errorf("unknown PublisherPermission edge %s", name)
+}
+
+// StorageFileMutation represents an operation that mutates the StorageFile nodes in the graph.
+type StorageFileMutation struct {
+	config
+	op  Op
+	typ string
+	id  *uuid.UUID
+	// Scalar field values are tracked as pointers; nil means "not set in
+	// this mutation" (see the nil checks in CreateTime/BucketName below).
+	create_time   *time.Time
+	update_time   *time.Time
+	bucket_name   *string
+	object_name   *string
+	file_path     *string
+	file_type     *string
+	file_url      *string
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*StorageFile, error)
+	predicates    []predicate.StorageFile
+}
+
+var _ ent.Mutation = (*StorageFileMutation)(nil)
+
+// storagefileOption allows management of the mutation configuration using functional options.
+type storagefileOption func(*StorageFileMutation)
+
+// newStorageFileMutation creates new mutation for the StorageFile entity.
+func newStorageFileMutation(c config, op Op, opts ...storagefileOption) *StorageFileMutation {
+	m := &StorageFileMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeStorageFile,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withStorageFileID sets the ID field of the mutation.
+func withStorageFileID(id uuid.UUID) storagefileOption {
+	return func(m *StorageFileMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *StorageFile
+		)
+		m.oldValue = func(ctx context.Context) (*StorageFile, error) {
+			// Fetch the pre-mutation entity lazily and at most once;
+			// refuse once the mutation has completed (m.done).
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().StorageFile.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withStorageFile sets the old StorageFile of the mutation.
+func withStorageFile(node *StorageFile) storagefileOption {
+	return func(m *StorageFileMutation) {
+		m.oldValue = func(context.Context) (*StorageFile, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m StorageFileMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m StorageFileMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of StorageFile entities.
+func (m *StorageFileMutation) SetID(id uuid.UUID) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *StorageFileMutation) ID() (id uuid.UUID, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *StorageFileMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []uuid.UUID{id}, nil
+		}
+		// No concrete ID on a one-row op: fall through to the predicate query.
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().StorageFile.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreateTime sets the "create_time" field.
+func (m *StorageFileMutation) SetCreateTime(t time.Time) {
+	m.create_time = &t
+}
+
+// CreateTime returns the value of the "create_time" field in the mutation.
+func (m *StorageFileMutation) CreateTime() (r time.Time, exists bool) {
+	v := m.create_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreateTime returns the old "create_time" field's value of the StorageFile entity.
+// If the StorageFile object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *StorageFileMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreateTime is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreateTime requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreateTime: %w", err)
+	}
+	return oldValue.CreateTime, nil
+}
+
+// ResetCreateTime resets all changes to the "create_time" field.
+func (m *StorageFileMutation) ResetCreateTime() {
+	m.create_time = nil
+}
+
+// SetUpdateTime sets the "update_time" field.
+func (m *StorageFileMutation) SetUpdateTime(t time.Time) {
+	m.update_time = &t
+}
+
+// UpdateTime returns the value of the "update_time" field in the mutation.
+func (m *StorageFileMutation) UpdateTime() (r time.Time, exists bool) {
+	v := m.update_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdateTime returns the old "update_time" field's value of the StorageFile entity.
+// If the StorageFile object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *StorageFileMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdateTime requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err)
+	}
+	return oldValue.UpdateTime, nil
+}
+
+// ResetUpdateTime resets all changes to the "update_time" field.
+func (m *StorageFileMutation) ResetUpdateTime() {
+	m.update_time = nil
+}
+
+// SetBucketName sets the "bucket_name" field.
+func (m *StorageFileMutation) SetBucketName(s string) {
+	m.bucket_name = &s
+}
+
+// BucketName returns the value of the "bucket_name" field in the mutation.
+func (m *StorageFileMutation) BucketName() (r string, exists bool) {
+	v := m.bucket_name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldBucketName returns the old "bucket_name" field's value of the StorageFile entity.
+// If the StorageFile object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *StorageFileMutation) OldBucketName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldBucketName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldBucketName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldBucketName: %w", err)
+	}
+	return oldValue.BucketName, nil
+}
+
+// ResetBucketName resets all changes to the "bucket_name" field.
+func (m *StorageFileMutation) ResetBucketName() {
+	m.bucket_name = nil
+}
+
+// SetObjectName sets the "object_name" field.
+func (m *StorageFileMutation) SetObjectName(s string) { + m.object_name = &s +} + +// ObjectName returns the value of the "object_name" field in the mutation. +func (m *StorageFileMutation) ObjectName() (r string, exists bool) { + v := m.object_name + if v == nil { + return + } + return *v, true +} + +// OldObjectName returns the old "object_name" field's value of the StorageFile entity. +// If the StorageFile object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *StorageFileMutation) OldObjectName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldObjectName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldObjectName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldObjectName: %w", err) + } + return oldValue.ObjectName, nil +} + +// ClearObjectName clears the value of the "object_name" field. +func (m *StorageFileMutation) ClearObjectName() { + m.object_name = nil + m.clearedFields[storagefile.FieldObjectName] = struct{}{} +} + +// ObjectNameCleared returns if the "object_name" field was cleared in this mutation. +func (m *StorageFileMutation) ObjectNameCleared() bool { + _, ok := m.clearedFields[storagefile.FieldObjectName] + return ok +} + +// ResetObjectName resets all changes to the "object_name" field. +func (m *StorageFileMutation) ResetObjectName() { + m.object_name = nil + delete(m.clearedFields, storagefile.FieldObjectName) +} + +// SetFilePath sets the "file_path" field. +func (m *StorageFileMutation) SetFilePath(s string) { + m.file_path = &s +} + +// FilePath returns the value of the "file_path" field in the mutation. 
+func (m *StorageFileMutation) FilePath() (r string, exists bool) { + v := m.file_path + if v == nil { + return + } + return *v, true +} + +// OldFilePath returns the old "file_path" field's value of the StorageFile entity. +// If the StorageFile object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *StorageFileMutation) OldFilePath(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFilePath is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFilePath requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFilePath: %w", err) + } + return oldValue.FilePath, nil +} + +// ResetFilePath resets all changes to the "file_path" field. +func (m *StorageFileMutation) ResetFilePath() { + m.file_path = nil +} + +// SetFileType sets the "file_type" field. +func (m *StorageFileMutation) SetFileType(s string) { + m.file_type = &s +} + +// FileType returns the value of the "file_type" field in the mutation. +func (m *StorageFileMutation) FileType() (r string, exists bool) { + v := m.file_type + if v == nil { + return + } + return *v, true +} + +// OldFileType returns the old "file_type" field's value of the StorageFile entity. +// If the StorageFile object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *StorageFileMutation) OldFileType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFileType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFileType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFileType: %w", err) + } + return oldValue.FileType, nil +} + +// ResetFileType resets all changes to the "file_type" field. +func (m *StorageFileMutation) ResetFileType() { + m.file_type = nil +} + +// SetFileURL sets the "file_url" field. +func (m *StorageFileMutation) SetFileURL(s string) { + m.file_url = &s +} + +// FileURL returns the value of the "file_url" field in the mutation. +func (m *StorageFileMutation) FileURL() (r string, exists bool) { + v := m.file_url + if v == nil { + return + } + return *v, true +} + +// OldFileURL returns the old "file_url" field's value of the StorageFile entity. +// If the StorageFile object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *StorageFileMutation) OldFileURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFileURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFileURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFileURL: %w", err) + } + return oldValue.FileURL, nil +} + +// ClearFileURL clears the value of the "file_url" field. +func (m *StorageFileMutation) ClearFileURL() { + m.file_url = nil + m.clearedFields[storagefile.FieldFileURL] = struct{}{} +} + +// FileURLCleared returns if the "file_url" field was cleared in this mutation. 
+func (m *StorageFileMutation) FileURLCleared() bool {
+	_, ok := m.clearedFields[storagefile.FieldFileURL]
+	return ok
+}
+
+// ResetFileURL resets all changes to the "file_url" field.
+func (m *StorageFileMutation) ResetFileURL() {
+	// Drop both the pending value and any pending clear for the field.
+	m.file_url = nil
+	delete(m.clearedFields, storagefile.FieldFileURL)
+}
+
+// Where appends a list predicates to the StorageFileMutation builder.
+func (m *StorageFileMutation) Where(ps ...predicate.StorageFile) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the StorageFileMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *StorageFileMutation) WhereP(ps ...func(*sql.Selector)) {
+	// Each raw selector predicate is wrapped as a typed predicate.StorageFile
+	// (the predicate type is itself func(*sql.Selector)), then appended via Where.
+	p := make([]predicate.StorageFile, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *StorageFileMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *StorageFileMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (StorageFile).
+func (m *StorageFileMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *StorageFileMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.create_time != nil { + fields = append(fields, storagefile.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, storagefile.FieldUpdateTime) + } + if m.bucket_name != nil { + fields = append(fields, storagefile.FieldBucketName) + } + if m.object_name != nil { + fields = append(fields, storagefile.FieldObjectName) + } + if m.file_path != nil { + fields = append(fields, storagefile.FieldFilePath) + } + if m.file_type != nil { + fields = append(fields, storagefile.FieldFileType) + } + if m.file_url != nil { + fields = append(fields, storagefile.FieldFileURL) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *StorageFileMutation) Field(name string) (ent.Value, bool) { + switch name { + case storagefile.FieldCreateTime: + return m.CreateTime() + case storagefile.FieldUpdateTime: + return m.UpdateTime() + case storagefile.FieldBucketName: + return m.BucketName() + case storagefile.FieldObjectName: + return m.ObjectName() + case storagefile.FieldFilePath: + return m.FilePath() + case storagefile.FieldFileType: + return m.FileType() + case storagefile.FieldFileURL: + return m.FileURL() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *StorageFileMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case storagefile.FieldCreateTime: + return m.OldCreateTime(ctx) + case storagefile.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case storagefile.FieldBucketName: + return m.OldBucketName(ctx) + case storagefile.FieldObjectName: + return m.OldObjectName(ctx) + case storagefile.FieldFilePath: + return m.OldFilePath(ctx) + case storagefile.FieldFileType: + return m.OldFileType(ctx) + case storagefile.FieldFileURL: + return m.OldFileURL(ctx) + } + return nil, fmt.Errorf("unknown StorageFile field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *StorageFileMutation) SetField(name string, value ent.Value) error { + switch name { + case storagefile.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case storagefile.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case storagefile.FieldBucketName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBucketName(v) + return nil + case storagefile.FieldObjectName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetObjectName(v) + return nil + case storagefile.FieldFilePath: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFilePath(v) + return nil + case storagefile.FieldFileType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFileType(v) + return nil + case 
storagefile.FieldFileURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFileURL(v) + return nil + } + return fmt.Errorf("unknown StorageFile field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *StorageFileMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *StorageFileMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *StorageFileMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown StorageFile numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *StorageFileMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(storagefile.FieldObjectName) { + fields = append(fields, storagefile.FieldObjectName) + } + if m.FieldCleared(storagefile.FieldFileURL) { + fields = append(fields, storagefile.FieldFileURL) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *StorageFileMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *StorageFileMutation) ClearField(name string) error {
+	// Only the two nullable fields (object_name, file_url) may be cleared;
+	// anything else is rejected as a non-nullable or unknown field.
+	switch name {
+	case storagefile.FieldObjectName:
+		m.ClearObjectName()
+		return nil
+	case storagefile.FieldFileURL:
+		m.ClearFileURL()
+		return nil
+	}
+	return fmt.Errorf("unknown StorageFile nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *StorageFileMutation) ResetField(name string) error {
+	switch name {
+	case storagefile.FieldCreateTime:
+		m.ResetCreateTime()
+		return nil
+	case storagefile.FieldUpdateTime:
+		m.ResetUpdateTime()
+		return nil
+	case storagefile.FieldBucketName:
+		m.ResetBucketName()
+		return nil
+	case storagefile.FieldObjectName:
+		m.ResetObjectName()
+		return nil
+	case storagefile.FieldFilePath:
+		m.ResetFilePath()
+		return nil
+	case storagefile.FieldFileType:
+		m.ResetFileType()
+		return nil
+	case storagefile.FieldFileURL:
+		m.ResetFileURL()
+		return nil
+	}
+	return fmt.Errorf("unknown StorageFile field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *StorageFileMutation) AddedEdges() []string {
+	// StorageFile declares no edges in the schema, hence the empty,
+	// zero-capacity slice emitted by the generator.
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *StorageFileMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *StorageFileMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *StorageFileMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *StorageFileMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *StorageFileMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *StorageFileMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown StorageFile unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *StorageFileMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown StorageFile edge %s", name) +} + +// UserMutation represents an operation that mutates the User nodes in the graph. +type UserMutation struct { + config + op Op + typ string + id *string + create_time *time.Time + update_time *time.Time + email *string + name *string + is_approved *bool + is_admin *bool + clearedFields map[string]struct{} + publisher_permissions map[int]struct{} + removedpublisher_permissions map[int]struct{} + clearedpublisher_permissions bool + done bool + oldValue func(context.Context) (*User, error) + predicates []predicate.User +} + +var _ ent.Mutation = (*UserMutation)(nil) + +// userOption allows management of the mutation configuration using functional options. +type userOption func(*UserMutation) + +// newUserMutation creates new mutation for the User entity. +func newUserMutation(c config, op Op, opts ...userOption) *UserMutation { + m := &UserMutation{ + config: c, + op: op, + typ: TypeUser, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserID sets the ID field of the mutation. 
+func withUserID(id string) userOption { + return func(m *UserMutation) { + var ( + err error + once sync.Once + value *User + ) + m.oldValue = func(ctx context.Context) (*User, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().User.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUser sets the old User of the mutation. +func withUser(node *User) userOption { + return func(m *UserMutation) { + m.oldValue = func(context.Context) (*User, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of User entities. +func (m *UserMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UserMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().User.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreateTime sets the "create_time" field. +func (m *UserMutation) SetCreateTime(t time.Time) { + m.create_time = &t +} + +// CreateTime returns the value of the "create_time" field in the mutation. +func (m *UserMutation) CreateTime() (r time.Time, exists bool) { + v := m.create_time + if v == nil { + return + } + return *v, true +} + +// OldCreateTime returns the old "create_time" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldCreateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreateTime: %w", err) + } + return oldValue.CreateTime, nil +} + +// ResetCreateTime resets all changes to the "create_time" field. +func (m *UserMutation) ResetCreateTime() { + m.create_time = nil +} + +// SetUpdateTime sets the "update_time" field. 
+func (m *UserMutation) SetUpdateTime(t time.Time) { + m.update_time = &t +} + +// UpdateTime returns the value of the "update_time" field in the mutation. +func (m *UserMutation) UpdateTime() (r time.Time, exists bool) { + v := m.update_time + if v == nil { + return + } + return *v, true +} + +// OldUpdateTime returns the old "update_time" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdateTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdateTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdateTime: %w", err) + } + return oldValue.UpdateTime, nil +} + +// ResetUpdateTime resets all changes to the "update_time" field. +func (m *UserMutation) ResetUpdateTime() { + m.update_time = nil +} + +// SetEmail sets the "email" field. +func (m *UserMutation) SetEmail(s string) { + m.email = &s +} + +// Email returns the value of the "email" field in the mutation. +func (m *UserMutation) Email() (r string, exists bool) { + v := m.email + if v == nil { + return + } + return *v, true +} + +// OldEmail returns the old "email" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmail: %w", err) + } + return oldValue.Email, nil +} + +// ClearEmail clears the value of the "email" field. +func (m *UserMutation) ClearEmail() { + m.email = nil + m.clearedFields[user.FieldEmail] = struct{}{} +} + +// EmailCleared returns if the "email" field was cleared in this mutation. +func (m *UserMutation) EmailCleared() bool { + _, ok := m.clearedFields[user.FieldEmail] + return ok +} + +// ResetEmail resets all changes to the "email" field. +func (m *UserMutation) ResetEmail() { + m.email = nil + delete(m.clearedFields, user.FieldEmail) +} + +// SetName sets the "name" field. +func (m *UserMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *UserMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ClearName clears the value of the "name" field. +func (m *UserMutation) ClearName() { + m.name = nil + m.clearedFields[user.FieldName] = struct{}{} +} + +// NameCleared returns if the "name" field was cleared in this mutation. +func (m *UserMutation) NameCleared() bool { + _, ok := m.clearedFields[user.FieldName] + return ok +} + +// ResetName resets all changes to the "name" field. +func (m *UserMutation) ResetName() { + m.name = nil + delete(m.clearedFields, user.FieldName) +} + +// SetIsApproved sets the "is_approved" field. +func (m *UserMutation) SetIsApproved(b bool) { + m.is_approved = &b +} + +// IsApproved returns the value of the "is_approved" field in the mutation. +func (m *UserMutation) IsApproved() (r bool, exists bool) { + v := m.is_approved + if v == nil { + return + } + return *v, true +} + +// OldIsApproved returns the old "is_approved" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldIsApproved(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsApproved is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsApproved requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsApproved: %w", err) + } + return oldValue.IsApproved, nil +} + +// ResetIsApproved resets all changes to the "is_approved" field. +func (m *UserMutation) ResetIsApproved() { + m.is_approved = nil +} + +// SetIsAdmin sets the "is_admin" field. +func (m *UserMutation) SetIsAdmin(b bool) { + m.is_admin = &b +} + +// IsAdmin returns the value of the "is_admin" field in the mutation. +func (m *UserMutation) IsAdmin() (r bool, exists bool) { + v := m.is_admin + if v == nil { + return + } + return *v, true +} + +// OldIsAdmin returns the old "is_admin" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldIsAdmin(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsAdmin is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsAdmin requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsAdmin: %w", err) + } + return oldValue.IsAdmin, nil +} + +// ResetIsAdmin resets all changes to the "is_admin" field. +func (m *UserMutation) ResetIsAdmin() { + m.is_admin = nil +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by ids. 
+func (m *UserMutation) AddPublisherPermissionIDs(ids ...int) {
+	// Lazily allocate the added-IDs set; duplicates collapse via the map.
+	if m.publisher_permissions == nil {
+		m.publisher_permissions = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.publisher_permissions[ids[i]] = struct{}{}
+	}
+}
+
+// ClearPublisherPermissions clears the "publisher_permissions" edge to the PublisherPermission entity.
+func (m *UserMutation) ClearPublisherPermissions() {
+	m.clearedpublisher_permissions = true
+}
+
+// PublisherPermissionsCleared reports if the "publisher_permissions" edge to the PublisherPermission entity was cleared.
+func (m *UserMutation) PublisherPermissionsCleared() bool {
+	return m.clearedpublisher_permissions
+}
+
+// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to the PublisherPermission entity by IDs.
+func (m *UserMutation) RemovePublisherPermissionIDs(ids ...int) {
+	if m.removedpublisher_permissions == nil {
+		m.removedpublisher_permissions = make(map[int]struct{})
+	}
+	for i := range ids {
+		// A removal also cancels any pending add of the same ID before
+		// recording it in the removed set.
+		delete(m.publisher_permissions, ids[i])
+		m.removedpublisher_permissions[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPublisherPermissions returns the removed IDs of the "publisher_permissions" edge to the PublisherPermission entity.
+// NOTE(review): the doc comment above drops the "IDs" suffix of the function
+// name; this matches upstream ent codegen output — regenerate rather than
+// hand-edit if it is ever to change.
+func (m *UserMutation) RemovedPublisherPermissionsIDs() (ids []int) {
+	// Map iteration order is random, so the returned order is unspecified.
+	for id := range m.removedpublisher_permissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PublisherPermissionsIDs returns the "publisher_permissions" edge IDs in the mutation.
+func (m *UserMutation) PublisherPermissionsIDs() (ids []int) {
+	for id := range m.publisher_permissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPublisherPermissions resets all changes to the "publisher_permissions" edge.
+func (m *UserMutation) ResetPublisherPermissions() {
+	m.publisher_permissions = nil
+	m.clearedpublisher_permissions = false
+	m.removedpublisher_permissions = nil
+}
+
+// Where appends a list predicates to the UserMutation builder.
+func (m *UserMutation) Where(ps ...predicate.User) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.User, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (User). +func (m *UserMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.create_time != nil { + fields = append(fields, user.FieldCreateTime) + } + if m.update_time != nil { + fields = append(fields, user.FieldUpdateTime) + } + if m.email != nil { + fields = append(fields, user.FieldEmail) + } + if m.name != nil { + fields = append(fields, user.FieldName) + } + if m.is_approved != nil { + fields = append(fields, user.FieldIsApproved) + } + if m.is_admin != nil { + fields = append(fields, user.FieldIsAdmin) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *UserMutation) Field(name string) (ent.Value, bool) { + switch name { + case user.FieldCreateTime: + return m.CreateTime() + case user.FieldUpdateTime: + return m.UpdateTime() + case user.FieldEmail: + return m.Email() + case user.FieldName: + return m.Name() + case user.FieldIsApproved: + return m.IsApproved() + case user.FieldIsAdmin: + return m.IsAdmin() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case user.FieldCreateTime: + return m.OldCreateTime(ctx) + case user.FieldUpdateTime: + return m.OldUpdateTime(ctx) + case user.FieldEmail: + return m.OldEmail(ctx) + case user.FieldName: + return m.OldName(ctx) + case user.FieldIsApproved: + return m.OldIsApproved(ctx) + case user.FieldIsAdmin: + return m.OldIsAdmin(ctx) + } + return nil, fmt.Errorf("unknown User field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldCreateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreateTime(v) + return nil + case user.FieldUpdateTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdateTime(v) + return nil + case user.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case user.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case user.FieldIsApproved: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsApproved(v) + return nil + case user.FieldIsAdmin: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsAdmin(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(user.FieldEmail) { + fields = append(fields, user.FieldEmail) + } + if m.FieldCleared(user.FieldName) { + fields = append(fields, user.FieldName) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + switch name { + case user.FieldEmail: + m.ClearEmail() + return nil + case user.FieldName: + m.ClearName() + return nil + } + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldCreateTime: + m.ResetCreateTime() + return nil + case user.FieldUpdateTime: + m.ResetUpdateTime() + return nil + case user.FieldEmail: + m.ResetEmail() + return nil + case user.FieldName: + m.ResetName() + return nil + case user.FieldIsApproved: + m.ResetIsApproved() + return nil + case user.FieldIsAdmin: + m.ResetIsAdmin() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.publisher_permissions != nil { + edges = append(edges, user.EdgePublisherPermissions) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserMutation) AddedIDs(name string) []ent.Value { + switch name { + case user.EdgePublisherPermissions: + ids := make([]ent.Value, 0, len(m.publisher_permissions)) + for id := range m.publisher_permissions { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedpublisher_permissions != nil { + edges = append(edges, user.EdgePublisherPermissions) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserMutation) RemovedIDs(name string) []ent.Value { + switch name { + case user.EdgePublisherPermissions: + ids := make([]ent.Value, 0, len(m.removedpublisher_permissions)) + for id := range m.removedpublisher_permissions { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedpublisher_permissions { + edges = append(edges, user.EdgePublisherPermissions) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + switch name { + case user.EdgePublisherPermissions: + return m.clearedpublisher_permissions + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *UserMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserMutation) ResetEdge(name string) error { + switch name { + case user.EdgePublisherPermissions: + m.ResetPublisherPermissions() + return nil + } + return fmt.Errorf("unknown User edge %s", name) +} diff --git a/ent/node.go b/ent/node.go new file mode 100644 index 0000000..00f0a94 --- /dev/null +++ b/ent/node.go @@ -0,0 +1,253 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/publisher" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Node is the model entity for the Node schema. +type Node struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. + UpdateTime time.Time `json:"update_time,omitempty"` + // PublisherID holds the value of the "publisher_id" field. + PublisherID string `json:"publisher_id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // Author holds the value of the "author" field. + Author string `json:"author,omitempty"` + // License holds the value of the "license" field. + License string `json:"license,omitempty"` + // RepositoryURL holds the value of the "repository_url" field. + RepositoryURL string `json:"repository_url,omitempty"` + // IconURL holds the value of the "icon_url" field. 
+ IconURL string `json:"icon_url,omitempty"` + // Tags holds the value of the "tags" field. + Tags []string `json:"tags,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the NodeQuery when eager-loading is set. + Edges NodeEdges `json:"edges"` + selectValues sql.SelectValues +} + +// NodeEdges holds the relations/edges for other nodes in the graph. +type NodeEdges struct { + // Publisher holds the value of the publisher edge. + Publisher *Publisher `json:"publisher,omitempty"` + // Versions holds the value of the versions edge. + Versions []*NodeVersion `json:"versions,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// PublisherOrErr returns the Publisher value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e NodeEdges) PublisherOrErr() (*Publisher, error) { + if e.Publisher != nil { + return e.Publisher, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: publisher.Label} + } + return nil, &NotLoadedError{edge: "publisher"} +} + +// VersionsOrErr returns the Versions value or an error if the edge +// was not loaded in eager-loading. +func (e NodeEdges) VersionsOrErr() ([]*NodeVersion, error) { + if e.loadedTypes[1] { + return e.Versions, nil + } + return nil, &NotLoadedError{edge: "versions"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Node) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case node.FieldTags: + values[i] = new([]byte) + case node.FieldID, node.FieldPublisherID, node.FieldName, node.FieldDescription, node.FieldAuthor, node.FieldLicense, node.FieldRepositoryURL, node.FieldIconURL: + values[i] = new(sql.NullString) + case node.FieldCreateTime, node.FieldUpdateTime: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Node fields. +func (n *Node) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case node.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + n.ID = value.String + } + case node.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + n.CreateTime = value.Time + } + case node.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + n.UpdateTime = value.Time + } + case node.FieldPublisherID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field publisher_id", values[i]) + } else if value.Valid { + n.PublisherID = value.String + } + case node.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + n.Name = value.String + } + case node.FieldDescription: + if value, ok := 
values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + n.Description = value.String + } + case node.FieldAuthor: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field author", values[i]) + } else if value.Valid { + n.Author = value.String + } + case node.FieldLicense: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field license", values[i]) + } else if value.Valid { + n.License = value.String + } + case node.FieldRepositoryURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field repository_url", values[i]) + } else if value.Valid { + n.RepositoryURL = value.String + } + case node.FieldIconURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field icon_url", values[i]) + } else if value.Valid { + n.IconURL = value.String + } + case node.FieldTags: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field tags", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &n.Tags); err != nil { + return fmt.Errorf("unmarshal field tags: %w", err) + } + } + default: + n.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Node. +// This includes values selected through modifiers, order, etc. +func (n *Node) Value(name string) (ent.Value, error) { + return n.selectValues.Get(name) +} + +// QueryPublisher queries the "publisher" edge of the Node entity. +func (n *Node) QueryPublisher() *PublisherQuery { + return NewNodeClient(n.config).QueryPublisher(n) +} + +// QueryVersions queries the "versions" edge of the Node entity. 
+func (n *Node) QueryVersions() *NodeVersionQuery { + return NewNodeClient(n.config).QueryVersions(n) +} + +// Update returns a builder for updating this Node. +// Note that you need to call Node.Unwrap() before calling this method if this Node +// was returned from a transaction, and the transaction was committed or rolled back. +func (n *Node) Update() *NodeUpdateOne { + return NewNodeClient(n.config).UpdateOne(n) +} + +// Unwrap unwraps the Node entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (n *Node) Unwrap() *Node { + _tx, ok := n.config.driver.(*txDriver) + if !ok { + panic("ent: Node is not a transactional entity") + } + n.config.driver = _tx.drv + return n +} + +// String implements the fmt.Stringer. +func (n *Node) String() string { + var builder strings.Builder + builder.WriteString("Node(") + builder.WriteString(fmt.Sprintf("id=%v, ", n.ID)) + builder.WriteString("create_time=") + builder.WriteString(n.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(n.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("publisher_id=") + builder.WriteString(n.PublisherID) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(n.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(n.Description) + builder.WriteString(", ") + builder.WriteString("author=") + builder.WriteString(n.Author) + builder.WriteString(", ") + builder.WriteString("license=") + builder.WriteString(n.License) + builder.WriteString(", ") + builder.WriteString("repository_url=") + builder.WriteString(n.RepositoryURL) + builder.WriteString(", ") + builder.WriteString("icon_url=") + builder.WriteString(n.IconURL) + builder.WriteString(", ") + builder.WriteString("tags=") + builder.WriteString(fmt.Sprintf("%v", n.Tags)) + 
builder.WriteByte(')') + return builder.String() +} + +// Nodes is a parsable slice of Node. +type Nodes []*Node diff --git a/ent/node/node.go b/ent/node/node.go new file mode 100644 index 0000000..2fe6325 --- /dev/null +++ b/ent/node/node.go @@ -0,0 +1,181 @@ +// Code generated by ent, DO NOT EDIT. + +package node + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the node type in the database. + Label = "node" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldPublisherID holds the string denoting the publisher_id field in the database. + FieldPublisherID = "publisher_id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldAuthor holds the string denoting the author field in the database. + FieldAuthor = "author" + // FieldLicense holds the string denoting the license field in the database. + FieldLicense = "license" + // FieldRepositoryURL holds the string denoting the repository_url field in the database. + FieldRepositoryURL = "repository_url" + // FieldIconURL holds the string denoting the icon_url field in the database. + FieldIconURL = "icon_url" + // FieldTags holds the string denoting the tags field in the database. + FieldTags = "tags" + // EdgePublisher holds the string denoting the publisher edge name in mutations. + EdgePublisher = "publisher" + // EdgeVersions holds the string denoting the versions edge name in mutations. 
+ EdgeVersions = "versions" + // Table holds the table name of the node in the database. + Table = "nodes" + // PublisherTable is the table that holds the publisher relation/edge. + PublisherTable = "nodes" + // PublisherInverseTable is the table name for the Publisher entity. + // It exists in this package in order to avoid circular dependency with the "publisher" package. + PublisherInverseTable = "publishers" + // PublisherColumn is the table column denoting the publisher relation/edge. + PublisherColumn = "publisher_id" + // VersionsTable is the table that holds the versions relation/edge. + VersionsTable = "node_versions" + // VersionsInverseTable is the table name for the NodeVersion entity. + // It exists in this package in order to avoid circular dependency with the "nodeversion" package. + VersionsInverseTable = "node_versions" + // VersionsColumn is the table column denoting the versions relation/edge. + VersionsColumn = "node_id" +) + +// Columns holds all SQL columns for node fields. +var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldPublisherID, + FieldName, + FieldDescription, + FieldAuthor, + FieldLicense, + FieldRepositoryURL, + FieldIconURL, + FieldTags, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // DefaultTags holds the default value on creation for the "tags" field. 
+ DefaultTags []string +) + +// OrderOption defines the ordering options for the Node queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. +func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByPublisherID orders the results by the publisher_id field. +func ByPublisherID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPublisherID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByAuthor orders the results by the author field. +func ByAuthor(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthor, opts...).ToFunc() +} + +// ByLicense orders the results by the license field. +func ByLicense(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLicense, opts...).ToFunc() +} + +// ByRepositoryURL orders the results by the repository_url field. +func ByRepositoryURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRepositoryURL, opts...).ToFunc() +} + +// ByIconURL orders the results by the icon_url field. 
+func ByIconURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIconURL, opts...).ToFunc() +} + +// ByPublisherField orders the results by publisher field. +func ByPublisherField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPublisherStep(), sql.OrderByField(field, opts...)) + } +} + +// ByVersionsCount orders the results by versions count. +func ByVersionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newVersionsStep(), opts...) + } +} + +// ByVersions orders the results by versions terms. +func ByVersions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newVersionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newPublisherStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PublisherInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn), + ) +} +func newVersionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(VersionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, VersionsTable, VersionsColumn), + ) +} diff --git a/ent/node/where.go b/ent/node/where.go new file mode 100644 index 0000000..6260d08 --- /dev/null +++ b/ent/node/where.go @@ -0,0 +1,737 @@ +// Code generated by ent, DO NOT EDIT. + +package node + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ. 
+func UpdateTime(v time.Time) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldUpdateTime, v)) +} + +// PublisherID applies equality check predicate on the "publisher_id" field. It's identical to PublisherIDEQ. +func PublisherID(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldPublisherID, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldDescription, v)) +} + +// Author applies equality check predicate on the "author" field. It's identical to AuthorEQ. +func Author(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldAuthor, v)) +} + +// License applies equality check predicate on the "license" field. It's identical to LicenseEQ. +func License(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldLicense, v)) +} + +// RepositoryURL applies equality check predicate on the "repository_url" field. It's identical to RepositoryURLEQ. +func RepositoryURL(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldRepositoryURL, v)) +} + +// IconURL applies equality check predicate on the "icon_url" field. It's identical to IconURLEQ. +func IconURL(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldIconURL, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. +func CreateTimeEQ(v time.Time) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. 
+func CreateTimeIn(vs ...time.Time) predicate.Node { + return predicate.Node(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. +func CreateTimeNotIn(vs ...time.Time) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. +func CreateTimeGT(v time.Time) predicate.Node { + return predicate.Node(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.Node { + return predicate.Node(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. +func UpdateTimeEQ(v time.Time) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. +func UpdateTimeNEQ(v time.Time) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.Node { + return predicate.Node(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. 
+func UpdateTimeGT(v time.Time) predicate.Node { + return predicate.Node(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. +func UpdateTimeLT(v time.Time) predicate.Node { + return predicate.Node(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldUpdateTime, v)) +} + +// PublisherIDEQ applies the EQ predicate on the "publisher_id" field. +func PublisherIDEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldPublisherID, v)) +} + +// PublisherIDNEQ applies the NEQ predicate on the "publisher_id" field. +func PublisherIDNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldPublisherID, v)) +} + +// PublisherIDIn applies the In predicate on the "publisher_id" field. +func PublisherIDIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldPublisherID, vs...)) +} + +// PublisherIDNotIn applies the NotIn predicate on the "publisher_id" field. +func PublisherIDNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldPublisherID, vs...)) +} + +// PublisherIDGT applies the GT predicate on the "publisher_id" field. +func PublisherIDGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldPublisherID, v)) +} + +// PublisherIDGTE applies the GTE predicate on the "publisher_id" field. +func PublisherIDGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldPublisherID, v)) +} + +// PublisherIDLT applies the LT predicate on the "publisher_id" field. 
+func PublisherIDLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldPublisherID, v)) +} + +// PublisherIDLTE applies the LTE predicate on the "publisher_id" field. +func PublisherIDLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldPublisherID, v)) +} + +// PublisherIDContains applies the Contains predicate on the "publisher_id" field. +func PublisherIDContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldPublisherID, v)) +} + +// PublisherIDHasPrefix applies the HasPrefix predicate on the "publisher_id" field. +func PublisherIDHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldPublisherID, v)) +} + +// PublisherIDHasSuffix applies the HasSuffix predicate on the "publisher_id" field. +func PublisherIDHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldPublisherID, v)) +} + +// PublisherIDEqualFold applies the EqualFold predicate on the "publisher_id" field. +func PublisherIDEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldPublisherID, v)) +} + +// PublisherIDContainsFold applies the ContainsFold predicate on the "publisher_id" field. +func PublisherIDContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldPublisherID, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. 
+func NameNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. 
+func DescriptionNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. 
+func DescriptionIsNil() predicate.Node { + return predicate.Node(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.Node { + return predicate.Node(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldDescription, v)) +} + +// AuthorEQ applies the EQ predicate on the "author" field. +func AuthorEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldAuthor, v)) +} + +// AuthorNEQ applies the NEQ predicate on the "author" field. +func AuthorNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldAuthor, v)) +} + +// AuthorIn applies the In predicate on the "author" field. +func AuthorIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldAuthor, vs...)) +} + +// AuthorNotIn applies the NotIn predicate on the "author" field. +func AuthorNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldAuthor, vs...)) +} + +// AuthorGT applies the GT predicate on the "author" field. +func AuthorGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldAuthor, v)) +} + +// AuthorGTE applies the GTE predicate on the "author" field. +func AuthorGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldAuthor, v)) +} + +// AuthorLT applies the LT predicate on the "author" field. +func AuthorLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldAuthor, v)) +} + +// AuthorLTE applies the LTE predicate on the "author" field. 
+func AuthorLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldAuthor, v)) +} + +// AuthorContains applies the Contains predicate on the "author" field. +func AuthorContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldAuthor, v)) +} + +// AuthorHasPrefix applies the HasPrefix predicate on the "author" field. +func AuthorHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldAuthor, v)) +} + +// AuthorHasSuffix applies the HasSuffix predicate on the "author" field. +func AuthorHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldAuthor, v)) +} + +// AuthorIsNil applies the IsNil predicate on the "author" field. +func AuthorIsNil() predicate.Node { + return predicate.Node(sql.FieldIsNull(FieldAuthor)) +} + +// AuthorNotNil applies the NotNil predicate on the "author" field. +func AuthorNotNil() predicate.Node { + return predicate.Node(sql.FieldNotNull(FieldAuthor)) +} + +// AuthorEqualFold applies the EqualFold predicate on the "author" field. +func AuthorEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldAuthor, v)) +} + +// AuthorContainsFold applies the ContainsFold predicate on the "author" field. +func AuthorContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldAuthor, v)) +} + +// LicenseEQ applies the EQ predicate on the "license" field. +func LicenseEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldLicense, v)) +} + +// LicenseNEQ applies the NEQ predicate on the "license" field. +func LicenseNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldLicense, v)) +} + +// LicenseIn applies the In predicate on the "license" field. +func LicenseIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldLicense, vs...)) +} + +// LicenseNotIn applies the NotIn predicate on the "license" field. 
+func LicenseNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldLicense, vs...)) +} + +// LicenseGT applies the GT predicate on the "license" field. +func LicenseGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldLicense, v)) +} + +// LicenseGTE applies the GTE predicate on the "license" field. +func LicenseGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldLicense, v)) +} + +// LicenseLT applies the LT predicate on the "license" field. +func LicenseLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldLicense, v)) +} + +// LicenseLTE applies the LTE predicate on the "license" field. +func LicenseLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldLicense, v)) +} + +// LicenseContains applies the Contains predicate on the "license" field. +func LicenseContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldLicense, v)) +} + +// LicenseHasPrefix applies the HasPrefix predicate on the "license" field. +func LicenseHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldLicense, v)) +} + +// LicenseHasSuffix applies the HasSuffix predicate on the "license" field. +func LicenseHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldLicense, v)) +} + +// LicenseEqualFold applies the EqualFold predicate on the "license" field. +func LicenseEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldLicense, v)) +} + +// LicenseContainsFold applies the ContainsFold predicate on the "license" field. +func LicenseContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldLicense, v)) +} + +// RepositoryURLEQ applies the EQ predicate on the "repository_url" field. 
+func RepositoryURLEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldRepositoryURL, v)) +} + +// RepositoryURLNEQ applies the NEQ predicate on the "repository_url" field. +func RepositoryURLNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldRepositoryURL, v)) +} + +// RepositoryURLIn applies the In predicate on the "repository_url" field. +func RepositoryURLIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldRepositoryURL, vs...)) +} + +// RepositoryURLNotIn applies the NotIn predicate on the "repository_url" field. +func RepositoryURLNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldRepositoryURL, vs...)) +} + +// RepositoryURLGT applies the GT predicate on the "repository_url" field. +func RepositoryURLGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldRepositoryURL, v)) +} + +// RepositoryURLGTE applies the GTE predicate on the "repository_url" field. +func RepositoryURLGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldRepositoryURL, v)) +} + +// RepositoryURLLT applies the LT predicate on the "repository_url" field. +func RepositoryURLLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldRepositoryURL, v)) +} + +// RepositoryURLLTE applies the LTE predicate on the "repository_url" field. +func RepositoryURLLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldRepositoryURL, v)) +} + +// RepositoryURLContains applies the Contains predicate on the "repository_url" field. +func RepositoryURLContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldRepositoryURL, v)) +} + +// RepositoryURLHasPrefix applies the HasPrefix predicate on the "repository_url" field. 
+func RepositoryURLHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldRepositoryURL, v)) +} + +// RepositoryURLHasSuffix applies the HasSuffix predicate on the "repository_url" field. +func RepositoryURLHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldRepositoryURL, v)) +} + +// RepositoryURLEqualFold applies the EqualFold predicate on the "repository_url" field. +func RepositoryURLEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldRepositoryURL, v)) +} + +// RepositoryURLContainsFold applies the ContainsFold predicate on the "repository_url" field. +func RepositoryURLContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldRepositoryURL, v)) +} + +// IconURLEQ applies the EQ predicate on the "icon_url" field. +func IconURLEQ(v string) predicate.Node { + return predicate.Node(sql.FieldEQ(FieldIconURL, v)) +} + +// IconURLNEQ applies the NEQ predicate on the "icon_url" field. +func IconURLNEQ(v string) predicate.Node { + return predicate.Node(sql.FieldNEQ(FieldIconURL, v)) +} + +// IconURLIn applies the In predicate on the "icon_url" field. +func IconURLIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldIn(FieldIconURL, vs...)) +} + +// IconURLNotIn applies the NotIn predicate on the "icon_url" field. +func IconURLNotIn(vs ...string) predicate.Node { + return predicate.Node(sql.FieldNotIn(FieldIconURL, vs...)) +} + +// IconURLGT applies the GT predicate on the "icon_url" field. +func IconURLGT(v string) predicate.Node { + return predicate.Node(sql.FieldGT(FieldIconURL, v)) +} + +// IconURLGTE applies the GTE predicate on the "icon_url" field. +func IconURLGTE(v string) predicate.Node { + return predicate.Node(sql.FieldGTE(FieldIconURL, v)) +} + +// IconURLLT applies the LT predicate on the "icon_url" field. 
+func IconURLLT(v string) predicate.Node { + return predicate.Node(sql.FieldLT(FieldIconURL, v)) +} + +// IconURLLTE applies the LTE predicate on the "icon_url" field. +func IconURLLTE(v string) predicate.Node { + return predicate.Node(sql.FieldLTE(FieldIconURL, v)) +} + +// IconURLContains applies the Contains predicate on the "icon_url" field. +func IconURLContains(v string) predicate.Node { + return predicate.Node(sql.FieldContains(FieldIconURL, v)) +} + +// IconURLHasPrefix applies the HasPrefix predicate on the "icon_url" field. +func IconURLHasPrefix(v string) predicate.Node { + return predicate.Node(sql.FieldHasPrefix(FieldIconURL, v)) +} + +// IconURLHasSuffix applies the HasSuffix predicate on the "icon_url" field. +func IconURLHasSuffix(v string) predicate.Node { + return predicate.Node(sql.FieldHasSuffix(FieldIconURL, v)) +} + +// IconURLIsNil applies the IsNil predicate on the "icon_url" field. +func IconURLIsNil() predicate.Node { + return predicate.Node(sql.FieldIsNull(FieldIconURL)) +} + +// IconURLNotNil applies the NotNil predicate on the "icon_url" field. +func IconURLNotNil() predicate.Node { + return predicate.Node(sql.FieldNotNull(FieldIconURL)) +} + +// IconURLEqualFold applies the EqualFold predicate on the "icon_url" field. +func IconURLEqualFold(v string) predicate.Node { + return predicate.Node(sql.FieldEqualFold(FieldIconURL, v)) +} + +// IconURLContainsFold applies the ContainsFold predicate on the "icon_url" field. +func IconURLContainsFold(v string) predicate.Node { + return predicate.Node(sql.FieldContainsFold(FieldIconURL, v)) +} + +// HasPublisher applies the HasEdge predicate on the "publisher" edge. 
+func HasPublisher() predicate.Node { + return predicate.Node(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPublisherWith applies the HasEdge predicate on the "publisher" edge with a given conditions (other predicates). +func HasPublisherWith(preds ...predicate.Publisher) predicate.Node { + return predicate.Node(func(s *sql.Selector) { + step := newPublisherStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasVersions applies the HasEdge predicate on the "versions" edge. +func HasVersions() predicate.Node { + return predicate.Node(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, VersionsTable, VersionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasVersionsWith applies the HasEdge predicate on the "versions" edge with a given conditions (other predicates). +func HasVersionsWith(preds ...predicate.NodeVersion) predicate.Node { + return predicate.Node(func(s *sql.Selector) { + step := newVersionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Node) predicate.Node { + return predicate.Node(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Node) predicate.Node { + return predicate.Node(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Node) predicate.Node { + return predicate.Node(sql.NotPredicates(p)) +} diff --git a/ent/node_create.go b/ent/node_create.go new file mode 100644 index 0000000..e413ea8 --- /dev/null +++ b/ent/node_create.go @@ -0,0 +1,1097 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/publisher" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// NodeCreate is the builder for creating a Node entity. +type NodeCreate struct { + config + mutation *NodeMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreateTime sets the "create_time" field. +func (nc *NodeCreate) SetCreateTime(t time.Time) *NodeCreate { + nc.mutation.SetCreateTime(t) + return nc +} + +// SetNillableCreateTime sets the "create_time" field if the given value is not nil. +func (nc *NodeCreate) SetNillableCreateTime(t *time.Time) *NodeCreate { + if t != nil { + nc.SetCreateTime(*t) + } + return nc +} + +// SetUpdateTime sets the "update_time" field. +func (nc *NodeCreate) SetUpdateTime(t time.Time) *NodeCreate { + nc.mutation.SetUpdateTime(t) + return nc +} + +// SetNillableUpdateTime sets the "update_time" field if the given value is not nil. +func (nc *NodeCreate) SetNillableUpdateTime(t *time.Time) *NodeCreate { + if t != nil { + nc.SetUpdateTime(*t) + } + return nc +} + +// SetPublisherID sets the "publisher_id" field. +func (nc *NodeCreate) SetPublisherID(s string) *NodeCreate { + nc.mutation.SetPublisherID(s) + return nc +} + +// SetName sets the "name" field. +func (nc *NodeCreate) SetName(s string) *NodeCreate { + nc.mutation.SetName(s) + return nc +} + +// SetDescription sets the "description" field. 
+func (nc *NodeCreate) SetDescription(s string) *NodeCreate { + nc.mutation.SetDescription(s) + return nc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (nc *NodeCreate) SetNillableDescription(s *string) *NodeCreate { + if s != nil { + nc.SetDescription(*s) + } + return nc +} + +// SetAuthor sets the "author" field. +func (nc *NodeCreate) SetAuthor(s string) *NodeCreate { + nc.mutation.SetAuthor(s) + return nc +} + +// SetNillableAuthor sets the "author" field if the given value is not nil. +func (nc *NodeCreate) SetNillableAuthor(s *string) *NodeCreate { + if s != nil { + nc.SetAuthor(*s) + } + return nc +} + +// SetLicense sets the "license" field. +func (nc *NodeCreate) SetLicense(s string) *NodeCreate { + nc.mutation.SetLicense(s) + return nc +} + +// SetRepositoryURL sets the "repository_url" field. +func (nc *NodeCreate) SetRepositoryURL(s string) *NodeCreate { + nc.mutation.SetRepositoryURL(s) + return nc +} + +// SetIconURL sets the "icon_url" field. +func (nc *NodeCreate) SetIconURL(s string) *NodeCreate { + nc.mutation.SetIconURL(s) + return nc +} + +// SetNillableIconURL sets the "icon_url" field if the given value is not nil. +func (nc *NodeCreate) SetNillableIconURL(s *string) *NodeCreate { + if s != nil { + nc.SetIconURL(*s) + } + return nc +} + +// SetTags sets the "tags" field. +func (nc *NodeCreate) SetTags(s []string) *NodeCreate { + nc.mutation.SetTags(s) + return nc +} + +// SetID sets the "id" field. +func (nc *NodeCreate) SetID(s string) *NodeCreate { + nc.mutation.SetID(s) + return nc +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. +func (nc *NodeCreate) SetPublisher(p *Publisher) *NodeCreate { + return nc.SetPublisherID(p.ID) +} + +// AddVersionIDs adds the "versions" edge to the NodeVersion entity by IDs. +func (nc *NodeCreate) AddVersionIDs(ids ...uuid.UUID) *NodeCreate { + nc.mutation.AddVersionIDs(ids...) 
+ return nc +} + +// AddVersions adds the "versions" edges to the NodeVersion entity. +func (nc *NodeCreate) AddVersions(n ...*NodeVersion) *NodeCreate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return nc.AddVersionIDs(ids...) +} + +// Mutation returns the NodeMutation object of the builder. +func (nc *NodeCreate) Mutation() *NodeMutation { + return nc.mutation +} + +// Save creates the Node in the database. +func (nc *NodeCreate) Save(ctx context.Context) (*Node, error) { + nc.defaults() + return withHooks(ctx, nc.sqlSave, nc.mutation, nc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (nc *NodeCreate) SaveX(ctx context.Context) *Node { + v, err := nc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (nc *NodeCreate) Exec(ctx context.Context) error { + _, err := nc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nc *NodeCreate) ExecX(ctx context.Context) { + if err := nc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nc *NodeCreate) defaults() { + if _, ok := nc.mutation.CreateTime(); !ok { + v := node.DefaultCreateTime() + nc.mutation.SetCreateTime(v) + } + if _, ok := nc.mutation.UpdateTime(); !ok { + v := node.DefaultUpdateTime() + nc.mutation.SetUpdateTime(v) + } + if _, ok := nc.mutation.Tags(); !ok { + v := node.DefaultTags + nc.mutation.SetTags(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nc *NodeCreate) check() error { + if _, ok := nc.mutation.CreateTime(); !ok { + return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "Node.create_time"`)} + } + if _, ok := nc.mutation.UpdateTime(); !ok { + return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "Node.update_time"`)} + } + if _, ok := nc.mutation.PublisherID(); !ok { + return &ValidationError{Name: "publisher_id", err: errors.New(`ent: missing required field "Node.publisher_id"`)} + } + if _, ok := nc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Node.name"`)} + } + if _, ok := nc.mutation.License(); !ok { + return &ValidationError{Name: "license", err: errors.New(`ent: missing required field "Node.license"`)} + } + if _, ok := nc.mutation.RepositoryURL(); !ok { + return &ValidationError{Name: "repository_url", err: errors.New(`ent: missing required field "Node.repository_url"`)} + } + if _, ok := nc.mutation.Tags(); !ok { + return &ValidationError{Name: "tags", err: errors.New(`ent: missing required field "Node.tags"`)} + } + if _, ok := nc.mutation.PublisherID(); !ok { + return &ValidationError{Name: "publisher", err: errors.New(`ent: missing required edge "Node.publisher"`)} + } + return nil +} + +func (nc *NodeCreate) sqlSave(ctx context.Context) (*Node, error) { + if err := nc.check(); err != nil { + return nil, err + } + _node, _spec := nc.createSpec() + if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Node.ID type: %T", _spec.ID.Value) + } + } + nc.mutation.id = &_node.ID + nc.mutation.done = true + return _node, nil +} + +func (nc *NodeCreate) createSpec() (*Node, 
*sqlgraph.CreateSpec) { + var ( + _node = &Node{config: nc.config} + _spec = sqlgraph.NewCreateSpec(node.Table, sqlgraph.NewFieldSpec(node.FieldID, field.TypeString)) + ) + _spec.OnConflict = nc.conflict + if id, ok := nc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := nc.mutation.CreateTime(); ok { + _spec.SetField(node.FieldCreateTime, field.TypeTime, value) + _node.CreateTime = value + } + if value, ok := nc.mutation.UpdateTime(); ok { + _spec.SetField(node.FieldUpdateTime, field.TypeTime, value) + _node.UpdateTime = value + } + if value, ok := nc.mutation.Name(); ok { + _spec.SetField(node.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := nc.mutation.Description(); ok { + _spec.SetField(node.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := nc.mutation.Author(); ok { + _spec.SetField(node.FieldAuthor, field.TypeString, value) + _node.Author = value + } + if value, ok := nc.mutation.License(); ok { + _spec.SetField(node.FieldLicense, field.TypeString, value) + _node.License = value + } + if value, ok := nc.mutation.RepositoryURL(); ok { + _spec.SetField(node.FieldRepositoryURL, field.TypeString, value) + _node.RepositoryURL = value + } + if value, ok := nc.mutation.IconURL(); ok { + _spec.SetField(node.FieldIconURL, field.TypeString, value) + _node.IconURL = value + } + if value, ok := nc.mutation.Tags(); ok { + _spec.SetField(node.FieldTags, field.TypeJSON, value) + _node.Tags = value + } + if nodes := nc.mutation.PublisherIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: node.PublisherTable, + Columns: []string{node.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.PublisherID = nodes[0] + _spec.Edges = append(_spec.Edges, 
edge) + } + if nodes := nc.mutation.VersionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: node.VersionsTable, + Columns: []string{node.VersionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Node.Create(). +// SetCreateTime(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NodeUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (nc *NodeCreate) OnConflict(opts ...sql.ConflictOption) *NodeUpsertOne { + nc.conflict = opts + return &NodeUpsertOne{ + create: nc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (nc *NodeCreate) OnConflictColumns(columns ...string) *NodeUpsertOne { + nc.conflict = append(nc.conflict, sql.ConflictColumns(columns...)) + return &NodeUpsertOne{ + create: nc, + } +} + +type ( + // NodeUpsertOne is the builder for "upsert"-ing + // one Node node. + NodeUpsertOne struct { + create *NodeCreate + } + + // NodeUpsert is the "OnConflict" setter. + NodeUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdateTime sets the "update_time" field. +func (u *NodeUpsert) SetUpdateTime(v time.Time) *NodeUpsert { + u.Set(node.FieldUpdateTime, v) + return u +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. 
+func (u *NodeUpsert) UpdateUpdateTime() *NodeUpsert { + u.SetExcluded(node.FieldUpdateTime) + return u +} + +// SetPublisherID sets the "publisher_id" field. +func (u *NodeUpsert) SetPublisherID(v string) *NodeUpsert { + u.Set(node.FieldPublisherID, v) + return u +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. +func (u *NodeUpsert) UpdatePublisherID() *NodeUpsert { + u.SetExcluded(node.FieldPublisherID) + return u +} + +// SetName sets the "name" field. +func (u *NodeUpsert) SetName(v string) *NodeUpsert { + u.Set(node.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *NodeUpsert) UpdateName() *NodeUpsert { + u.SetExcluded(node.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *NodeUpsert) SetDescription(v string) *NodeUpsert { + u.Set(node.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *NodeUpsert) UpdateDescription() *NodeUpsert { + u.SetExcluded(node.FieldDescription) + return u +} + +// ClearDescription clears the value of the "description" field. +func (u *NodeUpsert) ClearDescription() *NodeUpsert { + u.SetNull(node.FieldDescription) + return u +} + +// SetAuthor sets the "author" field. +func (u *NodeUpsert) SetAuthor(v string) *NodeUpsert { + u.Set(node.FieldAuthor, v) + return u +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *NodeUpsert) UpdateAuthor() *NodeUpsert { + u.SetExcluded(node.FieldAuthor) + return u +} + +// ClearAuthor clears the value of the "author" field. +func (u *NodeUpsert) ClearAuthor() *NodeUpsert { + u.SetNull(node.FieldAuthor) + return u +} + +// SetLicense sets the "license" field. 
+func (u *NodeUpsert) SetLicense(v string) *NodeUpsert { + u.Set(node.FieldLicense, v) + return u +} + +// UpdateLicense sets the "license" field to the value that was provided on create. +func (u *NodeUpsert) UpdateLicense() *NodeUpsert { + u.SetExcluded(node.FieldLicense) + return u +} + +// SetRepositoryURL sets the "repository_url" field. +func (u *NodeUpsert) SetRepositoryURL(v string) *NodeUpsert { + u.Set(node.FieldRepositoryURL, v) + return u +} + +// UpdateRepositoryURL sets the "repository_url" field to the value that was provided on create. +func (u *NodeUpsert) UpdateRepositoryURL() *NodeUpsert { + u.SetExcluded(node.FieldRepositoryURL) + return u +} + +// SetIconURL sets the "icon_url" field. +func (u *NodeUpsert) SetIconURL(v string) *NodeUpsert { + u.Set(node.FieldIconURL, v) + return u +} + +// UpdateIconURL sets the "icon_url" field to the value that was provided on create. +func (u *NodeUpsert) UpdateIconURL() *NodeUpsert { + u.SetExcluded(node.FieldIconURL) + return u +} + +// ClearIconURL clears the value of the "icon_url" field. +func (u *NodeUpsert) ClearIconURL() *NodeUpsert { + u.SetNull(node.FieldIconURL) + return u +} + +// SetTags sets the "tags" field. +func (u *NodeUpsert) SetTags(v []string) *NodeUpsert { + u.Set(node.FieldTags, v) + return u +} + +// UpdateTags sets the "tags" field to the value that was provided on create. +func (u *NodeUpsert) UpdateTags() *NodeUpsert { + u.SetExcluded(node.FieldTags) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(node.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *NodeUpsertOne) UpdateNewValues() *NodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(node.FieldID) + } + if _, exists := u.create.mutation.CreateTime(); exists { + s.SetIgnore(node.FieldCreateTime) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NodeUpsertOne) Ignore() *NodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NodeUpsertOne) DoNothing() *NodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NodeCreate.OnConflict +// documentation for more info. +func (u *NodeUpsertOne) Update(set func(*NodeUpsert)) *NodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *NodeUpsertOne) SetUpdateTime(v time.Time) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateUpdateTime() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetPublisherID sets the "publisher_id" field. 
+func (u *NodeUpsertOne) SetPublisherID(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetPublisherID(v) + }) +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdatePublisherID() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdatePublisherID() + }) +} + +// SetName sets the "name" field. +func (u *NodeUpsertOne) SetName(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateName() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *NodeUpsertOne) SetDescription(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateDescription() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *NodeUpsertOne) ClearDescription() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.ClearDescription() + }) +} + +// SetAuthor sets the "author" field. +func (u *NodeUpsertOne) SetAuthor(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetAuthor(v) + }) +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateAuthor() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateAuthor() + }) +} + +// ClearAuthor clears the value of the "author" field. +func (u *NodeUpsertOne) ClearAuthor() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.ClearAuthor() + }) +} + +// SetLicense sets the "license" field. 
+func (u *NodeUpsertOne) SetLicense(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetLicense(v) + }) +} + +// UpdateLicense sets the "license" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateLicense() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateLicense() + }) +} + +// SetRepositoryURL sets the "repository_url" field. +func (u *NodeUpsertOne) SetRepositoryURL(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetRepositoryURL(v) + }) +} + +// UpdateRepositoryURL sets the "repository_url" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateRepositoryURL() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateRepositoryURL() + }) +} + +// SetIconURL sets the "icon_url" field. +func (u *NodeUpsertOne) SetIconURL(v string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetIconURL(v) + }) +} + +// UpdateIconURL sets the "icon_url" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateIconURL() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateIconURL() + }) +} + +// ClearIconURL clears the value of the "icon_url" field. +func (u *NodeUpsertOne) ClearIconURL() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.ClearIconURL() + }) +} + +// SetTags sets the "tags" field. +func (u *NodeUpsertOne) SetTags(v []string) *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.SetTags(v) + }) +} + +// UpdateTags sets the "tags" field to the value that was provided on create. +func (u *NodeUpsertOne) UpdateTags() *NodeUpsertOne { + return u.Update(func(s *NodeUpsert) { + s.UpdateTags() + }) +} + +// Exec executes the query. +func (u *NodeUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NodeCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *NodeUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *NodeUpsertOne) ID(ctx context.Context) (id string, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: NodeUpsertOne.ID is not supported by MySQL driver. Use NodeUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *NodeUpsertOne) IDX(ctx context.Context) string { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// NodeCreateBulk is the builder for creating many Node entities in bulk. +type NodeCreateBulk struct { + config + err error + builders []*NodeCreate + conflict []sql.ConflictOption +} + +// Save creates the Node entities in the database. 
+func (ncb *NodeCreateBulk) Save(ctx context.Context) ([]*Node, error) { + if ncb.err != nil { + return nil, ncb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ncb.builders)) + nodes := make([]*Node, len(ncb.builders)) + mutators := make([]Mutator, len(ncb.builders)) + for i := range ncb.builders { + func(i int, root context.Context) { + builder := ncb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*NodeMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ncb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ncb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ncb *NodeCreateBulk) SaveX(ctx context.Context) []*Node { + v, err := ncb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ncb *NodeCreateBulk) Exec(ctx context.Context) error { + _, err := ncb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ncb *NodeCreateBulk) ExecX(ctx context.Context) { + if err := ncb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Node.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NodeUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (ncb *NodeCreateBulk) OnConflict(opts ...sql.ConflictOption) *NodeUpsertBulk { + ncb.conflict = opts + return &NodeUpsertBulk{ + create: ncb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ncb *NodeCreateBulk) OnConflictColumns(columns ...string) *NodeUpsertBulk { + ncb.conflict = append(ncb.conflict, sql.ConflictColumns(columns...)) + return &NodeUpsertBulk{ + create: ncb, + } +} + +// NodeUpsertBulk is the builder for "upsert"-ing +// a bulk of Node nodes. +type NodeUpsertBulk struct { + create *NodeCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(node.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *NodeUpsertBulk) UpdateNewValues() *NodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(node.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(node.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Node.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NodeUpsertBulk) Ignore() *NodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NodeUpsertBulk) DoNothing() *NodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NodeCreateBulk.OnConflict +// documentation for more info. +func (u *NodeUpsertBulk) Update(set func(*NodeUpsert)) *NodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *NodeUpsertBulk) SetUpdateTime(v time.Time) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateUpdateTime() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetPublisherID sets the "publisher_id" field. 
+func (u *NodeUpsertBulk) SetPublisherID(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetPublisherID(v) + }) +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdatePublisherID() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdatePublisherID() + }) +} + +// SetName sets the "name" field. +func (u *NodeUpsertBulk) SetName(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateName() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *NodeUpsertBulk) SetDescription(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateDescription() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *NodeUpsertBulk) ClearDescription() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.ClearDescription() + }) +} + +// SetAuthor sets the "author" field. +func (u *NodeUpsertBulk) SetAuthor(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetAuthor(v) + }) +} + +// UpdateAuthor sets the "author" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateAuthor() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateAuthor() + }) +} + +// ClearAuthor clears the value of the "author" field. +func (u *NodeUpsertBulk) ClearAuthor() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.ClearAuthor() + }) +} + +// SetLicense sets the "license" field. 
+func (u *NodeUpsertBulk) SetLicense(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetLicense(v) + }) +} + +// UpdateLicense sets the "license" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateLicense() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateLicense() + }) +} + +// SetRepositoryURL sets the "repository_url" field. +func (u *NodeUpsertBulk) SetRepositoryURL(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetRepositoryURL(v) + }) +} + +// UpdateRepositoryURL sets the "repository_url" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateRepositoryURL() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateRepositoryURL() + }) +} + +// SetIconURL sets the "icon_url" field. +func (u *NodeUpsertBulk) SetIconURL(v string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetIconURL(v) + }) +} + +// UpdateIconURL sets the "icon_url" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateIconURL() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateIconURL() + }) +} + +// ClearIconURL clears the value of the "icon_url" field. +func (u *NodeUpsertBulk) ClearIconURL() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.ClearIconURL() + }) +} + +// SetTags sets the "tags" field. +func (u *NodeUpsertBulk) SetTags(v []string) *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.SetTags(v) + }) +} + +// UpdateTags sets the "tags" field to the value that was provided on create. +func (u *NodeUpsertBulk) UpdateTags() *NodeUpsertBulk { + return u.Update(func(s *NodeUpsert) { + s.UpdateTags() + }) +} + +// Exec executes the query. 
+func (u *NodeUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the NodeCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NodeCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *NodeUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/node_delete.go b/ent/node_delete.go new file mode 100644 index 0000000..1943cb4 --- /dev/null +++ b/ent/node_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/node" + "registry-backend/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// NodeDelete is the builder for deleting a Node entity. +type NodeDelete struct { + config + hooks []Hook + mutation *NodeMutation +} + +// Where appends a list predicates to the NodeDelete builder. +func (nd *NodeDelete) Where(ps ...predicate.Node) *NodeDelete { + nd.mutation.Where(ps...) + return nd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (nd *NodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, nd.sqlExec, nd.mutation, nd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (nd *NodeDelete) ExecX(ctx context.Context) int { + n, err := nd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (nd *NodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(node.Table, sqlgraph.NewFieldSpec(node.FieldID, field.TypeString)) + if ps := nd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + nd.mutation.done = true + return affected, err +} + +// NodeDeleteOne is the builder for deleting a single Node entity. +type NodeDeleteOne struct { + nd *NodeDelete +} + +// Where appends a list predicates to the NodeDelete builder. +func (ndo *NodeDeleteOne) Where(ps ...predicate.Node) *NodeDeleteOne { + ndo.nd.mutation.Where(ps...) + return ndo +} + +// Exec executes the deletion query. +func (ndo *NodeDeleteOne) Exec(ctx context.Context) error { + n, err := ndo.nd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{node.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ndo *NodeDeleteOne) ExecX(ctx context.Context) { + if err := ndo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/node_query.go b/ent/node_query.go new file mode 100644 index 0000000..89eec59 --- /dev/null +++ b/ent/node_query.go @@ -0,0 +1,718 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// NodeQuery is the builder for querying Node entities. +type NodeQuery struct { + config + ctx *QueryContext + order []node.OrderOption + inters []Interceptor + predicates []predicate.Node + withPublisher *PublisherQuery + withVersions *NodeVersionQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the NodeQuery builder. +func (nq *NodeQuery) Where(ps ...predicate.Node) *NodeQuery { + nq.predicates = append(nq.predicates, ps...) + return nq +} + +// Limit the number of records to be returned by this query. +func (nq *NodeQuery) Limit(limit int) *NodeQuery { + nq.ctx.Limit = &limit + return nq +} + +// Offset to start from. +func (nq *NodeQuery) Offset(offset int) *NodeQuery { + nq.ctx.Offset = &offset + return nq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (nq *NodeQuery) Unique(unique bool) *NodeQuery { + nq.ctx.Unique = &unique + return nq +} + +// Order specifies how the records should be ordered. +func (nq *NodeQuery) Order(o ...node.OrderOption) *NodeQuery { + nq.order = append(nq.order, o...) + return nq +} + +// QueryPublisher chains the current query on the "publisher" edge. 
+func (nq *NodeQuery) QueryPublisher() *PublisherQuery { + query := (&PublisherClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(node.Table, node.FieldID, selector), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, node.PublisherTable, node.PublisherColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryVersions chains the current query on the "versions" edge. +func (nq *NodeQuery) QueryVersions() *NodeVersionQuery { + query := (&NodeVersionClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(node.Table, node.FieldID, selector), + sqlgraph.To(nodeversion.Table, nodeversion.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, node.VersionsTable, node.VersionsColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Node entity from the query. +// Returns a *NotFoundError when no Node was found. +func (nq *NodeQuery) First(ctx context.Context) (*Node, error) { + nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{node.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (nq *NodeQuery) FirstX(ctx context.Context) *Node { + node, err := nq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Node ID from the query. +// Returns a *NotFoundError when no Node ID was found. +func (nq *NodeQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{node.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (nq *NodeQuery) FirstIDX(ctx context.Context) string { + id, err := nq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Node entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Node entity is found. +// Returns a *NotFoundError when no Node entities are found. +func (nq *NodeQuery) Only(ctx context.Context) (*Node, error) { + nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{node.Label} + default: + return nil, &NotSingularError{node.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (nq *NodeQuery) OnlyX(ctx context.Context) *Node { + node, err := nq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Node ID in the query. +// Returns a *NotSingularError when more than one Node ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (nq *NodeQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{node.Label} + default: + err = &NotSingularError{node.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (nq *NodeQuery) OnlyIDX(ctx context.Context) string { + id, err := nq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Nodes. +func (nq *NodeQuery) All(ctx context.Context) ([]*Node, error) { + ctx = setContextOp(ctx, nq.ctx, "All") + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Node, *NodeQuery]() + return withInterceptors[[]*Node](ctx, nq, qr, nq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (nq *NodeQuery) AllX(ctx context.Context) []*Node { + nodes, err := nq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Node IDs. +func (nq *NodeQuery) IDs(ctx context.Context) (ids []string, err error) { + if nq.ctx.Unique == nil && nq.path != nil { + nq.Unique(true) + } + ctx = setContextOp(ctx, nq.ctx, "IDs") + if err = nq.Select(node.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (nq *NodeQuery) IDsX(ctx context.Context) []string { + ids, err := nq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (nq *NodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, nq.ctx, "Count") + if err := nq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, nq, querierCount[*NodeQuery](), nq.inters) +} + +// CountX is like Count, but panics if an error occurs. 
+func (nq *NodeQuery) CountX(ctx context.Context) int { + count, err := nq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (nq *NodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, nq.ctx, "Exist") + switch _, err := nq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (nq *NodeQuery) ExistX(ctx context.Context) bool { + exist, err := nq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the NodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (nq *NodeQuery) Clone() *NodeQuery { + if nq == nil { + return nil + } + return &NodeQuery{ + config: nq.config, + ctx: nq.ctx.Clone(), + order: append([]node.OrderOption{}, nq.order...), + inters: append([]Interceptor{}, nq.inters...), + predicates: append([]predicate.Node{}, nq.predicates...), + withPublisher: nq.withPublisher.Clone(), + withVersions: nq.withVersions.Clone(), + // clone intermediate query. + sql: nq.sql.Clone(), + path: nq.path, + } +} + +// WithPublisher tells the query-builder to eager-load the nodes that are connected to +// the "publisher" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NodeQuery) WithPublisher(opts ...func(*PublisherQuery)) *NodeQuery { + query := (&PublisherClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withPublisher = query + return nq +} + +// WithVersions tells the query-builder to eager-load the nodes that are connected to +// the "versions" edge. The optional arguments are used to configure the query builder of the edge. 
+func (nq *NodeQuery) WithVersions(opts ...func(*NodeVersionQuery)) *NodeQuery { + query := (&NodeVersionClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withVersions = query + return nq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Node.Query(). +// GroupBy(node.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (nq *NodeQuery) GroupBy(field string, fields ...string) *NodeGroupBy { + nq.ctx.Fields = append([]string{field}, fields...) + grbuild := &NodeGroupBy{build: nq} + grbuild.flds = &nq.ctx.Fields + grbuild.label = node.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.Node.Query(). +// Select(node.FieldCreateTime). +// Scan(ctx, &v) +func (nq *NodeQuery) Select(fields ...string) *NodeSelect { + nq.ctx.Fields = append(nq.ctx.Fields, fields...) + sbuild := &NodeSelect{NodeQuery: nq} + sbuild.label = node.Label + sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a NodeSelect configured with the given aggregations. +func (nq *NodeQuery) Aggregate(fns ...AggregateFunc) *NodeSelect { + return nq.Select().Aggregate(fns...) 
}

// prepareQuery runs the registered interceptors, validates the selected
// field names against the node schema, and resolves any deferred sub-query
// path into nq.sql before execution.
func (nq *NodeQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range nq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, nq); err != nil {
				return err
			}
		}
	}
	for _, f := range nq.ctx.Fields {
		if !node.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if nq.path != nil {
		prev, err := nq.path(ctx)
		if err != nil {
			return err
		}
		nq.sql = prev
	}
	return nil
}

// sqlAll scans all matching Node rows and eager-loads the requested
// "publisher" and "versions" edges.
func (nq *NodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Node, error) {
	var (
		nodes       = []*Node{}
		_spec       = nq.querySpec()
		// loadedTypes records which edges were requested, so OrErr helpers
		// can distinguish "not loaded" from "loaded but absent".
		loadedTypes = [2]bool{
			nq.withPublisher != nil,
			nq.withVersions != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Node).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &Node{config: nq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(nq.modifiers) > 0 {
		_spec.Modifiers = nq.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := nq.withPublisher; query != nil {
		if err := nq.loadPublisher(ctx, query, nodes, nil,
			func(n *Node, e *Publisher) { n.Edges.Publisher = e }); err != nil {
			return nil, err
		}
	}
	if query := nq.withVersions; query != nil {
		if err := nq.loadVersions(ctx, query, nodes,
			func(n *Node) { n.Edges.Versions = []*NodeVersion{} },
			func(n *Node, e *NodeVersion) { n.Edges.Versions = append(n.Edges.Versions, e) }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// loadPublisher batch-loads the M2O "publisher" edge for the given nodes
// using a single IN query keyed by Node.PublisherID.
func (nq *NodeQuery) loadPublisher(ctx context.Context, query *PublisherQuery, nodes []*Node, init func(*Node), assign func(*Node, *Publisher)) error {
	ids := make([]string, 0, len(nodes))
	nodeids := make(map[string][]*Node)
	for i := range nodes {
		fk := nodes[i].PublisherID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(publisher.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "publisher_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}

// loadVersions batch-loads the O2M "versions" edge for the given nodes,
// selecting the node_id FK so each NodeVersion can be assigned back to
// its parent Node.
func (nq *NodeQuery) loadVersions(ctx context.Context, query *NodeVersionQuery, nodes []*Node, init func(*Node), assign func(*Node, *NodeVersion)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[string]*Node)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	query.withFKs = true
	if len(query.ctx.Fields) > 0 {
		// Ensure the FK column is selected even under a custom field set.
		query.ctx.AppendFieldOnce(nodeversion.FieldNodeID)
	}
	query.Where(predicate.NodeVersion(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(node.VersionsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.NodeID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "node_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}

// sqlCount counts matching Node rows, honoring any modifiers, field
// selection, and uniqueness settings on the query.
func (nq *NodeQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := nq.querySpec()
	if len(nq.modifiers) > 0 {
		_spec.Modifiers = nq.modifiers
	}
	_spec.Node.Columns = nq.ctx.Fields
	if len(nq.ctx.Fields) > 0 {
		_spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, nq.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, limit,
// offset, order) into a sqlgraph.QuerySpec.
func (nq *NodeQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(node.Table, node.Columns, sqlgraph.NewFieldSpec(node.FieldID, field.TypeString))
	_spec.From = nq.sql
	if unique := nq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if nq.path != nil {
		_spec.Unique = true
	}
	if fields := nq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first.
		_spec.Node.Columns = append(_spec.Node.Columns, node.FieldID)
		for i := range fields {
			if fields[i] != node.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if nq.withPublisher != nil {
			// Eager-loading the publisher edge requires its FK column.
			_spec.Node.AddColumnOnce(node.FieldPublisherID)
		}
	}
	if ps := nq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := nq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := nq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := nq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

// sqlQuery builds the raw SQL selector for this query, applying modifiers,
// predicates, ordering, limit, and offset.
func (nq *NodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(nq.driver.Dialect())
	t1 := builder.Table(node.Table)
	columns := nq.ctx.Fields
	if len(columns) == 0 {
		columns = node.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if nq.sql != nil {
		selector = nq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if nq.ctx.Unique != nil && *nq.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range nq.modifiers {
		m(selector)
	}
	for _, p := range nq.predicates {
		p(selector)
	}
	for _, p := range nq.order {
		p(selector)
	}
	if offset := nq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := nq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}

// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (nq *NodeQuery) ForUpdate(opts ...sql.LockOption) *NodeQuery {
	if nq.driver.Dialect() == dialect.Postgres {
		nq.Unique(false)
	}
	nq.modifiers = append(nq.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return nq
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (nq *NodeQuery) ForShare(opts ...sql.LockOption) *NodeQuery {
	if nq.driver.Dialect() == dialect.Postgres {
		nq.Unique(false)
	}
	nq.modifiers = append(nq.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return nq
}

// NodeGroupBy is the group-by builder for Node entities.
type NodeGroupBy struct {
	selector
	build *NodeQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (ngb *NodeGroupBy) Aggregate(fns ...AggregateFunc) *NodeGroupBy {
	ngb.fns = append(ngb.fns, fns...)
	return ngb
}

// Scan applies the selector query and scans the result into the given value.
func (ngb *NodeGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy")
	if err := ngb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*NodeQuery, *NodeGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v)
}

// sqlScan executes the group-by query: grouped columns plus any aggregation
// expressions are selected, and the rows are scanned into v.
func (ngb *NodeGroupBy) sqlScan(ctx context.Context, root *NodeQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(ngb.fns))
	for _, fn := range ngb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns))
		for _, f := range *ngb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*ngb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// NodeSelect is the builder for selecting fields of Node entities.
type NodeSelect struct {
	*NodeQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (ns *NodeSelect) Aggregate(fns ...AggregateFunc) *NodeSelect {
	ns.fns = append(ns.fns, fns...)
	return ns
}

// Scan applies the selector query and scans the result into the given value.
func (ns *NodeSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, ns.ctx, "Select")
	if err := ns.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*NodeQuery, *NodeSelect](ctx, ns.NodeQuery, ns, ns.inters, v)
}

// sqlScan executes the select query, appending any aggregation expressions
// to the selected columns, and scans the rows into v.
func (ns *NodeSelect) sqlScan(ctx context.Context, root *NodeQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(ns.fns))
	for _, fn := range ns.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*ns.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := ns.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
diff --git a/ent/node_update.go b/ent/node_update.go
new file mode 100644
index 0000000..9fb6016
--- /dev/null
+++ b/ent/node_update.go
@@ -0,0 +1,806 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"registry-backend/ent/node"
	"registry-backend/ent/nodeversion"
	"registry-backend/ent/predicate"
	"registry-backend/ent/publisher"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/dialect/sql/sqljson"
	"entgo.io/ent/schema/field"
	"github.com/google/uuid"
)

// NodeUpdate is the builder for updating Node entities.
type NodeUpdate struct {
	config
	hooks    []Hook
	mutation *NodeMutation
}

// Where appends a list of predicates to the NodeUpdate builder.
func (nu *NodeUpdate) Where(ps ...predicate.Node) *NodeUpdate {
	nu.mutation.Where(ps...)
	return nu
}

// SetUpdateTime sets the "update_time" field.
func (nu *NodeUpdate) SetUpdateTime(t time.Time) *NodeUpdate {
	nu.mutation.SetUpdateTime(t)
	return nu
}

// SetPublisherID sets the "publisher_id" field.
func (nu *NodeUpdate) SetPublisherID(s string) *NodeUpdate {
	nu.mutation.SetPublisherID(s)
	return nu
}

// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil.
func (nu *NodeUpdate) SetNillablePublisherID(s *string) *NodeUpdate {
	if s != nil {
		nu.SetPublisherID(*s)
	}
	return nu
}

// SetName sets the "name" field.
func (nu *NodeUpdate) SetName(s string) *NodeUpdate {
	nu.mutation.SetName(s)
	return nu
}

// SetNillableName sets the "name" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableName(s *string) *NodeUpdate {
	if s != nil {
		nu.SetName(*s)
	}
	return nu
}

// SetDescription sets the "description" field.
func (nu *NodeUpdate) SetDescription(s string) *NodeUpdate {
	nu.mutation.SetDescription(s)
	return nu
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableDescription(s *string) *NodeUpdate {
	if s != nil {
		nu.SetDescription(*s)
	}
	return nu
}

// ClearDescription clears the value of the "description" field.
func (nu *NodeUpdate) ClearDescription() *NodeUpdate {
	nu.mutation.ClearDescription()
	return nu
}

// SetAuthor sets the "author" field.
func (nu *NodeUpdate) SetAuthor(s string) *NodeUpdate {
	nu.mutation.SetAuthor(s)
	return nu
}

// SetNillableAuthor sets the "author" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableAuthor(s *string) *NodeUpdate {
	if s != nil {
		nu.SetAuthor(*s)
	}
	return nu
}

// ClearAuthor clears the value of the "author" field.
func (nu *NodeUpdate) ClearAuthor() *NodeUpdate {
	nu.mutation.ClearAuthor()
	return nu
}

// SetLicense sets the "license" field.
func (nu *NodeUpdate) SetLicense(s string) *NodeUpdate {
	nu.mutation.SetLicense(s)
	return nu
}

// SetNillableLicense sets the "license" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableLicense(s *string) *NodeUpdate {
	if s != nil {
		nu.SetLicense(*s)
	}
	return nu
}

// SetRepositoryURL sets the "repository_url" field.
func (nu *NodeUpdate) SetRepositoryURL(s string) *NodeUpdate {
	nu.mutation.SetRepositoryURL(s)
	return nu
}

// SetNillableRepositoryURL sets the "repository_url" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableRepositoryURL(s *string) *NodeUpdate {
	if s != nil {
		nu.SetRepositoryURL(*s)
	}
	return nu
}

// SetIconURL sets the "icon_url" field.
func (nu *NodeUpdate) SetIconURL(s string) *NodeUpdate {
	nu.mutation.SetIconURL(s)
	return nu
}

// SetNillableIconURL sets the "icon_url" field if the given value is not nil.
func (nu *NodeUpdate) SetNillableIconURL(s *string) *NodeUpdate {
	if s != nil {
		nu.SetIconURL(*s)
	}
	return nu
}

// ClearIconURL clears the value of the "icon_url" field.
func (nu *NodeUpdate) ClearIconURL() *NodeUpdate {
	nu.mutation.ClearIconURL()
	return nu
}

// SetTags sets the "tags" field.
func (nu *NodeUpdate) SetTags(s []string) *NodeUpdate {
	nu.mutation.SetTags(s)
	return nu
}

// AppendTags appends s to the "tags" field.
func (nu *NodeUpdate) AppendTags(s []string) *NodeUpdate {
	nu.mutation.AppendTags(s)
	return nu
}

// SetPublisher sets the "publisher" edge to the Publisher entity.
func (nu *NodeUpdate) SetPublisher(p *Publisher) *NodeUpdate {
	return nu.SetPublisherID(p.ID)
}

// AddVersionIDs adds the "versions" edge to the NodeVersion entity by IDs.
func (nu *NodeUpdate) AddVersionIDs(ids ...uuid.UUID) *NodeUpdate {
	nu.mutation.AddVersionIDs(ids...)
	return nu
}

// AddVersions adds the "versions" edges to the NodeVersion entity.
func (nu *NodeUpdate) AddVersions(n ...*NodeVersion) *NodeUpdate {
	ids := make([]uuid.UUID, len(n))
	for i := range n {
		ids[i] = n[i].ID
	}
	return nu.AddVersionIDs(ids...)
}

// Mutation returns the NodeMutation object of the builder.
func (nu *NodeUpdate) Mutation() *NodeMutation {
	return nu.mutation
}

// ClearPublisher clears the "publisher" edge to the Publisher entity.
func (nu *NodeUpdate) ClearPublisher() *NodeUpdate {
	nu.mutation.ClearPublisher()
	return nu
}

// ClearVersions clears all "versions" edges to the NodeVersion entity.
func (nu *NodeUpdate) ClearVersions() *NodeUpdate {
	nu.mutation.ClearVersions()
	return nu
}

// RemoveVersionIDs removes the "versions" edge to NodeVersion entities by IDs.
func (nu *NodeUpdate) RemoveVersionIDs(ids ...uuid.UUID) *NodeUpdate {
	nu.mutation.RemoveVersionIDs(ids...)
	return nu
}

// RemoveVersions removes "versions" edges to NodeVersion entities.
func (nu *NodeUpdate) RemoveVersions(n ...*NodeVersion) *NodeUpdate {
	ids := make([]uuid.UUID, len(n))
	for i := range n {
		ids[i] = n[i].ID
	}
	return nu.RemoveVersionIDs(ids...)
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (nu *NodeUpdate) Save(ctx context.Context) (int, error) {
	nu.defaults()
	return withHooks(ctx, nu.sqlSave, nu.mutation, nu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (nu *NodeUpdate) SaveX(ctx context.Context) int {
	affected, err := nu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (nu *NodeUpdate) Exec(ctx context.Context) error {
	_, err := nu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (nu *NodeUpdate) ExecX(ctx context.Context) {
	if err := nu.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (nu *NodeUpdate) defaults() {
	if _, ok := nu.mutation.UpdateTime(); !ok {
		v := node.UpdateDefaultUpdateTime()
		nu.mutation.SetUpdateTime(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (nu *NodeUpdate) check() error {
	if _, ok := nu.mutation.PublisherID(); nu.mutation.PublisherCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "Node.publisher"`)
	}
	return nil
}

// sqlSave translates the mutation into an UpdateSpec (field sets/clears and
// edge add/remove/clear operations) and executes it, returning the number of
// affected rows.
func (nu *NodeUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := nu.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(node.Table, node.Columns, sqlgraph.NewFieldSpec(node.FieldID, field.TypeString))
	if ps := nu.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := nu.mutation.UpdateTime(); ok {
		_spec.SetField(node.FieldUpdateTime, field.TypeTime, value)
	}
	if value, ok := nu.mutation.Name(); ok {
		_spec.SetField(node.FieldName, field.TypeString, value)
	}
	if value, ok := nu.mutation.Description(); ok {
		_spec.SetField(node.FieldDescription, field.TypeString, value)
	}
	if nu.mutation.DescriptionCleared() {
		_spec.ClearField(node.FieldDescription, field.TypeString)
	}
	if value, ok := nu.mutation.Author(); ok {
		_spec.SetField(node.FieldAuthor, field.TypeString, value)
	}
	if nu.mutation.AuthorCleared() {
		_spec.ClearField(node.FieldAuthor, field.TypeString)
	}
	if value, ok := nu.mutation.License(); ok {
		_spec.SetField(node.FieldLicense, field.TypeString, value)
	}
	if value, ok := nu.mutation.RepositoryURL(); ok {
		_spec.SetField(node.FieldRepositoryURL, field.TypeString, value)
	}
	if value, ok := nu.mutation.IconURL(); ok {
		_spec.SetField(node.FieldIconURL, field.TypeString, value)
	}
	if nu.mutation.IconURLCleared() {
		_spec.ClearField(node.FieldIconURL, field.TypeString)
	}
	if value, ok := nu.mutation.Tags(); ok {
		_spec.SetField(node.FieldTags, field.TypeJSON, value)
	}
	if value, ok := nu.mutation.AppendedTags(); ok {
		// Appended tags are merged into the JSON column in-database.
		_spec.AddModifier(func(u *sql.UpdateBuilder) {
			sqljson.Append(u, node.FieldTags, value)
		})
	}
	if nu.mutation.PublisherCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   node.PublisherTable,
			Columns: []string{node.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nu.mutation.PublisherIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   node.PublisherTable,
			Columns: []string{node.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if nu.mutation.VersionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nu.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !nu.mutation.VersionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nu.mutation.VersionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if n, err = sqlgraph.UpdateNodes(ctx, nu.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{node.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	nu.mutation.done = true
	return n, nil
}

// NodeUpdateOne is the builder for updating a single Node entity.
type NodeUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *NodeMutation
}

// SetUpdateTime sets the "update_time" field.
func (nuo *NodeUpdateOne) SetUpdateTime(t time.Time) *NodeUpdateOne {
	nuo.mutation.SetUpdateTime(t)
	return nuo
}

// SetPublisherID sets the "publisher_id" field.
func (nuo *NodeUpdateOne) SetPublisherID(s string) *NodeUpdateOne {
	nuo.mutation.SetPublisherID(s)
	return nuo
}

// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillablePublisherID(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetPublisherID(*s)
	}
	return nuo
}

// SetName sets the "name" field.
func (nuo *NodeUpdateOne) SetName(s string) *NodeUpdateOne {
	nuo.mutation.SetName(s)
	return nuo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableName(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetName(*s)
	}
	return nuo
}

// SetDescription sets the "description" field.
func (nuo *NodeUpdateOne) SetDescription(s string) *NodeUpdateOne {
	nuo.mutation.SetDescription(s)
	return nuo
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableDescription(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetDescription(*s)
	}
	return nuo
}

// ClearDescription clears the value of the "description" field.
func (nuo *NodeUpdateOne) ClearDescription() *NodeUpdateOne {
	nuo.mutation.ClearDescription()
	return nuo
}

// SetAuthor sets the "author" field.
func (nuo *NodeUpdateOne) SetAuthor(s string) *NodeUpdateOne {
	nuo.mutation.SetAuthor(s)
	return nuo
}

// SetNillableAuthor sets the "author" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableAuthor(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetAuthor(*s)
	}
	return nuo
}

// ClearAuthor clears the value of the "author" field.
func (nuo *NodeUpdateOne) ClearAuthor() *NodeUpdateOne {
	nuo.mutation.ClearAuthor()
	return nuo
}

// SetLicense sets the "license" field.
func (nuo *NodeUpdateOne) SetLicense(s string) *NodeUpdateOne {
	nuo.mutation.SetLicense(s)
	return nuo
}

// SetNillableLicense sets the "license" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableLicense(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetLicense(*s)
	}
	return nuo
}

// SetRepositoryURL sets the "repository_url" field.
func (nuo *NodeUpdateOne) SetRepositoryURL(s string) *NodeUpdateOne {
	nuo.mutation.SetRepositoryURL(s)
	return nuo
}

// SetNillableRepositoryURL sets the "repository_url" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableRepositoryURL(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetRepositoryURL(*s)
	}
	return nuo
}

// SetIconURL sets the "icon_url" field.
func (nuo *NodeUpdateOne) SetIconURL(s string) *NodeUpdateOne {
	nuo.mutation.SetIconURL(s)
	return nuo
}

// SetNillableIconURL sets the "icon_url" field if the given value is not nil.
func (nuo *NodeUpdateOne) SetNillableIconURL(s *string) *NodeUpdateOne {
	if s != nil {
		nuo.SetIconURL(*s)
	}
	return nuo
}

// ClearIconURL clears the value of the "icon_url" field.
func (nuo *NodeUpdateOne) ClearIconURL() *NodeUpdateOne {
	nuo.mutation.ClearIconURL()
	return nuo
}

// SetTags sets the "tags" field.
func (nuo *NodeUpdateOne) SetTags(s []string) *NodeUpdateOne {
	nuo.mutation.SetTags(s)
	return nuo
}

// AppendTags appends s to the "tags" field.
func (nuo *NodeUpdateOne) AppendTags(s []string) *NodeUpdateOne {
	nuo.mutation.AppendTags(s)
	return nuo
}

// SetPublisher sets the "publisher" edge to the Publisher entity.
func (nuo *NodeUpdateOne) SetPublisher(p *Publisher) *NodeUpdateOne {
	return nuo.SetPublisherID(p.ID)
}

// AddVersionIDs adds the "versions" edge to the NodeVersion entity by IDs.
func (nuo *NodeUpdateOne) AddVersionIDs(ids ...uuid.UUID) *NodeUpdateOne {
	nuo.mutation.AddVersionIDs(ids...)
	return nuo
}

// AddVersions adds the "versions" edges to the NodeVersion entity.
func (nuo *NodeUpdateOne) AddVersions(n ...*NodeVersion) *NodeUpdateOne {
	ids := make([]uuid.UUID, len(n))
	for i := range n {
		ids[i] = n[i].ID
	}
	return nuo.AddVersionIDs(ids...)
}

// Mutation returns the NodeMutation object of the builder.
func (nuo *NodeUpdateOne) Mutation() *NodeMutation {
	return nuo.mutation
}

// ClearPublisher clears the "publisher" edge to the Publisher entity.
func (nuo *NodeUpdateOne) ClearPublisher() *NodeUpdateOne {
	nuo.mutation.ClearPublisher()
	return nuo
}

// ClearVersions clears all "versions" edges to the NodeVersion entity.
func (nuo *NodeUpdateOne) ClearVersions() *NodeUpdateOne {
	nuo.mutation.ClearVersions()
	return nuo
}

// RemoveVersionIDs removes the "versions" edge to NodeVersion entities by IDs.
func (nuo *NodeUpdateOne) RemoveVersionIDs(ids ...uuid.UUID) *NodeUpdateOne {
	nuo.mutation.RemoveVersionIDs(ids...)
	return nuo
}

// RemoveVersions removes "versions" edges to NodeVersion entities.
func (nuo *NodeUpdateOne) RemoveVersions(n ...*NodeVersion) *NodeUpdateOne {
	ids := make([]uuid.UUID, len(n))
	for i := range n {
		ids[i] = n[i].ID
	}
	return nuo.RemoveVersionIDs(ids...)
}

// Where appends a list of predicates to the NodeUpdateOne builder.
func (nuo *NodeUpdateOne) Where(ps ...predicate.Node) *NodeUpdateOne {
	nuo.mutation.Where(ps...)
	return nuo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (nuo *NodeUpdateOne) Select(field string, fields ...string) *NodeUpdateOne {
	nuo.fields = append([]string{field}, fields...)
	return nuo
}

// Save executes the query and returns the updated Node entity.
func (nuo *NodeUpdateOne) Save(ctx context.Context) (*Node, error) {
	nuo.defaults()
	return withHooks(ctx, nuo.sqlSave, nuo.mutation, nuo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (nuo *NodeUpdateOne) SaveX(ctx context.Context) *Node {
	node, err := nuo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (nuo *NodeUpdateOne) Exec(ctx context.Context) error {
	_, err := nuo.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (nuo *NodeUpdateOne) ExecX(ctx context.Context) {
	if err := nuo.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (nuo *NodeUpdateOne) defaults() {
	if _, ok := nuo.mutation.UpdateTime(); !ok {
		v := node.UpdateDefaultUpdateTime()
		nuo.mutation.SetUpdateTime(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (nuo *NodeUpdateOne) check() error {
	if _, ok := nuo.mutation.PublisherID(); nuo.mutation.PublisherCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "Node.publisher"`)
	}
	return nil
}

// sqlSave translates the mutation into an UpdateSpec targeting the single
// entity identified by the mutation's ID, executes it, and returns the
// updated Node scanned back from the database.
func (nuo *NodeUpdateOne) sqlSave(ctx context.Context) (_node *Node, err error) {
	if err := nuo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(node.Table, node.Columns, sqlgraph.NewFieldSpec(node.FieldID, field.TypeString))
	id, ok := nuo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Node.id" for update`)}
	}
	_spec.Node.ID.Value = id
	if fields := nuo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always returned, even under a custom selection.
		_spec.Node.Columns = append(_spec.Node.Columns, node.FieldID)
		for _, f := range fields {
			if !node.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != node.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := nuo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := nuo.mutation.UpdateTime(); ok {
		_spec.SetField(node.FieldUpdateTime, field.TypeTime, value)
	}
	if value, ok := nuo.mutation.Name(); ok {
		_spec.SetField(node.FieldName, field.TypeString, value)
	}
	if value, ok := nuo.mutation.Description(); ok {
		_spec.SetField(node.FieldDescription, field.TypeString, value)
	}
	if nuo.mutation.DescriptionCleared() {
		_spec.ClearField(node.FieldDescription, field.TypeString)
	}
	if value, ok := nuo.mutation.Author(); ok {
		_spec.SetField(node.FieldAuthor, field.TypeString, value)
	}
	if nuo.mutation.AuthorCleared() {
		_spec.ClearField(node.FieldAuthor, field.TypeString)
	}
	if value, ok := nuo.mutation.License(); ok {
		_spec.SetField(node.FieldLicense, field.TypeString, value)
	}
	if value, ok := nuo.mutation.RepositoryURL(); ok {
		_spec.SetField(node.FieldRepositoryURL, field.TypeString, value)
	}
	if value, ok := nuo.mutation.IconURL(); ok {
		_spec.SetField(node.FieldIconURL, field.TypeString, value)
	}
	if nuo.mutation.IconURLCleared() {
		_spec.ClearField(node.FieldIconURL, field.TypeString)
	}
	if value, ok := nuo.mutation.Tags(); ok {
		_spec.SetField(node.FieldTags, field.TypeJSON, value)
	}
	if value, ok := nuo.mutation.AppendedTags(); ok {
		// Appended tags are merged into the JSON column in-database.
		_spec.AddModifier(func(u *sql.UpdateBuilder) {
			sqljson.Append(u, node.FieldTags, value)
		})
	}
	if nuo.mutation.PublisherCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   node.PublisherTable,
			Columns: []string{node.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nuo.mutation.PublisherIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   node.PublisherTable,
			Columns: []string{node.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if nuo.mutation.VersionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nuo.mutation.RemovedVersionsIDs(); len(nodes) > 0 && !nuo.mutation.VersionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := nuo.mutation.VersionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   node.VersionsTable,
			Columns: []string{node.VersionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	_node = &Node{config: nuo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, nuo.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{node.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	nuo.mutation.done = true
	return _node, nil
}
diff --git a/ent/nodeversion.go b/ent/nodeversion.go
new file mode 100644
index 0000000..0222910
--- /dev/null
+++ b/ent/nodeversion.go
@@ -0,0 +1,238 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"encoding/json"
	"fmt"
	"registry-backend/ent/node"
	"registry-backend/ent/nodeversion"
	"registry-backend/ent/storagefile"
	"strings"
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"github.com/google/uuid"
)

// NodeVersion is the model entity for the NodeVersion schema.
type NodeVersion struct {
	config `json:"-"`
	// ID of the ent.
	ID uuid.UUID `json:"id,omitempty"`
	// CreateTime holds the value of the "create_time" field.
	CreateTime time.Time `json:"create_time,omitempty"`
	// UpdateTime holds the value of the "update_time" field.
	UpdateTime time.Time `json:"update_time,omitempty"`
	// NodeID holds the value of the "node_id" field.
	NodeID string `json:"node_id,omitempty"`
	// Must be SemVer compliant
	Version string `json:"version,omitempty"`
	// Changelog holds the value of the "changelog" field.
	Changelog string `json:"changelog,omitempty"`
	// PipDependencies holds the value of the "pip_dependencies" field.
	PipDependencies []string `json:"pip_dependencies,omitempty"`
	// Deprecated holds the value of the "deprecated" field.
	Deprecated bool `json:"deprecated,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the NodeVersionQuery when eager-loading is set.
	Edges NodeVersionEdges `json:"edges"`
	// node_version_storage_file holds the FK of the "storage_file" edge;
	// nil when the row has no associated storage file.
	node_version_storage_file *uuid.UUID
	selectValues              sql.SelectValues
}

// NodeVersionEdges holds the relations/edges for other nodes in the graph.
type NodeVersionEdges struct {
	// Node holds the value of the node edge.
	Node *Node `json:"node,omitempty"`
	// StorageFile holds the value of the storage_file edge.
	StorageFile *StorageFile `json:"storage_file,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [2]bool
}

// NodeOrErr returns the Node value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e NodeVersionEdges) NodeOrErr() (*Node, error) {
	if e.Node != nil {
		return e.Node, nil
	} else if e.loadedTypes[0] {
		return nil, &NotFoundError{label: node.Label}
	}
	return nil, &NotLoadedError{edge: "node"}
}

// StorageFileOrErr returns the StorageFile value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
+func (e NodeVersionEdges) StorageFileOrErr() (*StorageFile, error) { + if e.StorageFile != nil { + return e.StorageFile, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: storagefile.Label} + } + return nil, &NotLoadedError{edge: "storage_file"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*NodeVersion) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case nodeversion.FieldPipDependencies: + values[i] = new([]byte) + case nodeversion.FieldDeprecated: + values[i] = new(sql.NullBool) + case nodeversion.FieldNodeID, nodeversion.FieldVersion, nodeversion.FieldChangelog: + values[i] = new(sql.NullString) + case nodeversion.FieldCreateTime, nodeversion.FieldUpdateTime: + values[i] = new(sql.NullTime) + case nodeversion.FieldID: + values[i] = new(uuid.UUID) + case nodeversion.ForeignKeys[0]: // node_version_storage_file + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the NodeVersion fields. 
+func (nv *NodeVersion) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case nodeversion.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + nv.ID = *value + } + case nodeversion.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + nv.CreateTime = value.Time + } + case nodeversion.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + nv.UpdateTime = value.Time + } + case nodeversion.FieldNodeID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field node_id", values[i]) + } else if value.Valid { + nv.NodeID = value.String + } + case nodeversion.FieldVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field version", values[i]) + } else if value.Valid { + nv.Version = value.String + } + case nodeversion.FieldChangelog: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field changelog", values[i]) + } else if value.Valid { + nv.Changelog = value.String + } + case nodeversion.FieldPipDependencies: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field pip_dependencies", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &nv.PipDependencies); err != nil { + return fmt.Errorf("unmarshal field pip_dependencies: %w", err) + } + } + case nodeversion.FieldDeprecated: + if value, ok := values[i].(*sql.NullBool); !ok { + return 
fmt.Errorf("unexpected type %T for field deprecated", values[i]) + } else if value.Valid { + nv.Deprecated = value.Bool + } + case nodeversion.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field node_version_storage_file", values[i]) + } else if value.Valid { + nv.node_version_storage_file = new(uuid.UUID) + *nv.node_version_storage_file = *value.S.(*uuid.UUID) + } + default: + nv.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the NodeVersion. +// This includes values selected through modifiers, order, etc. +func (nv *NodeVersion) Value(name string) (ent.Value, error) { + return nv.selectValues.Get(name) +} + +// QueryNode queries the "node" edge of the NodeVersion entity. +func (nv *NodeVersion) QueryNode() *NodeQuery { + return NewNodeVersionClient(nv.config).QueryNode(nv) +} + +// QueryStorageFile queries the "storage_file" edge of the NodeVersion entity. +func (nv *NodeVersion) QueryStorageFile() *StorageFileQuery { + return NewNodeVersionClient(nv.config).QueryStorageFile(nv) +} + +// Update returns a builder for updating this NodeVersion. +// Note that you need to call NodeVersion.Unwrap() before calling this method if this NodeVersion +// was returned from a transaction, and the transaction was committed or rolled back. +func (nv *NodeVersion) Update() *NodeVersionUpdateOne { + return NewNodeVersionClient(nv.config).UpdateOne(nv) +} + +// Unwrap unwraps the NodeVersion entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (nv *NodeVersion) Unwrap() *NodeVersion { + _tx, ok := nv.config.driver.(*txDriver) + if !ok { + panic("ent: NodeVersion is not a transactional entity") + } + nv.config.driver = _tx.drv + return nv +} + +// String implements the fmt.Stringer. 
+func (nv *NodeVersion) String() string { + var builder strings.Builder + builder.WriteString("NodeVersion(") + builder.WriteString(fmt.Sprintf("id=%v, ", nv.ID)) + builder.WriteString("create_time=") + builder.WriteString(nv.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(nv.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("node_id=") + builder.WriteString(nv.NodeID) + builder.WriteString(", ") + builder.WriteString("version=") + builder.WriteString(nv.Version) + builder.WriteString(", ") + builder.WriteString("changelog=") + builder.WriteString(nv.Changelog) + builder.WriteString(", ") + builder.WriteString("pip_dependencies=") + builder.WriteString(fmt.Sprintf("%v", nv.PipDependencies)) + builder.WriteString(", ") + builder.WriteString("deprecated=") + builder.WriteString(fmt.Sprintf("%v", nv.Deprecated)) + builder.WriteByte(')') + return builder.String() +} + +// NodeVersions is a parsable slice of NodeVersion. +type NodeVersions []*NodeVersion diff --git a/ent/nodeversion/nodeversion.go b/ent/nodeversion/nodeversion.go new file mode 100644 index 0000000..3507958 --- /dev/null +++ b/ent/nodeversion/nodeversion.go @@ -0,0 +1,164 @@ +// Code generated by ent, DO NOT EDIT. + +package nodeversion + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the nodeversion type in the database. + Label = "node_version" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldNodeID holds the string denoting the node_id field in the database. 
+ FieldNodeID = "node_id" + // FieldVersion holds the string denoting the version field in the database. + FieldVersion = "version" + // FieldChangelog holds the string denoting the changelog field in the database. + FieldChangelog = "changelog" + // FieldPipDependencies holds the string denoting the pip_dependencies field in the database. + FieldPipDependencies = "pip_dependencies" + // FieldDeprecated holds the string denoting the deprecated field in the database. + FieldDeprecated = "deprecated" + // EdgeNode holds the string denoting the node edge name in mutations. + EdgeNode = "node" + // EdgeStorageFile holds the string denoting the storage_file edge name in mutations. + EdgeStorageFile = "storage_file" + // Table holds the table name of the nodeversion in the database. + Table = "node_versions" + // NodeTable is the table that holds the node relation/edge. + NodeTable = "node_versions" + // NodeInverseTable is the table name for the Node entity. + // It exists in this package in order to avoid circular dependency with the "node" package. + NodeInverseTable = "nodes" + // NodeColumn is the table column denoting the node relation/edge. + NodeColumn = "node_id" + // StorageFileTable is the table that holds the storage_file relation/edge. + StorageFileTable = "node_versions" + // StorageFileInverseTable is the table name for the StorageFile entity. + // It exists in this package in order to avoid circular dependency with the "storagefile" package. + StorageFileInverseTable = "storage_files" + // StorageFileColumn is the table column denoting the storage_file relation/edge. + StorageFileColumn = "node_version_storage_file" +) + +// Columns holds all SQL columns for nodeversion fields. 
+var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldNodeID, + FieldVersion, + FieldChangelog, + FieldPipDependencies, + FieldDeprecated, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "node_versions" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "node_version_storage_file", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // DefaultDeprecated holds the default value on creation for the "deprecated" field. + DefaultDeprecated bool + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the NodeVersion queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. +func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByNodeID orders the results by the node_id field. 
+func ByNodeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNodeID, opts...).ToFunc() +} + +// ByVersion orders the results by the version field. +func ByVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVersion, opts...).ToFunc() +} + +// ByChangelog orders the results by the changelog field. +func ByChangelog(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldChangelog, opts...).ToFunc() +} + +// ByDeprecated orders the results by the deprecated field. +func ByDeprecated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeprecated, opts...).ToFunc() +} + +// ByNodeField orders the results by node field. +func ByNodeField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNodeStep(), sql.OrderByField(field, opts...)) + } +} + +// ByStorageFileField orders the results by storage_file field. +func ByStorageFileField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newStorageFileStep(), sql.OrderByField(field, opts...)) + } +} +func newNodeStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NodeInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, NodeTable, NodeColumn), + ) +} +func newStorageFileStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(StorageFileInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, StorageFileTable, StorageFileColumn), + ) +} diff --git a/ent/nodeversion/where.go b/ent/nodeversion/where.go new file mode 100644 index 0000000..683c2eb --- /dev/null +++ b/ent/nodeversion/where.go @@ -0,0 +1,443 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package nodeversion + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. 
It's identical to UpdateTimeEQ. +func UpdateTime(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldUpdateTime, v)) +} + +// NodeID applies equality check predicate on the "node_id" field. It's identical to NodeIDEQ. +func NodeID(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldNodeID, v)) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldVersion, v)) +} + +// Changelog applies equality check predicate on the "changelog" field. It's identical to ChangelogEQ. +func Changelog(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldChangelog, v)) +} + +// Deprecated applies equality check predicate on the "deprecated" field. It's identical to DeprecatedEQ. +func Deprecated(v bool) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldDeprecated, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. +func CreateTimeEQ(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. +func CreateTimeIn(vs ...time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. +func CreateTimeNotIn(vs ...time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. 
+func CreateTimeGT(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. +func UpdateTimeEQ(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. +func UpdateTimeNEQ(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. +func UpdateTimeGT(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. 
+func UpdateTimeLT(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldUpdateTime, v)) +} + +// NodeIDEQ applies the EQ predicate on the "node_id" field. +func NodeIDEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldNodeID, v)) +} + +// NodeIDNEQ applies the NEQ predicate on the "node_id" field. +func NodeIDNEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldNodeID, v)) +} + +// NodeIDIn applies the In predicate on the "node_id" field. +func NodeIDIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldNodeID, vs...)) +} + +// NodeIDNotIn applies the NotIn predicate on the "node_id" field. +func NodeIDNotIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldNodeID, vs...)) +} + +// NodeIDGT applies the GT predicate on the "node_id" field. +func NodeIDGT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldNodeID, v)) +} + +// NodeIDGTE applies the GTE predicate on the "node_id" field. +func NodeIDGTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldNodeID, v)) +} + +// NodeIDLT applies the LT predicate on the "node_id" field. +func NodeIDLT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldNodeID, v)) +} + +// NodeIDLTE applies the LTE predicate on the "node_id" field. +func NodeIDLTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldNodeID, v)) +} + +// NodeIDContains applies the Contains predicate on the "node_id" field. 
+func NodeIDContains(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContains(FieldNodeID, v)) +} + +// NodeIDHasPrefix applies the HasPrefix predicate on the "node_id" field. +func NodeIDHasPrefix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasPrefix(FieldNodeID, v)) +} + +// NodeIDHasSuffix applies the HasSuffix predicate on the "node_id" field. +func NodeIDHasSuffix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasSuffix(FieldNodeID, v)) +} + +// NodeIDEqualFold applies the EqualFold predicate on the "node_id" field. +func NodeIDEqualFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEqualFold(FieldNodeID, v)) +} + +// NodeIDContainsFold applies the ContainsFold predicate on the "node_id" field. +func NodeIDContainsFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContainsFold(FieldNodeID, v)) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldVersion, v)) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldVersion, v)) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldVersion, vs...)) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldVersion, vs...)) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldVersion, v)) +} + +// VersionGTE applies the GTE predicate on the "version" field. 
+func VersionGTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldVersion, v)) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldVersion, v)) +} + +// VersionLTE applies the LTE predicate on the "version" field. +func VersionLTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldVersion, v)) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContains(FieldVersion, v)) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. +func VersionHasPrefix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasPrefix(FieldVersion, v)) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. +func VersionHasSuffix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasSuffix(FieldVersion, v)) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEqualFold(FieldVersion, v)) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContainsFold(FieldVersion, v)) +} + +// ChangelogEQ applies the EQ predicate on the "changelog" field. +func ChangelogEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldChangelog, v)) +} + +// ChangelogNEQ applies the NEQ predicate on the "changelog" field. +func ChangelogNEQ(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldChangelog, v)) +} + +// ChangelogIn applies the In predicate on the "changelog" field. 
+func ChangelogIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIn(FieldChangelog, vs...)) +} + +// ChangelogNotIn applies the NotIn predicate on the "changelog" field. +func ChangelogNotIn(vs ...string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotIn(FieldChangelog, vs...)) +} + +// ChangelogGT applies the GT predicate on the "changelog" field. +func ChangelogGT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGT(FieldChangelog, v)) +} + +// ChangelogGTE applies the GTE predicate on the "changelog" field. +func ChangelogGTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldGTE(FieldChangelog, v)) +} + +// ChangelogLT applies the LT predicate on the "changelog" field. +func ChangelogLT(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLT(FieldChangelog, v)) +} + +// ChangelogLTE applies the LTE predicate on the "changelog" field. +func ChangelogLTE(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldLTE(FieldChangelog, v)) +} + +// ChangelogContains applies the Contains predicate on the "changelog" field. +func ChangelogContains(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContains(FieldChangelog, v)) +} + +// ChangelogHasPrefix applies the HasPrefix predicate on the "changelog" field. +func ChangelogHasPrefix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasPrefix(FieldChangelog, v)) +} + +// ChangelogHasSuffix applies the HasSuffix predicate on the "changelog" field. +func ChangelogHasSuffix(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldHasSuffix(FieldChangelog, v)) +} + +// ChangelogIsNil applies the IsNil predicate on the "changelog" field. +func ChangelogIsNil() predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldIsNull(FieldChangelog)) +} + +// ChangelogNotNil applies the NotNil predicate on the "changelog" field. 
+func ChangelogNotNil() predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNotNull(FieldChangelog)) +} + +// ChangelogEqualFold applies the EqualFold predicate on the "changelog" field. +func ChangelogEqualFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEqualFold(FieldChangelog, v)) +} + +// ChangelogContainsFold applies the ContainsFold predicate on the "changelog" field. +func ChangelogContainsFold(v string) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldContainsFold(FieldChangelog, v)) +} + +// DeprecatedEQ applies the EQ predicate on the "deprecated" field. +func DeprecatedEQ(v bool) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldEQ(FieldDeprecated, v)) +} + +// DeprecatedNEQ applies the NEQ predicate on the "deprecated" field. +func DeprecatedNEQ(v bool) predicate.NodeVersion { + return predicate.NodeVersion(sql.FieldNEQ(FieldDeprecated, v)) +} + +// HasNode applies the HasEdge predicate on the "node" edge. +func HasNode() predicate.NodeVersion { + return predicate.NodeVersion(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, NodeTable, NodeColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasNodeWith applies the HasEdge predicate on the "node" edge with a given conditions (other predicates). +func HasNodeWith(preds ...predicate.Node) predicate.NodeVersion { + return predicate.NodeVersion(func(s *sql.Selector) { + step := newNodeStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasStorageFile applies the HasEdge predicate on the "storage_file" edge. 
+func HasStorageFile() predicate.NodeVersion { + return predicate.NodeVersion(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, StorageFileTable, StorageFileColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasStorageFileWith applies the HasEdge predicate on the "storage_file" edge with a given conditions (other predicates). +func HasStorageFileWith(preds ...predicate.StorageFile) predicate.NodeVersion { + return predicate.NodeVersion(func(s *sql.Selector) { + step := newStorageFileStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.NodeVersion) predicate.NodeVersion { + return predicate.NodeVersion(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.NodeVersion) predicate.NodeVersion { + return predicate.NodeVersion(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.NodeVersion) predicate.NodeVersion { + return predicate.NodeVersion(sql.NotPredicates(p)) +} diff --git a/ent/nodeversion_create.go b/ent/nodeversion_create.go new file mode 100644 index 0000000..611fe2e --- /dev/null +++ b/ent/nodeversion_create.go @@ -0,0 +1,913 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/storagefile" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// NodeVersionCreate is the builder for creating a NodeVersion entity. 
+type NodeVersionCreate struct { + config + mutation *NodeVersionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreateTime sets the "create_time" field. +func (nvc *NodeVersionCreate) SetCreateTime(t time.Time) *NodeVersionCreate { + nvc.mutation.SetCreateTime(t) + return nvc +} + +// SetNillableCreateTime sets the "create_time" field if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableCreateTime(t *time.Time) *NodeVersionCreate { + if t != nil { + nvc.SetCreateTime(*t) + } + return nvc +} + +// SetUpdateTime sets the "update_time" field. +func (nvc *NodeVersionCreate) SetUpdateTime(t time.Time) *NodeVersionCreate { + nvc.mutation.SetUpdateTime(t) + return nvc +} + +// SetNillableUpdateTime sets the "update_time" field if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableUpdateTime(t *time.Time) *NodeVersionCreate { + if t != nil { + nvc.SetUpdateTime(*t) + } + return nvc +} + +// SetNodeID sets the "node_id" field. +func (nvc *NodeVersionCreate) SetNodeID(s string) *NodeVersionCreate { + nvc.mutation.SetNodeID(s) + return nvc +} + +// SetVersion sets the "version" field. +func (nvc *NodeVersionCreate) SetVersion(s string) *NodeVersionCreate { + nvc.mutation.SetVersion(s) + return nvc +} + +// SetChangelog sets the "changelog" field. +func (nvc *NodeVersionCreate) SetChangelog(s string) *NodeVersionCreate { + nvc.mutation.SetChangelog(s) + return nvc +} + +// SetNillableChangelog sets the "changelog" field if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableChangelog(s *string) *NodeVersionCreate { + if s != nil { + nvc.SetChangelog(*s) + } + return nvc +} + +// SetPipDependencies sets the "pip_dependencies" field. +func (nvc *NodeVersionCreate) SetPipDependencies(s []string) *NodeVersionCreate { + nvc.mutation.SetPipDependencies(s) + return nvc +} + +// SetDeprecated sets the "deprecated" field. 
+func (nvc *NodeVersionCreate) SetDeprecated(b bool) *NodeVersionCreate { + nvc.mutation.SetDeprecated(b) + return nvc +} + +// SetNillableDeprecated sets the "deprecated" field if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableDeprecated(b *bool) *NodeVersionCreate { + if b != nil { + nvc.SetDeprecated(*b) + } + return nvc +} + +// SetID sets the "id" field. +func (nvc *NodeVersionCreate) SetID(u uuid.UUID) *NodeVersionCreate { + nvc.mutation.SetID(u) + return nvc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableID(u *uuid.UUID) *NodeVersionCreate { + if u != nil { + nvc.SetID(*u) + } + return nvc +} + +// SetNode sets the "node" edge to the Node entity. +func (nvc *NodeVersionCreate) SetNode(n *Node) *NodeVersionCreate { + return nvc.SetNodeID(n.ID) +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID. +func (nvc *NodeVersionCreate) SetStorageFileID(id uuid.UUID) *NodeVersionCreate { + nvc.mutation.SetStorageFileID(id) + return nvc +} + +// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil. +func (nvc *NodeVersionCreate) SetNillableStorageFileID(id *uuid.UUID) *NodeVersionCreate { + if id != nil { + nvc = nvc.SetStorageFileID(*id) + } + return nvc +} + +// SetStorageFile sets the "storage_file" edge to the StorageFile entity. +func (nvc *NodeVersionCreate) SetStorageFile(s *StorageFile) *NodeVersionCreate { + return nvc.SetStorageFileID(s.ID) +} + +// Mutation returns the NodeVersionMutation object of the builder. +func (nvc *NodeVersionCreate) Mutation() *NodeVersionMutation { + return nvc.mutation +} + +// Save creates the NodeVersion in the database. +func (nvc *NodeVersionCreate) Save(ctx context.Context) (*NodeVersion, error) { + nvc.defaults() + return withHooks(ctx, nvc.sqlSave, nvc.mutation, nvc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (nvc *NodeVersionCreate) SaveX(ctx context.Context) *NodeVersion { + v, err := nvc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (nvc *NodeVersionCreate) Exec(ctx context.Context) error { + _, err := nvc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvc *NodeVersionCreate) ExecX(ctx context.Context) { + if err := nvc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nvc *NodeVersionCreate) defaults() { + if _, ok := nvc.mutation.CreateTime(); !ok { + v := nodeversion.DefaultCreateTime() + nvc.mutation.SetCreateTime(v) + } + if _, ok := nvc.mutation.UpdateTime(); !ok { + v := nodeversion.DefaultUpdateTime() + nvc.mutation.SetUpdateTime(v) + } + if _, ok := nvc.mutation.Deprecated(); !ok { + v := nodeversion.DefaultDeprecated + nvc.mutation.SetDeprecated(v) + } + if _, ok := nvc.mutation.ID(); !ok { + v := nodeversion.DefaultID() + nvc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nvc *NodeVersionCreate) check() error { + if _, ok := nvc.mutation.CreateTime(); !ok { + return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "NodeVersion.create_time"`)} + } + if _, ok := nvc.mutation.UpdateTime(); !ok { + return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "NodeVersion.update_time"`)} + } + if _, ok := nvc.mutation.NodeID(); !ok { + return &ValidationError{Name: "node_id", err: errors.New(`ent: missing required field "NodeVersion.node_id"`)} + } + if _, ok := nvc.mutation.Version(); !ok { + return &ValidationError{Name: "version", err: errors.New(`ent: missing required field "NodeVersion.version"`)} + } + if _, ok := nvc.mutation.PipDependencies(); !ok { + return &ValidationError{Name: "pip_dependencies", err: errors.New(`ent: missing required field "NodeVersion.pip_dependencies"`)} + } + if _, ok := nvc.mutation.Deprecated(); !ok { + return &ValidationError{Name: "deprecated", err: errors.New(`ent: missing required field "NodeVersion.deprecated"`)} + } + if _, ok := nvc.mutation.NodeID(); !ok { + return &ValidationError{Name: "node", err: errors.New(`ent: missing required edge "NodeVersion.node"`)} + } + return nil +} + +func (nvc *NodeVersionCreate) sqlSave(ctx context.Context) (*NodeVersion, error) { + if err := nvc.check(); err != nil { + return nil, err + } + _node, _spec := nvc.createSpec() + if err := sqlgraph.CreateNode(ctx, nvc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + nvc.mutation.id = &_node.ID + nvc.mutation.done = true + return _node, nil +} + +func (nvc *NodeVersionCreate) createSpec() (*NodeVersion, *sqlgraph.CreateSpec) { + var ( + _node = &NodeVersion{config: 
nvc.config} + _spec = sqlgraph.NewCreateSpec(nodeversion.Table, sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = nvc.conflict + if id, ok := nvc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := nvc.mutation.CreateTime(); ok { + _spec.SetField(nodeversion.FieldCreateTime, field.TypeTime, value) + _node.CreateTime = value + } + if value, ok := nvc.mutation.UpdateTime(); ok { + _spec.SetField(nodeversion.FieldUpdateTime, field.TypeTime, value) + _node.UpdateTime = value + } + if value, ok := nvc.mutation.Version(); ok { + _spec.SetField(nodeversion.FieldVersion, field.TypeString, value) + _node.Version = value + } + if value, ok := nvc.mutation.Changelog(); ok { + _spec.SetField(nodeversion.FieldChangelog, field.TypeString, value) + _node.Changelog = value + } + if value, ok := nvc.mutation.PipDependencies(); ok { + _spec.SetField(nodeversion.FieldPipDependencies, field.TypeJSON, value) + _node.PipDependencies = value + } + if value, ok := nvc.mutation.Deprecated(); ok { + _spec.SetField(nodeversion.FieldDeprecated, field.TypeBool, value) + _node.Deprecated = value + } + if nodes := nvc.mutation.NodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: nodeversion.NodeTable, + Columns: []string{nodeversion.NodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.NodeID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := nvc.mutation.StorageFileIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: nodeversion.StorageFileTable, + Columns: []string{nodeversion.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + for _, k := range 
nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.node_version_storage_file = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.NodeVersion.Create(). +// SetCreateTime(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NodeVersionUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (nvc *NodeVersionCreate) OnConflict(opts ...sql.ConflictOption) *NodeVersionUpsertOne { + nvc.conflict = opts + return &NodeVersionUpsertOne{ + create: nvc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (nvc *NodeVersionCreate) OnConflictColumns(columns ...string) *NodeVersionUpsertOne { + nvc.conflict = append(nvc.conflict, sql.ConflictColumns(columns...)) + return &NodeVersionUpsertOne{ + create: nvc, + } +} + +type ( + // NodeVersionUpsertOne is the builder for "upsert"-ing + // one NodeVersion node. + NodeVersionUpsertOne struct { + create *NodeVersionCreate + } + + // NodeVersionUpsert is the "OnConflict" setter. + NodeVersionUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdateTime sets the "update_time" field. +func (u *NodeVersionUpsert) SetUpdateTime(v time.Time) *NodeVersionUpsert { + u.Set(nodeversion.FieldUpdateTime, v) + return u +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *NodeVersionUpsert) UpdateUpdateTime() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldUpdateTime) + return u +} + +// SetNodeID sets the "node_id" field. 
+func (u *NodeVersionUpsert) SetNodeID(v string) *NodeVersionUpsert { + u.Set(nodeversion.FieldNodeID, v) + return u +} + +// UpdateNodeID sets the "node_id" field to the value that was provided on create. +func (u *NodeVersionUpsert) UpdateNodeID() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldNodeID) + return u +} + +// SetVersion sets the "version" field. +func (u *NodeVersionUpsert) SetVersion(v string) *NodeVersionUpsert { + u.Set(nodeversion.FieldVersion, v) + return u +} + +// UpdateVersion sets the "version" field to the value that was provided on create. +func (u *NodeVersionUpsert) UpdateVersion() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldVersion) + return u +} + +// SetChangelog sets the "changelog" field. +func (u *NodeVersionUpsert) SetChangelog(v string) *NodeVersionUpsert { + u.Set(nodeversion.FieldChangelog, v) + return u +} + +// UpdateChangelog sets the "changelog" field to the value that was provided on create. +func (u *NodeVersionUpsert) UpdateChangelog() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldChangelog) + return u +} + +// ClearChangelog clears the value of the "changelog" field. +func (u *NodeVersionUpsert) ClearChangelog() *NodeVersionUpsert { + u.SetNull(nodeversion.FieldChangelog) + return u +} + +// SetPipDependencies sets the "pip_dependencies" field. +func (u *NodeVersionUpsert) SetPipDependencies(v []string) *NodeVersionUpsert { + u.Set(nodeversion.FieldPipDependencies, v) + return u +} + +// UpdatePipDependencies sets the "pip_dependencies" field to the value that was provided on create. +func (u *NodeVersionUpsert) UpdatePipDependencies() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldPipDependencies) + return u +} + +// SetDeprecated sets the "deprecated" field. +func (u *NodeVersionUpsert) SetDeprecated(v bool) *NodeVersionUpsert { + u.Set(nodeversion.FieldDeprecated, v) + return u +} + +// UpdateDeprecated sets the "deprecated" field to the value that was provided on create. 
+func (u *NodeVersionUpsert) UpdateDeprecated() *NodeVersionUpsert { + u.SetExcluded(nodeversion.FieldDeprecated) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(nodeversion.FieldID) +// }), +// ). +// Exec(ctx) +func (u *NodeVersionUpsertOne) UpdateNewValues() *NodeVersionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(nodeversion.FieldID) + } + if _, exists := u.create.mutation.CreateTime(); exists { + s.SetIgnore(nodeversion.FieldCreateTime) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NodeVersionUpsertOne) Ignore() *NodeVersionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NodeVersionUpsertOne) DoNothing() *NodeVersionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NodeVersionCreate.OnConflict +// documentation for more info. +func (u *NodeVersionUpsertOne) Update(set func(*NodeVersionUpsert)) *NodeVersionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NodeVersionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. 
+func (u *NodeVersionUpsertOne) SetUpdateTime(v time.Time) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdateUpdateTime() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetNodeID sets the "node_id" field. +func (u *NodeVersionUpsertOne) SetNodeID(v string) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetNodeID(v) + }) +} + +// UpdateNodeID sets the "node_id" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdateNodeID() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateNodeID() + }) +} + +// SetVersion sets the "version" field. +func (u *NodeVersionUpsertOne) SetVersion(v string) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetVersion(v) + }) +} + +// UpdateVersion sets the "version" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdateVersion() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateVersion() + }) +} + +// SetChangelog sets the "changelog" field. +func (u *NodeVersionUpsertOne) SetChangelog(v string) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetChangelog(v) + }) +} + +// UpdateChangelog sets the "changelog" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdateChangelog() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateChangelog() + }) +} + +// ClearChangelog clears the value of the "changelog" field. +func (u *NodeVersionUpsertOne) ClearChangelog() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.ClearChangelog() + }) +} + +// SetPipDependencies sets the "pip_dependencies" field. 
+func (u *NodeVersionUpsertOne) SetPipDependencies(v []string) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetPipDependencies(v) + }) +} + +// UpdatePipDependencies sets the "pip_dependencies" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdatePipDependencies() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdatePipDependencies() + }) +} + +// SetDeprecated sets the "deprecated" field. +func (u *NodeVersionUpsertOne) SetDeprecated(v bool) *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.SetDeprecated(v) + }) +} + +// UpdateDeprecated sets the "deprecated" field to the value that was provided on create. +func (u *NodeVersionUpsertOne) UpdateDeprecated() *NodeVersionUpsertOne { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateDeprecated() + }) +} + +// Exec executes the query. +func (u *NodeVersionUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NodeVersionCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *NodeVersionUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *NodeVersionUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: NodeVersionUpsertOne.ID is not supported by MySQL driver. Use NodeVersionUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *NodeVersionUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// NodeVersionCreateBulk is the builder for creating many NodeVersion entities in bulk. +type NodeVersionCreateBulk struct { + config + err error + builders []*NodeVersionCreate + conflict []sql.ConflictOption +} + +// Save creates the NodeVersion entities in the database. +func (nvcb *NodeVersionCreateBulk) Save(ctx context.Context) ([]*NodeVersion, error) { + if nvcb.err != nil { + return nil, nvcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(nvcb.builders)) + nodes := make([]*NodeVersion, len(nvcb.builders)) + mutators := make([]Mutator, len(nvcb.builders)) + for i := range nvcb.builders { + func(i int, root context.Context) { + builder := nvcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*NodeVersionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, nvcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = nvcb.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, nvcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, nvcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (nvcb *NodeVersionCreateBulk) SaveX(ctx context.Context) []*NodeVersion { + v, err := nvcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (nvcb *NodeVersionCreateBulk) Exec(ctx context.Context) error { + _, err := nvcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvcb *NodeVersionCreateBulk) ExecX(ctx context.Context) { + if err := nvcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.NodeVersion.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NodeVersionUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (nvcb *NodeVersionCreateBulk) OnConflict(opts ...sql.ConflictOption) *NodeVersionUpsertBulk { + nvcb.conflict = opts + return &NodeVersionUpsertBulk{ + create: nvcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (nvcb *NodeVersionCreateBulk) OnConflictColumns(columns ...string) *NodeVersionUpsertBulk { + nvcb.conflict = append(nvcb.conflict, sql.ConflictColumns(columns...)) + return &NodeVersionUpsertBulk{ + create: nvcb, + } +} + +// NodeVersionUpsertBulk is the builder for "upsert"-ing +// a bulk of NodeVersion nodes. +type NodeVersionUpsertBulk struct { + create *NodeVersionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(nodeversion.FieldID) +// }), +// ). +// Exec(ctx) +func (u *NodeVersionUpsertBulk) UpdateNewValues() *NodeVersionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(nodeversion.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(nodeversion.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.NodeVersion.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NodeVersionUpsertBulk) Ignore() *NodeVersionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NodeVersionUpsertBulk) DoNothing() *NodeVersionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NodeVersionCreateBulk.OnConflict +// documentation for more info. 
+func (u *NodeVersionUpsertBulk) Update(set func(*NodeVersionUpsert)) *NodeVersionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NodeVersionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *NodeVersionUpsertBulk) SetUpdateTime(v time.Time) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *NodeVersionUpsertBulk) UpdateUpdateTime() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetNodeID sets the "node_id" field. +func (u *NodeVersionUpsertBulk) SetNodeID(v string) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetNodeID(v) + }) +} + +// UpdateNodeID sets the "node_id" field to the value that was provided on create. +func (u *NodeVersionUpsertBulk) UpdateNodeID() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateNodeID() + }) +} + +// SetVersion sets the "version" field. +func (u *NodeVersionUpsertBulk) SetVersion(v string) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetVersion(v) + }) +} + +// UpdateVersion sets the "version" field to the value that was provided on create. +func (u *NodeVersionUpsertBulk) UpdateVersion() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateVersion() + }) +} + +// SetChangelog sets the "changelog" field. +func (u *NodeVersionUpsertBulk) SetChangelog(v string) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetChangelog(v) + }) +} + +// UpdateChangelog sets the "changelog" field to the value that was provided on create. 
+func (u *NodeVersionUpsertBulk) UpdateChangelog() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateChangelog() + }) +} + +// ClearChangelog clears the value of the "changelog" field. +func (u *NodeVersionUpsertBulk) ClearChangelog() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.ClearChangelog() + }) +} + +// SetPipDependencies sets the "pip_dependencies" field. +func (u *NodeVersionUpsertBulk) SetPipDependencies(v []string) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetPipDependencies(v) + }) +} + +// UpdatePipDependencies sets the "pip_dependencies" field to the value that was provided on create. +func (u *NodeVersionUpsertBulk) UpdatePipDependencies() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdatePipDependencies() + }) +} + +// SetDeprecated sets the "deprecated" field. +func (u *NodeVersionUpsertBulk) SetDeprecated(v bool) *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.SetDeprecated(v) + }) +} + +// UpdateDeprecated sets the "deprecated" field to the value that was provided on create. +func (u *NodeVersionUpsertBulk) UpdateDeprecated() *NodeVersionUpsertBulk { + return u.Update(func(s *NodeVersionUpsert) { + s.UpdateDeprecated() + }) +} + +// Exec executes the query. +func (u *NodeVersionUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the NodeVersionCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NodeVersionCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *NodeVersionUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/nodeversion_delete.go b/ent/nodeversion_delete.go new file mode 100644 index 0000000..5262305 --- /dev/null +++ b/ent/nodeversion_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/nodeversion" + "registry-backend/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// NodeVersionDelete is the builder for deleting a NodeVersion entity. +type NodeVersionDelete struct { + config + hooks []Hook + mutation *NodeVersionMutation +} + +// Where appends a list predicates to the NodeVersionDelete builder. +func (nvd *NodeVersionDelete) Where(ps ...predicate.NodeVersion) *NodeVersionDelete { + nvd.mutation.Where(ps...) + return nvd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (nvd *NodeVersionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, nvd.sqlExec, nvd.mutation, nvd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvd *NodeVersionDelete) ExecX(ctx context.Context) int { + n, err := nvd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (nvd *NodeVersionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(nodeversion.Table, sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID)) + if ps := nvd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, nvd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + nvd.mutation.done = true + return affected, err +} + +// NodeVersionDeleteOne is the builder for deleting a single NodeVersion entity. 
+type NodeVersionDeleteOne struct { + nvd *NodeVersionDelete +} + +// Where appends a list predicates to the NodeVersionDelete builder. +func (nvdo *NodeVersionDeleteOne) Where(ps ...predicate.NodeVersion) *NodeVersionDeleteOne { + nvdo.nvd.mutation.Where(ps...) + return nvdo +} + +// Exec executes the deletion query. +func (nvdo *NodeVersionDeleteOne) Exec(ctx context.Context) error { + n, err := nvdo.nvd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{nodeversion.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvdo *NodeVersionDeleteOne) ExecX(ctx context.Context) { + if err := nvdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/nodeversion_query.go b/ent/nodeversion_query.go new file mode 100644 index 0000000..7f0cb2c --- /dev/null +++ b/ent/nodeversion_query.go @@ -0,0 +1,726 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/predicate" + "registry-backend/ent/storagefile" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// NodeVersionQuery is the builder for querying NodeVersion entities. +type NodeVersionQuery struct { + config + ctx *QueryContext + order []nodeversion.OrderOption + inters []Interceptor + predicates []predicate.NodeVersion + withNode *NodeQuery + withStorageFile *StorageFileQuery + withFKs bool + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the NodeVersionQuery builder. +func (nvq *NodeVersionQuery) Where(ps ...predicate.NodeVersion) *NodeVersionQuery { + nvq.predicates = append(nvq.predicates, ps...) 
+ return nvq +} + +// Limit the number of records to be returned by this query. +func (nvq *NodeVersionQuery) Limit(limit int) *NodeVersionQuery { + nvq.ctx.Limit = &limit + return nvq +} + +// Offset to start from. +func (nvq *NodeVersionQuery) Offset(offset int) *NodeVersionQuery { + nvq.ctx.Offset = &offset + return nvq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (nvq *NodeVersionQuery) Unique(unique bool) *NodeVersionQuery { + nvq.ctx.Unique = &unique + return nvq +} + +// Order specifies how the records should be ordered. +func (nvq *NodeVersionQuery) Order(o ...nodeversion.OrderOption) *NodeVersionQuery { + nvq.order = append(nvq.order, o...) + return nvq +} + +// QueryNode chains the current query on the "node" edge. +func (nvq *NodeVersionQuery) QueryNode() *NodeQuery { + query := (&NodeClient{config: nvq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nvq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nvq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(nodeversion.Table, nodeversion.FieldID, selector), + sqlgraph.To(node.Table, node.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, nodeversion.NodeTable, nodeversion.NodeColumn), + ) + fromU = sqlgraph.SetNeighbors(nvq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryStorageFile chains the current query on the "storage_file" edge. 
+func (nvq *NodeVersionQuery) QueryStorageFile() *StorageFileQuery { + query := (&StorageFileClient{config: nvq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nvq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nvq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(nodeversion.Table, nodeversion.FieldID, selector), + sqlgraph.To(storagefile.Table, storagefile.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, nodeversion.StorageFileTable, nodeversion.StorageFileColumn), + ) + fromU = sqlgraph.SetNeighbors(nvq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first NodeVersion entity from the query. +// Returns a *NotFoundError when no NodeVersion was found. +func (nvq *NodeVersionQuery) First(ctx context.Context) (*NodeVersion, error) { + nodes, err := nvq.Limit(1).All(setContextOp(ctx, nvq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{nodeversion.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (nvq *NodeVersionQuery) FirstX(ctx context.Context) *NodeVersion { + node, err := nvq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first NodeVersion ID from the query. +// Returns a *NotFoundError when no NodeVersion ID was found. +func (nvq *NodeVersionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nvq.Limit(1).IDs(setContextOp(ctx, nvq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{nodeversion.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (nvq *NodeVersionQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := nvq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single NodeVersion entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one NodeVersion entity is found. +// Returns a *NotFoundError when no NodeVersion entities are found. +func (nvq *NodeVersionQuery) Only(ctx context.Context) (*NodeVersion, error) { + nodes, err := nvq.Limit(2).All(setContextOp(ctx, nvq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{nodeversion.Label} + default: + return nil, &NotSingularError{nodeversion.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (nvq *NodeVersionQuery) OnlyX(ctx context.Context) *NodeVersion { + node, err := nvq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only NodeVersion ID in the query. +// Returns a *NotSingularError when more than one NodeVersion ID is found. +// Returns a *NotFoundError when no entities are found. +func (nvq *NodeVersionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nvq.Limit(2).IDs(setContextOp(ctx, nvq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{nodeversion.Label} + default: + err = &NotSingularError{nodeversion.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (nvq *NodeVersionQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := nvq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of NodeVersions. 
+func (nvq *NodeVersionQuery) All(ctx context.Context) ([]*NodeVersion, error) { + ctx = setContextOp(ctx, nvq.ctx, "All") + if err := nvq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*NodeVersion, *NodeVersionQuery]() + return withInterceptors[[]*NodeVersion](ctx, nvq, qr, nvq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (nvq *NodeVersionQuery) AllX(ctx context.Context) []*NodeVersion { + nodes, err := nvq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of NodeVersion IDs. +func (nvq *NodeVersionQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if nvq.ctx.Unique == nil && nvq.path != nil { + nvq.Unique(true) + } + ctx = setContextOp(ctx, nvq.ctx, "IDs") + if err = nvq.Select(nodeversion.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (nvq *NodeVersionQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := nvq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (nvq *NodeVersionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, nvq.ctx, "Count") + if err := nvq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, nvq, querierCount[*NodeVersionQuery](), nvq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (nvq *NodeVersionQuery) CountX(ctx context.Context) int { + count, err := nvq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (nvq *NodeVersionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, nvq.ctx, "Exist") + switch _, err := nvq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (nvq *NodeVersionQuery) ExistX(ctx context.Context) bool { + exist, err := nvq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the NodeVersionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (nvq *NodeVersionQuery) Clone() *NodeVersionQuery { + if nvq == nil { + return nil + } + return &NodeVersionQuery{ + config: nvq.config, + ctx: nvq.ctx.Clone(), + order: append([]nodeversion.OrderOption{}, nvq.order...), + inters: append([]Interceptor{}, nvq.inters...), + predicates: append([]predicate.NodeVersion{}, nvq.predicates...), + withNode: nvq.withNode.Clone(), + withStorageFile: nvq.withStorageFile.Clone(), + // clone intermediate query. + sql: nvq.sql.Clone(), + path: nvq.path, + } +} + +// WithNode tells the query-builder to eager-load the nodes that are connected to +// the "node" edge. The optional arguments are used to configure the query builder of the edge. +func (nvq *NodeVersionQuery) WithNode(opts ...func(*NodeQuery)) *NodeVersionQuery { + query := (&NodeClient{config: nvq.config}).Query() + for _, opt := range opts { + opt(query) + } + nvq.withNode = query + return nvq +} + +// WithStorageFile tells the query-builder to eager-load the nodes that are connected to +// the "storage_file" edge. The optional arguments are used to configure the query builder of the edge. 
+func (nvq *NodeVersionQuery) WithStorageFile(opts ...func(*StorageFileQuery)) *NodeVersionQuery { + query := (&StorageFileClient{config: nvq.config}).Query() + for _, opt := range opts { + opt(query) + } + nvq.withStorageFile = query + return nvq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.NodeVersion.Query(). +// GroupBy(nodeversion.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (nvq *NodeVersionQuery) GroupBy(field string, fields ...string) *NodeVersionGroupBy { + nvq.ctx.Fields = append([]string{field}, fields...) + grbuild := &NodeVersionGroupBy{build: nvq} + grbuild.flds = &nvq.ctx.Fields + grbuild.label = nodeversion.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.NodeVersion.Query(). +// Select(nodeversion.FieldCreateTime). +// Scan(ctx, &v) +func (nvq *NodeVersionQuery) Select(fields ...string) *NodeVersionSelect { + nvq.ctx.Fields = append(nvq.ctx.Fields, fields...) + sbuild := &NodeVersionSelect{NodeVersionQuery: nvq} + sbuild.label = nodeversion.Label + sbuild.flds, sbuild.scan = &nvq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a NodeVersionSelect configured with the given aggregations. +func (nvq *NodeVersionQuery) Aggregate(fns ...AggregateFunc) *NodeVersionSelect { + return nvq.Select().Aggregate(fns...) 
+} + +func (nvq *NodeVersionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range nvq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, nvq); err != nil { + return err + } + } + } + for _, f := range nvq.ctx.Fields { + if !nodeversion.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if nvq.path != nil { + prev, err := nvq.path(ctx) + if err != nil { + return err + } + nvq.sql = prev + } + return nil +} + +func (nvq *NodeVersionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*NodeVersion, error) { + var ( + nodes = []*NodeVersion{} + withFKs = nvq.withFKs + _spec = nvq.querySpec() + loadedTypes = [2]bool{ + nvq.withNode != nil, + nvq.withStorageFile != nil, + } + ) + if nvq.withStorageFile != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, nodeversion.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*NodeVersion).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &NodeVersion{config: nvq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(nvq.modifiers) > 0 { + _spec.Modifiers = nvq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, nvq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := nvq.withNode; query != nil { + if err := nvq.loadNode(ctx, query, nodes, nil, + func(n *NodeVersion, e *Node) { n.Edges.Node = e }); err != nil { + return nil, err + } + } + if query := nvq.withStorageFile; query != nil { + if err := nvq.loadStorageFile(ctx, query, nodes, nil, + func(n *NodeVersion, e *StorageFile) { n.Edges.StorageFile = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (nvq *NodeVersionQuery) loadNode(ctx context.Context, query *NodeQuery, nodes []*NodeVersion, init func(*NodeVersion), assign func(*NodeVersion, *Node)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*NodeVersion) + for i := range nodes { + fk := nodes[i].NodeID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(node.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "node_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (nvq *NodeVersionQuery) loadStorageFile(ctx context.Context, query *StorageFileQuery, nodes []*NodeVersion, init func(*NodeVersion), assign func(*NodeVersion, *StorageFile)) error { + ids := 
make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*NodeVersion) + for i := range nodes { + if nodes[i].node_version_storage_file == nil { + continue + } + fk := *nodes[i].node_version_storage_file + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(storagefile.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "node_version_storage_file" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (nvq *NodeVersionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := nvq.querySpec() + if len(nvq.modifiers) > 0 { + _spec.Modifiers = nvq.modifiers + } + _spec.Node.Columns = nvq.ctx.Fields + if len(nvq.ctx.Fields) > 0 { + _spec.Unique = nvq.ctx.Unique != nil && *nvq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, nvq.driver, _spec) +} + +func (nvq *NodeVersionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(nodeversion.Table, nodeversion.Columns, sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID)) + _spec.From = nvq.sql + if unique := nvq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if nvq.path != nil { + _spec.Unique = true + } + if fields := nvq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, nodeversion.FieldID) + for i := range fields { + if fields[i] != nodeversion.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if nvq.withNode != nil { + _spec.Node.AddColumnOnce(nodeversion.FieldNodeID) + } + } + if ps := nvq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := nvq.ctx.Limit; limit != 
nil { + _spec.Limit = *limit + } + if offset := nvq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := nvq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (nvq *NodeVersionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(nvq.driver.Dialect()) + t1 := builder.Table(nodeversion.Table) + columns := nvq.ctx.Fields + if len(columns) == 0 { + columns = nodeversion.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if nvq.sql != nil { + selector = nvq.sql + selector.Select(selector.Columns(columns...)...) + } + if nvq.ctx.Unique != nil && *nvq.ctx.Unique { + selector.Distinct() + } + for _, m := range nvq.modifiers { + m(selector) + } + for _, p := range nvq.predicates { + p(selector) + } + for _, p := range nvq.order { + p(selector) + } + if offset := nvq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := nvq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (nvq *NodeVersionQuery) ForUpdate(opts ...sql.LockOption) *NodeVersionQuery { + if nvq.driver.Dialect() == dialect.Postgres { + nvq.Unique(false) + } + nvq.modifiers = append(nvq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return nvq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (nvq *NodeVersionQuery) ForShare(opts ...sql.LockOption) *NodeVersionQuery { + if nvq.driver.Dialect() == dialect.Postgres { + nvq.Unique(false) + } + nvq.modifiers = append(nvq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return nvq +} + +// NodeVersionGroupBy is the group-by builder for NodeVersion entities. +type NodeVersionGroupBy struct { + selector + build *NodeVersionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (nvgb *NodeVersionGroupBy) Aggregate(fns ...AggregateFunc) *NodeVersionGroupBy { + nvgb.fns = append(nvgb.fns, fns...) + return nvgb +} + +// Scan applies the selector query and scans the result into the given value. +func (nvgb *NodeVersionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, nvgb.build.ctx, "GroupBy") + if err := nvgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NodeVersionQuery, *NodeVersionGroupBy](ctx, nvgb.build, nvgb, nvgb.build.inters, v) +} + +func (nvgb *NodeVersionGroupBy) sqlScan(ctx context.Context, root *NodeVersionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(nvgb.fns)) + for _, fn := range nvgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*nvgb.flds)+len(nvgb.fns)) + for _, f := range *nvgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*nvgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := nvgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// NodeVersionSelect is the builder for selecting fields of NodeVersion entities. 
+type NodeVersionSelect struct { + *NodeVersionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (nvs *NodeVersionSelect) Aggregate(fns ...AggregateFunc) *NodeVersionSelect { + nvs.fns = append(nvs.fns, fns...) + return nvs +} + +// Scan applies the selector query and scans the result into the given value. +func (nvs *NodeVersionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, nvs.ctx, "Select") + if err := nvs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NodeVersionQuery, *NodeVersionSelect](ctx, nvs.NodeVersionQuery, nvs, nvs.inters, v) +} + +func (nvs *NodeVersionSelect) sqlScan(ctx context.Context, root *NodeVersionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(nvs.fns)) + for _, fn := range nvs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*nvs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := nvs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/nodeversion_update.go b/ent/nodeversion_update.go new file mode 100644 index 0000000..65dd779 --- /dev/null +++ b/ent/nodeversion_update.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/predicate" + "registry-backend/ent/storagefile" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// NodeVersionUpdate is the builder for updating NodeVersion entities. 
+type NodeVersionUpdate struct { + config + hooks []Hook + mutation *NodeVersionMutation +} + +// Where appends a list predicates to the NodeVersionUpdate builder. +func (nvu *NodeVersionUpdate) Where(ps ...predicate.NodeVersion) *NodeVersionUpdate { + nvu.mutation.Where(ps...) + return nvu +} + +// SetUpdateTime sets the "update_time" field. +func (nvu *NodeVersionUpdate) SetUpdateTime(t time.Time) *NodeVersionUpdate { + nvu.mutation.SetUpdateTime(t) + return nvu +} + +// SetNodeID sets the "node_id" field. +func (nvu *NodeVersionUpdate) SetNodeID(s string) *NodeVersionUpdate { + nvu.mutation.SetNodeID(s) + return nvu +} + +// SetNillableNodeID sets the "node_id" field if the given value is not nil. +func (nvu *NodeVersionUpdate) SetNillableNodeID(s *string) *NodeVersionUpdate { + if s != nil { + nvu.SetNodeID(*s) + } + return nvu +} + +// SetVersion sets the "version" field. +func (nvu *NodeVersionUpdate) SetVersion(s string) *NodeVersionUpdate { + nvu.mutation.SetVersion(s) + return nvu +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (nvu *NodeVersionUpdate) SetNillableVersion(s *string) *NodeVersionUpdate { + if s != nil { + nvu.SetVersion(*s) + } + return nvu +} + +// SetChangelog sets the "changelog" field. +func (nvu *NodeVersionUpdate) SetChangelog(s string) *NodeVersionUpdate { + nvu.mutation.SetChangelog(s) + return nvu +} + +// SetNillableChangelog sets the "changelog" field if the given value is not nil. +func (nvu *NodeVersionUpdate) SetNillableChangelog(s *string) *NodeVersionUpdate { + if s != nil { + nvu.SetChangelog(*s) + } + return nvu +} + +// ClearChangelog clears the value of the "changelog" field. +func (nvu *NodeVersionUpdate) ClearChangelog() *NodeVersionUpdate { + nvu.mutation.ClearChangelog() + return nvu +} + +// SetPipDependencies sets the "pip_dependencies" field. 
+func (nvu *NodeVersionUpdate) SetPipDependencies(s []string) *NodeVersionUpdate { + nvu.mutation.SetPipDependencies(s) + return nvu +} + +// AppendPipDependencies appends s to the "pip_dependencies" field. +func (nvu *NodeVersionUpdate) AppendPipDependencies(s []string) *NodeVersionUpdate { + nvu.mutation.AppendPipDependencies(s) + return nvu +} + +// SetDeprecated sets the "deprecated" field. +func (nvu *NodeVersionUpdate) SetDeprecated(b bool) *NodeVersionUpdate { + nvu.mutation.SetDeprecated(b) + return nvu +} + +// SetNillableDeprecated sets the "deprecated" field if the given value is not nil. +func (nvu *NodeVersionUpdate) SetNillableDeprecated(b *bool) *NodeVersionUpdate { + if b != nil { + nvu.SetDeprecated(*b) + } + return nvu +} + +// SetNode sets the "node" edge to the Node entity. +func (nvu *NodeVersionUpdate) SetNode(n *Node) *NodeVersionUpdate { + return nvu.SetNodeID(n.ID) +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID. +func (nvu *NodeVersionUpdate) SetStorageFileID(id uuid.UUID) *NodeVersionUpdate { + nvu.mutation.SetStorageFileID(id) + return nvu +} + +// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil. +func (nvu *NodeVersionUpdate) SetNillableStorageFileID(id *uuid.UUID) *NodeVersionUpdate { + if id != nil { + nvu = nvu.SetStorageFileID(*id) + } + return nvu +} + +// SetStorageFile sets the "storage_file" edge to the StorageFile entity. +func (nvu *NodeVersionUpdate) SetStorageFile(s *StorageFile) *NodeVersionUpdate { + return nvu.SetStorageFileID(s.ID) +} + +// Mutation returns the NodeVersionMutation object of the builder. +func (nvu *NodeVersionUpdate) Mutation() *NodeVersionMutation { + return nvu.mutation +} + +// ClearNode clears the "node" edge to the Node entity. 
+func (nvu *NodeVersionUpdate) ClearNode() *NodeVersionUpdate { + nvu.mutation.ClearNode() + return nvu +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. +func (nvu *NodeVersionUpdate) ClearStorageFile() *NodeVersionUpdate { + nvu.mutation.ClearStorageFile() + return nvu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (nvu *NodeVersionUpdate) Save(ctx context.Context) (int, error) { + nvu.defaults() + return withHooks(ctx, nvu.sqlSave, nvu.mutation, nvu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (nvu *NodeVersionUpdate) SaveX(ctx context.Context) int { + affected, err := nvu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (nvu *NodeVersionUpdate) Exec(ctx context.Context) error { + _, err := nvu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvu *NodeVersionUpdate) ExecX(ctx context.Context) { + if err := nvu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nvu *NodeVersionUpdate) defaults() { + if _, ok := nvu.mutation.UpdateTime(); !ok { + v := nodeversion.UpdateDefaultUpdateTime() + nvu.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nvu *NodeVersionUpdate) check() error { + if _, ok := nvu.mutation.NodeID(); nvu.mutation.NodeCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "NodeVersion.node"`) + } + return nil +} + +func (nvu *NodeVersionUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := nvu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(nodeversion.Table, nodeversion.Columns, sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID)) + if ps := nvu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := nvu.mutation.UpdateTime(); ok { + _spec.SetField(nodeversion.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := nvu.mutation.Version(); ok { + _spec.SetField(nodeversion.FieldVersion, field.TypeString, value) + } + if value, ok := nvu.mutation.Changelog(); ok { + _spec.SetField(nodeversion.FieldChangelog, field.TypeString, value) + } + if nvu.mutation.ChangelogCleared() { + _spec.ClearField(nodeversion.FieldChangelog, field.TypeString) + } + if value, ok := nvu.mutation.PipDependencies(); ok { + _spec.SetField(nodeversion.FieldPipDependencies, field.TypeJSON, value) + } + if value, ok := nvu.mutation.AppendedPipDependencies(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, nodeversion.FieldPipDependencies, value) + }) + } + if value, ok := nvu.mutation.Deprecated(); ok { + _spec.SetField(nodeversion.FieldDeprecated, field.TypeBool, value) + } + if nvu.mutation.NodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: nodeversion.NodeTable, + Columns: []string{nodeversion.NodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nvu.mutation.NodeIDs(); len(nodes) > 0 { + edge := 
&sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: nodeversion.NodeTable, + Columns: []string{nodeversion.NodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nvu.mutation.StorageFileCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: nodeversion.StorageFileTable, + Columns: []string{nodeversion.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nvu.mutation.StorageFileIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: nodeversion.StorageFileTable, + Columns: []string{nodeversion.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, nvu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{nodeversion.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + nvu.mutation.done = true + return n, nil +} + +// NodeVersionUpdateOne is the builder for updating a single NodeVersion entity. +type NodeVersionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *NodeVersionMutation +} + +// SetUpdateTime sets the "update_time" field. 
+func (nvuo *NodeVersionUpdateOne) SetUpdateTime(t time.Time) *NodeVersionUpdateOne { + nvuo.mutation.SetUpdateTime(t) + return nvuo +} + +// SetNodeID sets the "node_id" field. +func (nvuo *NodeVersionUpdateOne) SetNodeID(s string) *NodeVersionUpdateOne { + nvuo.mutation.SetNodeID(s) + return nvuo +} + +// SetNillableNodeID sets the "node_id" field if the given value is not nil. +func (nvuo *NodeVersionUpdateOne) SetNillableNodeID(s *string) *NodeVersionUpdateOne { + if s != nil { + nvuo.SetNodeID(*s) + } + return nvuo +} + +// SetVersion sets the "version" field. +func (nvuo *NodeVersionUpdateOne) SetVersion(s string) *NodeVersionUpdateOne { + nvuo.mutation.SetVersion(s) + return nvuo +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (nvuo *NodeVersionUpdateOne) SetNillableVersion(s *string) *NodeVersionUpdateOne { + if s != nil { + nvuo.SetVersion(*s) + } + return nvuo +} + +// SetChangelog sets the "changelog" field. +func (nvuo *NodeVersionUpdateOne) SetChangelog(s string) *NodeVersionUpdateOne { + nvuo.mutation.SetChangelog(s) + return nvuo +} + +// SetNillableChangelog sets the "changelog" field if the given value is not nil. +func (nvuo *NodeVersionUpdateOne) SetNillableChangelog(s *string) *NodeVersionUpdateOne { + if s != nil { + nvuo.SetChangelog(*s) + } + return nvuo +} + +// ClearChangelog clears the value of the "changelog" field. +func (nvuo *NodeVersionUpdateOne) ClearChangelog() *NodeVersionUpdateOne { + nvuo.mutation.ClearChangelog() + return nvuo +} + +// SetPipDependencies sets the "pip_dependencies" field. +func (nvuo *NodeVersionUpdateOne) SetPipDependencies(s []string) *NodeVersionUpdateOne { + nvuo.mutation.SetPipDependencies(s) + return nvuo +} + +// AppendPipDependencies appends s to the "pip_dependencies" field. 
+func (nvuo *NodeVersionUpdateOne) AppendPipDependencies(s []string) *NodeVersionUpdateOne { + nvuo.mutation.AppendPipDependencies(s) + return nvuo +} + +// SetDeprecated sets the "deprecated" field. +func (nvuo *NodeVersionUpdateOne) SetDeprecated(b bool) *NodeVersionUpdateOne { + nvuo.mutation.SetDeprecated(b) + return nvuo +} + +// SetNillableDeprecated sets the "deprecated" field if the given value is not nil. +func (nvuo *NodeVersionUpdateOne) SetNillableDeprecated(b *bool) *NodeVersionUpdateOne { + if b != nil { + nvuo.SetDeprecated(*b) + } + return nvuo +} + +// SetNode sets the "node" edge to the Node entity. +func (nvuo *NodeVersionUpdateOne) SetNode(n *Node) *NodeVersionUpdateOne { + return nvuo.SetNodeID(n.ID) +} + +// SetStorageFileID sets the "storage_file" edge to the StorageFile entity by ID. +func (nvuo *NodeVersionUpdateOne) SetStorageFileID(id uuid.UUID) *NodeVersionUpdateOne { + nvuo.mutation.SetStorageFileID(id) + return nvuo +} + +// SetNillableStorageFileID sets the "storage_file" edge to the StorageFile entity by ID if the given value is not nil. +func (nvuo *NodeVersionUpdateOne) SetNillableStorageFileID(id *uuid.UUID) *NodeVersionUpdateOne { + if id != nil { + nvuo = nvuo.SetStorageFileID(*id) + } + return nvuo +} + +// SetStorageFile sets the "storage_file" edge to the StorageFile entity. +func (nvuo *NodeVersionUpdateOne) SetStorageFile(s *StorageFile) *NodeVersionUpdateOne { + return nvuo.SetStorageFileID(s.ID) +} + +// Mutation returns the NodeVersionMutation object of the builder. +func (nvuo *NodeVersionUpdateOne) Mutation() *NodeVersionMutation { + return nvuo.mutation +} + +// ClearNode clears the "node" edge to the Node entity. +func (nvuo *NodeVersionUpdateOne) ClearNode() *NodeVersionUpdateOne { + nvuo.mutation.ClearNode() + return nvuo +} + +// ClearStorageFile clears the "storage_file" edge to the StorageFile entity. 
+func (nvuo *NodeVersionUpdateOne) ClearStorageFile() *NodeVersionUpdateOne { + nvuo.mutation.ClearStorageFile() + return nvuo +} + +// Where appends a list predicates to the NodeVersionUpdate builder. +func (nvuo *NodeVersionUpdateOne) Where(ps ...predicate.NodeVersion) *NodeVersionUpdateOne { + nvuo.mutation.Where(ps...) + return nvuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (nvuo *NodeVersionUpdateOne) Select(field string, fields ...string) *NodeVersionUpdateOne { + nvuo.fields = append([]string{field}, fields...) + return nvuo +} + +// Save executes the query and returns the updated NodeVersion entity. +func (nvuo *NodeVersionUpdateOne) Save(ctx context.Context) (*NodeVersion, error) { + nvuo.defaults() + return withHooks(ctx, nvuo.sqlSave, nvuo.mutation, nvuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (nvuo *NodeVersionUpdateOne) SaveX(ctx context.Context) *NodeVersion { + node, err := nvuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (nvuo *NodeVersionUpdateOne) Exec(ctx context.Context) error { + _, err := nvuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nvuo *NodeVersionUpdateOne) ExecX(ctx context.Context) { + if err := nvuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nvuo *NodeVersionUpdateOne) defaults() { + if _, ok := nvuo.mutation.UpdateTime(); !ok { + v := nodeversion.UpdateDefaultUpdateTime() + nvuo.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nvuo *NodeVersionUpdateOne) check() error { + if _, ok := nvuo.mutation.NodeID(); nvuo.mutation.NodeCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "NodeVersion.node"`) + } + return nil +} + +func (nvuo *NodeVersionUpdateOne) sqlSave(ctx context.Context) (_node *NodeVersion, err error) { + if err := nvuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(nodeversion.Table, nodeversion.Columns, sqlgraph.NewFieldSpec(nodeversion.FieldID, field.TypeUUID)) + id, ok := nvuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "NodeVersion.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := nvuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, nodeversion.FieldID) + for _, f := range fields { + if !nodeversion.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != nodeversion.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := nvuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := nvuo.mutation.UpdateTime(); ok { + _spec.SetField(nodeversion.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := nvuo.mutation.Version(); ok { + _spec.SetField(nodeversion.FieldVersion, field.TypeString, value) + } + if value, ok := nvuo.mutation.Changelog(); ok { + _spec.SetField(nodeversion.FieldChangelog, field.TypeString, value) + } + if nvuo.mutation.ChangelogCleared() { + _spec.ClearField(nodeversion.FieldChangelog, field.TypeString) + } + if value, ok := nvuo.mutation.PipDependencies(); ok { + _spec.SetField(nodeversion.FieldPipDependencies, field.TypeJSON, value) + } + if value, ok := nvuo.mutation.AppendedPipDependencies(); ok { + _spec.AddModifier(func(u 
*sql.UpdateBuilder) { + sqljson.Append(u, nodeversion.FieldPipDependencies, value) + }) + } + if value, ok := nvuo.mutation.Deprecated(); ok { + _spec.SetField(nodeversion.FieldDeprecated, field.TypeBool, value) + } + if nvuo.mutation.NodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: nodeversion.NodeTable, + Columns: []string{nodeversion.NodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nvuo.mutation.NodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: nodeversion.NodeTable, + Columns: []string{nodeversion.NodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nvuo.mutation.StorageFileCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: nodeversion.StorageFileTable, + Columns: []string{nodeversion.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nvuo.mutation.StorageFileIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: nodeversion.StorageFileTable, + Columns: []string{nodeversion.StorageFileColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &NodeVersion{config: nvuo.config} + _spec.Assign = _node.assignValues + 
_spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, nvuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{nodeversion.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + nvuo.mutation.done = true + return _node, nil +} diff --git a/ent/personalaccesstoken.go b/ent/personalaccesstoken.go new file mode 100644 index 0000000..7acc8ba --- /dev/null +++ b/ent/personalaccesstoken.go @@ -0,0 +1,190 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +// PersonalAccessToken is the model entity for the PersonalAccessToken schema. +type PersonalAccessToken struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. + UpdateTime time.Time `json:"update_time,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // PublisherID holds the value of the "publisher_id" field. + PublisherID string `json:"publisher_id,omitempty"` + // Token holds the value of the "token" field. + Token string `json:"-"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PersonalAccessTokenQuery when eager-loading is set. + Edges PersonalAccessTokenEdges `json:"edges"` + selectValues sql.SelectValues +} + +// PersonalAccessTokenEdges holds the relations/edges for other nodes in the graph. 
+type PersonalAccessTokenEdges struct { + // Publisher holds the value of the publisher edge. + Publisher *Publisher `json:"publisher,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// PublisherOrErr returns the Publisher value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PersonalAccessTokenEdges) PublisherOrErr() (*Publisher, error) { + if e.Publisher != nil { + return e.Publisher, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: publisher.Label} + } + return nil, &NotLoadedError{edge: "publisher"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*PersonalAccessToken) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case personalaccesstoken.FieldName, personalaccesstoken.FieldDescription, personalaccesstoken.FieldPublisherID, personalaccesstoken.FieldToken: + values[i] = new(sql.NullString) + case personalaccesstoken.FieldCreateTime, personalaccesstoken.FieldUpdateTime: + values[i] = new(sql.NullTime) + case personalaccesstoken.FieldID: + values[i] = new(uuid.UUID) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PersonalAccessToken fields. 
+func (pat *PersonalAccessToken) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case personalaccesstoken.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + pat.ID = *value + } + case personalaccesstoken.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + pat.CreateTime = value.Time + } + case personalaccesstoken.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + pat.UpdateTime = value.Time + } + case personalaccesstoken.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + pat.Name = value.String + } + case personalaccesstoken.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + pat.Description = value.String + } + case personalaccesstoken.FieldPublisherID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field publisher_id", values[i]) + } else if value.Valid { + pat.PublisherID = value.String + } + case personalaccesstoken.FieldToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value.Valid { + pat.Token = value.String + } + default: + pat.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the 
PersonalAccessToken. +// This includes values selected through modifiers, order, etc. +func (pat *PersonalAccessToken) Value(name string) (ent.Value, error) { + return pat.selectValues.Get(name) +} + +// QueryPublisher queries the "publisher" edge of the PersonalAccessToken entity. +func (pat *PersonalAccessToken) QueryPublisher() *PublisherQuery { + return NewPersonalAccessTokenClient(pat.config).QueryPublisher(pat) +} + +// Update returns a builder for updating this PersonalAccessToken. +// Note that you need to call PersonalAccessToken.Unwrap() before calling this method if this PersonalAccessToken +// was returned from a transaction, and the transaction was committed or rolled back. +func (pat *PersonalAccessToken) Update() *PersonalAccessTokenUpdateOne { + return NewPersonalAccessTokenClient(pat.config).UpdateOne(pat) +} + +// Unwrap unwraps the PersonalAccessToken entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (pat *PersonalAccessToken) Unwrap() *PersonalAccessToken { + _tx, ok := pat.config.driver.(*txDriver) + if !ok { + panic("ent: PersonalAccessToken is not a transactional entity") + } + pat.config.driver = _tx.drv + return pat +} + +// String implements the fmt.Stringer. 
+func (pat *PersonalAccessToken) String() string { + var builder strings.Builder + builder.WriteString("PersonalAccessToken(") + builder.WriteString(fmt.Sprintf("id=%v, ", pat.ID)) + builder.WriteString("create_time=") + builder.WriteString(pat.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(pat.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(pat.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(pat.Description) + builder.WriteString(", ") + builder.WriteString("publisher_id=") + builder.WriteString(pat.PublisherID) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteByte(')') + return builder.String() +} + +// PersonalAccessTokens is a parsable slice of PersonalAccessToken. +type PersonalAccessTokens []*PersonalAccessToken diff --git a/ent/personalaccesstoken/personalaccesstoken.go b/ent/personalaccesstoken/personalaccesstoken.go new file mode 100644 index 0000000..798a7c9 --- /dev/null +++ b/ent/personalaccesstoken/personalaccesstoken.go @@ -0,0 +1,125 @@ +// Code generated by ent, DO NOT EDIT. + +package personalaccesstoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the personalaccesstoken type in the database. + Label = "personal_access_token" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldName holds the string denoting the name field in the database. 
+ FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldPublisherID holds the string denoting the publisher_id field in the database. + FieldPublisherID = "publisher_id" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // EdgePublisher holds the string denoting the publisher edge name in mutations. + EdgePublisher = "publisher" + // Table holds the table name of the personalaccesstoken in the database. + Table = "personal_access_tokens" + // PublisherTable is the table that holds the publisher relation/edge. + PublisherTable = "personal_access_tokens" + // PublisherInverseTable is the table name for the Publisher entity. + // It exists in this package in order to avoid circular dependency with the "publisher" package. + PublisherInverseTable = "publishers" + // PublisherColumn is the table column denoting the publisher relation/edge. + PublisherColumn = "publisher_id" +) + +// Columns holds all SQL columns for personalaccesstoken fields. +var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldName, + FieldDescription, + FieldPublisherID, + FieldToken, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // DefaultID holds the default value on creation for the "id" field. 
+ DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the PersonalAccessToken queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. +func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByPublisherID orders the results by the publisher_id field. +func ByPublisherID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPublisherID, opts...).ToFunc() +} + +// ByToken orders the results by the token field. +func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} + +// ByPublisherField orders the results by publisher field. 
+func ByPublisherField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPublisherStep(), sql.OrderByField(field, opts...)) + } +} +func newPublisherStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PublisherInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn), + ) +} diff --git a/ent/personalaccesstoken/where.go b/ent/personalaccesstoken/where.go new file mode 100644 index 0000000..e725492 --- /dev/null +++ b/ent/personalaccesstoken/where.go @@ -0,0 +1,465 @@ +// Code generated by ent, DO NOT EDIT. + +package personalaccesstoken + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLTE(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ. +func UpdateTime(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldUpdateTime, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldDescription, v)) +} + +// PublisherID applies equality check predicate on the "publisher_id" field. It's identical to PublisherIDEQ. +func PublisherID(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldPublisherID, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. 
+func Token(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldToken, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. +func CreateTimeEQ(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. +func CreateTimeIn(vs ...time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. +func CreateTimeNotIn(vs ...time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. +func CreateTimeGT(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. 
+func UpdateTimeEQ(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. +func UpdateTimeNEQ(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. +func UpdateTimeGT(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. +func UpdateTimeLT(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLTE(FieldUpdateTime, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. 
+func NameNEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. 
+func NameEqualFold(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. 
+func DescriptionLTE(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldContainsFold(FieldDescription, v)) +} + +// PublisherIDEQ applies the EQ predicate on the "publisher_id" field. +func PublisherIDEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldEQ(FieldPublisherID, v)) +} + +// PublisherIDNEQ applies the NEQ predicate on the "publisher_id" field. +func PublisherIDNEQ(v string) predicate.PersonalAccessToken { + return predicate.PersonalAccessToken(sql.FieldNEQ(FieldPublisherID, v)) +} + +// PublisherIDIn applies the In predicate on the "publisher_id" field. 
+func PublisherIDIn(vs ...string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldIn(FieldPublisherID, vs...))
+}
+
+// PublisherIDNotIn applies the NotIn predicate on the "publisher_id" field.
+func PublisherIDNotIn(vs ...string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldNotIn(FieldPublisherID, vs...))
+}
+
+// PublisherIDGT applies the GT predicate on the "publisher_id" field.
+func PublisherIDGT(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldGT(FieldPublisherID, v))
+}
+
+// PublisherIDGTE applies the GTE predicate on the "publisher_id" field.
+func PublisherIDGTE(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldGTE(FieldPublisherID, v))
+}
+
+// PublisherIDLT applies the LT predicate on the "publisher_id" field.
+func PublisherIDLT(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldLT(FieldPublisherID, v))
+}
+
+// PublisherIDLTE applies the LTE predicate on the "publisher_id" field.
+func PublisherIDLTE(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldLTE(FieldPublisherID, v))
+}
+
+// PublisherIDContains applies the Contains predicate on the "publisher_id" field.
+func PublisherIDContains(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldContains(FieldPublisherID, v))
+}
+
+// PublisherIDHasPrefix applies the HasPrefix predicate on the "publisher_id" field.
+func PublisherIDHasPrefix(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldHasPrefix(FieldPublisherID, v))
+}
+
+// PublisherIDHasSuffix applies the HasSuffix predicate on the "publisher_id" field.
+func PublisherIDHasSuffix(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldHasSuffix(FieldPublisherID, v))
+}
+
+// PublisherIDEqualFold applies the EqualFold predicate on the "publisher_id" field.
+func PublisherIDEqualFold(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldEqualFold(FieldPublisherID, v))
+}
+
+// PublisherIDContainsFold applies the ContainsFold predicate on the "publisher_id" field.
+func PublisherIDContainsFold(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldContainsFold(FieldPublisherID, v))
+}
+
+// TokenEQ applies the EQ predicate on the "token" field.
+func TokenEQ(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldEQ(FieldToken, v))
+}
+
+// TokenNEQ applies the NEQ predicate on the "token" field.
+func TokenNEQ(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldNEQ(FieldToken, v))
+}
+
+// TokenIn applies the In predicate on the "token" field.
+func TokenIn(vs ...string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldIn(FieldToken, vs...))
+}
+
+// TokenNotIn applies the NotIn predicate on the "token" field.
+func TokenNotIn(vs ...string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldNotIn(FieldToken, vs...))
+}
+
+// TokenGT applies the GT predicate on the "token" field.
+func TokenGT(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldGT(FieldToken, v))
+}
+
+// TokenGTE applies the GTE predicate on the "token" field.
+func TokenGTE(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldGTE(FieldToken, v))
+}
+
+// TokenLT applies the LT predicate on the "token" field.
+func TokenLT(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldLT(FieldToken, v))
+}
+
+// TokenLTE applies the LTE predicate on the "token" field.
+func TokenLTE(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldLTE(FieldToken, v))
+}
+
+// TokenContains applies the Contains predicate on the "token" field.
+func TokenContains(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldContains(FieldToken, v))
+}
+
+// TokenHasPrefix applies the HasPrefix predicate on the "token" field.
+func TokenHasPrefix(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldHasPrefix(FieldToken, v))
+}
+
+// TokenHasSuffix applies the HasSuffix predicate on the "token" field.
+func TokenHasSuffix(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldHasSuffix(FieldToken, v))
+}
+
+// TokenEqualFold applies the EqualFold predicate on the "token" field.
+func TokenEqualFold(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldEqualFold(FieldToken, v))
+}
+
+// TokenContainsFold applies the ContainsFold predicate on the "token" field.
+func TokenContainsFold(v string) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.FieldContainsFold(FieldToken, v))
+}
+
+// HasPublisher applies the HasEdge predicate on the "publisher" edge.
+func HasPublisher() predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasPublisherWith applies the HasEdge predicate on the "publisher" edge with given conditions (other predicates).
+func HasPublisherWith(preds ...predicate.Publisher) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(func(s *sql.Selector) {
+		step := newPublisherStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.PersonalAccessToken) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.PersonalAccessToken) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.PersonalAccessToken) predicate.PersonalAccessToken {
+	return predicate.PersonalAccessToken(sql.NotPredicates(p))
+}
diff --git a/ent/personalaccesstoken_create.go b/ent/personalaccesstoken_create.go
new file mode 100644
index 0000000..a6d2bf2
--- /dev/null
+++ b/ent/personalaccesstoken_create.go
@@ -0,0 +1,786 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"registry-backend/ent/personalaccesstoken"
+	"registry-backend/ent/publisher"
+	"time"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/google/uuid"
+)
+
+// PersonalAccessTokenCreate is the builder for creating a PersonalAccessToken entity.
+type PersonalAccessTokenCreate struct {
+	config
+	mutation *PersonalAccessTokenMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetCreateTime sets the "create_time" field.
+func (patc *PersonalAccessTokenCreate) SetCreateTime(t time.Time) *PersonalAccessTokenCreate {
+	patc.mutation.SetCreateTime(t)
+	return patc
+}
+
+// SetNillableCreateTime sets the "create_time" field if the given value is not nil.
+func (patc *PersonalAccessTokenCreate) SetNillableCreateTime(t *time.Time) *PersonalAccessTokenCreate {
+	if t != nil {
+		patc.SetCreateTime(*t)
+	}
+	return patc
+}
+
+// SetUpdateTime sets the "update_time" field.
+func (patc *PersonalAccessTokenCreate) SetUpdateTime(t time.Time) *PersonalAccessTokenCreate {
+	patc.mutation.SetUpdateTime(t)
+	return patc
+}
+
+// SetNillableUpdateTime sets the "update_time" field if the given value is not nil.
+func (patc *PersonalAccessTokenCreate) SetNillableUpdateTime(t *time.Time) *PersonalAccessTokenCreate {
+	if t != nil {
+		patc.SetUpdateTime(*t)
+	}
+	return patc
+}
+
+// SetName sets the "name" field.
+func (patc *PersonalAccessTokenCreate) SetName(s string) *PersonalAccessTokenCreate {
+	patc.mutation.SetName(s)
+	return patc
+}
+
+// SetDescription sets the "description" field.
+func (patc *PersonalAccessTokenCreate) SetDescription(s string) *PersonalAccessTokenCreate {
+	patc.mutation.SetDescription(s)
+	return patc
+}
+
+// SetPublisherID sets the "publisher_id" field.
+func (patc *PersonalAccessTokenCreate) SetPublisherID(s string) *PersonalAccessTokenCreate {
+	patc.mutation.SetPublisherID(s)
+	return patc
+}
+
+// SetToken sets the "token" field.
+func (patc *PersonalAccessTokenCreate) SetToken(s string) *PersonalAccessTokenCreate {
+	patc.mutation.SetToken(s)
+	return patc
+}
+
+// SetID sets the "id" field.
+func (patc *PersonalAccessTokenCreate) SetID(u uuid.UUID) *PersonalAccessTokenCreate {
+	patc.mutation.SetID(u)
+	return patc
+}
+
+// SetNillableID sets the "id" field if the given value is not nil.
+func (patc *PersonalAccessTokenCreate) SetNillableID(u *uuid.UUID) *PersonalAccessTokenCreate {
+	if u != nil {
+		patc.SetID(*u)
+	}
+	return patc
+}
+
+// SetPublisher sets the "publisher" edge to the Publisher entity.
+func (patc *PersonalAccessTokenCreate) SetPublisher(p *Publisher) *PersonalAccessTokenCreate {
+	return patc.SetPublisherID(p.ID)
+}
+
+// Mutation returns the PersonalAccessTokenMutation object of the builder.
+func (patc *PersonalAccessTokenCreate) Mutation() *PersonalAccessTokenMutation {
+	return patc.mutation
+}
+
+// Save creates the PersonalAccessToken in the database.
+// It first applies field defaults, then runs validation (check) and any
+// registered hooks before executing the insert.
+func (patc *PersonalAccessTokenCreate) Save(ctx context.Context) (*PersonalAccessToken, error) {
+	patc.defaults()
+	return withHooks(ctx, patc.sqlSave, patc.mutation, patc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (patc *PersonalAccessTokenCreate) SaveX(ctx context.Context) *PersonalAccessToken {
+	v, err := patc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (patc *PersonalAccessTokenCreate) Exec(ctx context.Context) error {
+	_, err := patc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (patc *PersonalAccessTokenCreate) ExecX(ctx context.Context) {
+	if err := patc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (patc *PersonalAccessTokenCreate) defaults() {
+	if _, ok := patc.mutation.CreateTime(); !ok {
+		v := personalaccesstoken.DefaultCreateTime()
+		patc.mutation.SetCreateTime(v)
+	}
+	if _, ok := patc.mutation.UpdateTime(); !ok {
+		v := personalaccesstoken.DefaultUpdateTime()
+		patc.mutation.SetUpdateTime(v)
+	}
+	if _, ok := patc.mutation.ID(); !ok {
+		v := personalaccesstoken.DefaultID()
+		patc.mutation.SetID(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (patc *PersonalAccessTokenCreate) check() error {
+	if _, ok := patc.mutation.CreateTime(); !ok {
+		return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "PersonalAccessToken.create_time"`)}
+	}
+	if _, ok := patc.mutation.UpdateTime(); !ok {
+		return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "PersonalAccessToken.update_time"`)}
+	}
+	if _, ok := patc.mutation.Name(); !ok {
+		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "PersonalAccessToken.name"`)}
+	}
+	if _, ok := patc.mutation.Description(); !ok {
+		return &ValidationError{Name: "description", err: errors.New(`ent: missing required field "PersonalAccessToken.description"`)}
+	}
+	if _, ok := patc.mutation.PublisherID(); !ok {
+		return &ValidationError{Name: "publisher_id", err: errors.New(`ent: missing required field "PersonalAccessToken.publisher_id"`)}
+	}
+	if _, ok := patc.mutation.Token(); !ok {
+		return &ValidationError{Name: "token", err: errors.New(`ent: missing required field "PersonalAccessToken.token"`)}
+	}
+	// PublisherID is checked twice: once as the required "publisher_id" field
+	// above, and here as the required "publisher" edge it backs.
+	if _, ok := patc.mutation.PublisherID(); !ok {
+		return &ValidationError{Name: "publisher", err: errors.New(`ent: missing required edge "PersonalAccessToken.publisher"`)}
+	}
+	return nil
+}
+
+func (patc *PersonalAccessTokenCreate) sqlSave(ctx context.Context) (*PersonalAccessToken, error) {
+	if err := patc.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := patc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, patc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	if _spec.ID.Value != nil {
+		if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
+			_node.ID = *id
+		} else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
+			return nil, err
+		}
+	}
+	patc.mutation.id = &_node.ID
+	patc.mutation.done = true
+	return _node, nil
+}
+
+func (patc *PersonalAccessTokenCreate) createSpec() (*PersonalAccessToken, *sqlgraph.CreateSpec) {
+	var (
+		_node = &PersonalAccessToken{config: patc.config}
+		_spec = sqlgraph.NewCreateSpec(personalaccesstoken.Table, sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID))
+	)
+	_spec.OnConflict = patc.conflict
+	if id, ok := patc.mutation.ID(); ok {
+		_node.ID = id
+		_spec.ID.Value = &id
+	}
+	if value, ok := patc.mutation.CreateTime(); ok {
+		_spec.SetField(personalaccesstoken.FieldCreateTime, field.TypeTime, value)
+		_node.CreateTime = value
+	}
+	if value, ok := patc.mutation.UpdateTime(); ok {
+		_spec.SetField(personalaccesstoken.FieldUpdateTime, field.TypeTime, value)
+		_node.UpdateTime = value
+	}
+	if value, ok := patc.mutation.Name(); ok {
+		_spec.SetField(personalaccesstoken.FieldName, field.TypeString, value)
+		_node.Name = value
+	}
+	if value, ok := patc.mutation.Description(); ok {
+		_spec.SetField(personalaccesstoken.FieldDescription, field.TypeString, value)
+		_node.Description = value
+	}
+	if value, ok := patc.mutation.Token(); ok {
+		_spec.SetField(personalaccesstoken.FieldToken, field.TypeString, value)
+		_node.Token = value
+	}
+	if nodes := patc.mutation.PublisherIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   personalaccesstoken.PublisherTable,
+			Columns: []string{personalaccesstoken.PublisherColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.PublisherID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.PersonalAccessToken.Create().
+//		SetCreateTime(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that was proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.PersonalAccessTokenUpsert) {
+//			SetCreateTime(v+v).
+//		}).
+//		Exec(ctx)
+func (patc *PersonalAccessTokenCreate) OnConflict(opts ...sql.ConflictOption) *PersonalAccessTokenUpsertOne {
+	patc.conflict = opts
+	return &PersonalAccessTokenUpsertOne{
+		create: patc,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (patc *PersonalAccessTokenCreate) OnConflictColumns(columns ...string) *PersonalAccessTokenUpsertOne {
+	patc.conflict = append(patc.conflict, sql.ConflictColumns(columns...))
+	return &PersonalAccessTokenUpsertOne{
+		create: patc,
+	}
+}
+
+type (
+	// PersonalAccessTokenUpsertOne is the builder for "upsert"-ing
+	//  one PersonalAccessToken node.
+	PersonalAccessTokenUpsertOne struct {
+		create *PersonalAccessTokenCreate
+	}
+
+	// PersonalAccessTokenUpsert is the "OnConflict" setter.
+	PersonalAccessTokenUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdateTime sets the "update_time" field.
+func (u *PersonalAccessTokenUpsert) SetUpdateTime(v time.Time) *PersonalAccessTokenUpsert {
+	u.Set(personalaccesstoken.FieldUpdateTime, v)
+	return u
+}
+
+// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsert) UpdateUpdateTime() *PersonalAccessTokenUpsert {
+	u.SetExcluded(personalaccesstoken.FieldUpdateTime)
+	return u
+}
+
+// SetName sets the "name" field.
+func (u *PersonalAccessTokenUpsert) SetName(v string) *PersonalAccessTokenUpsert {
+	u.Set(personalaccesstoken.FieldName, v)
+	return u
+}
+
+// UpdateName sets the "name" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsert) UpdateName() *PersonalAccessTokenUpsert {
+	u.SetExcluded(personalaccesstoken.FieldName)
+	return u
+}
+
+// SetDescription sets the "description" field.
+func (u *PersonalAccessTokenUpsert) SetDescription(v string) *PersonalAccessTokenUpsert {
+	u.Set(personalaccesstoken.FieldDescription, v)
+	return u
+}
+
+// UpdateDescription sets the "description" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsert) UpdateDescription() *PersonalAccessTokenUpsert {
+	u.SetExcluded(personalaccesstoken.FieldDescription)
+	return u
+}
+
+// SetPublisherID sets the "publisher_id" field.
+func (u *PersonalAccessTokenUpsert) SetPublisherID(v string) *PersonalAccessTokenUpsert {
+	u.Set(personalaccesstoken.FieldPublisherID, v)
+	return u
+}
+
+// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsert) UpdatePublisherID() *PersonalAccessTokenUpsert {
+	u.SetExcluded(personalaccesstoken.FieldPublisherID)
+	return u
+}
+
+// SetToken sets the "token" field.
+func (u *PersonalAccessTokenUpsert) SetToken(v string) *PersonalAccessTokenUpsert {
+	u.Set(personalaccesstoken.FieldToken, v)
+	return u
+}
+
+// UpdateToken sets the "token" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsert) UpdateToken() *PersonalAccessTokenUpsert {
+	u.SetExcluded(personalaccesstoken.FieldToken)
+	return u
+}
+
+// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field.
+// Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//			sql.ResolveWith(func(u *sql.UpdateSet) {
+//				u.SetIgnore(personalaccesstoken.FieldID)
+//			}),
+//		).
+//		Exec(ctx)
+func (u *PersonalAccessTokenUpsertOne) UpdateNewValues() *PersonalAccessTokenUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		if _, exists := u.create.mutation.ID(); exists {
+			s.SetIgnore(personalaccesstoken.FieldID)
+		}
+		if _, exists := u.create.mutation.CreateTime(); exists {
+			s.SetIgnore(personalaccesstoken.FieldCreateTime)
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *PersonalAccessTokenUpsertOne) Ignore() *PersonalAccessTokenUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *PersonalAccessTokenUpsertOne) DoNothing() *PersonalAccessTokenUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding fields `UPDATE` values. See the PersonalAccessTokenCreate.OnConflict
+// documentation for more info.
+func (u *PersonalAccessTokenUpsertOne) Update(set func(*PersonalAccessTokenUpsert)) *PersonalAccessTokenUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
+		set(&PersonalAccessTokenUpsert{UpdateSet: update})
+	}))
+	return u
+}
+
+// SetUpdateTime sets the "update_time" field.
+func (u *PersonalAccessTokenUpsertOne) SetUpdateTime(v time.Time) *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetUpdateTime(v)
+	})
+}
+
+// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertOne) UpdateUpdateTime() *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateUpdateTime()
+	})
+}
+
+// SetName sets the "name" field.
+func (u *PersonalAccessTokenUpsertOne) SetName(v string) *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetName(v)
+	})
+}
+
+// UpdateName sets the "name" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertOne) UpdateName() *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateName()
+	})
+}
+
+// SetDescription sets the "description" field.
+func (u *PersonalAccessTokenUpsertOne) SetDescription(v string) *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetDescription(v)
+	})
+}
+
+// UpdateDescription sets the "description" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertOne) UpdateDescription() *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateDescription()
+	})
+}
+
+// SetPublisherID sets the "publisher_id" field.
+func (u *PersonalAccessTokenUpsertOne) SetPublisherID(v string) *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetPublisherID(v)
+	})
+}
+
+// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertOne) UpdatePublisherID() *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdatePublisherID()
+	})
+}
+
+// SetToken sets the "token" field.
+func (u *PersonalAccessTokenUpsertOne) SetToken(v string) *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetToken(v)
+	})
+}
+
+// UpdateToken sets the "token" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertOne) UpdateToken() *PersonalAccessTokenUpsertOne {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateToken()
+	})
+}
+
+// Exec executes the query.
+func (u *PersonalAccessTokenUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for PersonalAccessTokenCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *PersonalAccessTokenUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *PersonalAccessTokenUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) {
+	if u.create.driver.Dialect() == dialect.MySQL {
+		// In case of "ON CONFLICT", there is no way to get back non-numeric ID
+		// fields from the database since MySQL does not support the RETURNING clause.
+		return id, errors.New("ent: PersonalAccessTokenUpsertOne.ID is not supported by MySQL driver. Use PersonalAccessTokenUpsertOne.Exec instead")
+	}
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *PersonalAccessTokenUpsertOne) IDX(ctx context.Context) uuid.UUID {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// PersonalAccessTokenCreateBulk is the builder for creating many PersonalAccessToken entities in bulk.
+type PersonalAccessTokenCreateBulk struct {
+	config
+	err      error
+	builders []*PersonalAccessTokenCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the PersonalAccessToken entities in the database.
+func (patcb *PersonalAccessTokenCreateBulk) Save(ctx context.Context) ([]*PersonalAccessToken, error) {
+	if patcb.err != nil {
+		return nil, patcb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(patcb.builders))
+	nodes := make([]*PersonalAccessToken, len(patcb.builders))
+	mutators := make([]Mutator, len(patcb.builders))
+	for i := range patcb.builders {
+		func(i int, root context.Context) {
+			builder := patcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*PersonalAccessTokenMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, patcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = patcb.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, patcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, patcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (patcb *PersonalAccessTokenCreateBulk) SaveX(ctx context.Context) []*PersonalAccessToken {
+	v, err := patcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (patcb *PersonalAccessTokenCreateBulk) Exec(ctx context.Context) error {
+	_, err := patcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (patcb *PersonalAccessTokenCreateBulk) ExecX(ctx context.Context) {
+	if err := patcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.PersonalAccessToken.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that was proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.PersonalAccessTokenUpsert) {
+//			SetCreateTime(v+v).
+//		}).
+//		Exec(ctx)
+func (patcb *PersonalAccessTokenCreateBulk) OnConflict(opts ...sql.ConflictOption) *PersonalAccessTokenUpsertBulk {
+	patcb.conflict = opts
+	return &PersonalAccessTokenUpsertBulk{
+		create: patcb,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (patcb *PersonalAccessTokenCreateBulk) OnConflictColumns(columns ...string) *PersonalAccessTokenUpsertBulk {
+	patcb.conflict = append(patcb.conflict, sql.ConflictColumns(columns...))
+	return &PersonalAccessTokenUpsertBulk{
+		create: patcb,
+	}
+}
+
+// PersonalAccessTokenUpsertBulk is the builder for "upsert"-ing
+// a bulk of PersonalAccessToken nodes.
+type PersonalAccessTokenUpsertBulk struct {
+	create *PersonalAccessTokenCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//			sql.ResolveWith(func(u *sql.UpdateSet) {
+//				u.SetIgnore(personalaccesstoken.FieldID)
+//			}),
+//		).
+//		Exec(ctx)
+func (u *PersonalAccessTokenUpsertBulk) UpdateNewValues() *PersonalAccessTokenUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		for _, b := range u.create.builders {
+			if _, exists := b.mutation.ID(); exists {
+				s.SetIgnore(personalaccesstoken.FieldID)
+			}
+			if _, exists := b.mutation.CreateTime(); exists {
+				s.SetIgnore(personalaccesstoken.FieldCreateTime)
+			}
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.PersonalAccessToken.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *PersonalAccessTokenUpsertBulk) Ignore() *PersonalAccessTokenUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *PersonalAccessTokenUpsertBulk) DoNothing() *PersonalAccessTokenUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding fields `UPDATE` values. See the PersonalAccessTokenCreateBulk.OnConflict
+// documentation for more info.
+func (u *PersonalAccessTokenUpsertBulk) Update(set func(*PersonalAccessTokenUpsert)) *PersonalAccessTokenUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
+		set(&PersonalAccessTokenUpsert{UpdateSet: update})
+	}))
+	return u
+}
+
+// SetUpdateTime sets the "update_time" field.
+func (u *PersonalAccessTokenUpsertBulk) SetUpdateTime(v time.Time) *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetUpdateTime(v)
+	})
+}
+
+// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertBulk) UpdateUpdateTime() *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateUpdateTime()
+	})
+}
+
+// SetName sets the "name" field.
+func (u *PersonalAccessTokenUpsertBulk) SetName(v string) *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetName(v)
+	})
+}
+
+// UpdateName sets the "name" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertBulk) UpdateName() *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateName()
+	})
+}
+
+// SetDescription sets the "description" field.
+func (u *PersonalAccessTokenUpsertBulk) SetDescription(v string) *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetDescription(v)
+	})
+}
+
+// UpdateDescription sets the "description" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertBulk) UpdateDescription() *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateDescription()
+	})
+}
+
+// SetPublisherID sets the "publisher_id" field.
+func (u *PersonalAccessTokenUpsertBulk) SetPublisherID(v string) *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetPublisherID(v)
+	})
+}
+
+// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertBulk) UpdatePublisherID() *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdatePublisherID()
+	})
+}
+
+// SetToken sets the "token" field.
+func (u *PersonalAccessTokenUpsertBulk) SetToken(v string) *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.SetToken(v)
+	})
+}
+
+// UpdateToken sets the "token" field to the value that was provided on create.
+func (u *PersonalAccessTokenUpsertBulk) UpdateToken() *PersonalAccessTokenUpsertBulk {
+	return u.Update(func(s *PersonalAccessTokenUpsert) {
+		s.UpdateToken()
+	})
+}
+
+// Exec executes the query.
+func (u *PersonalAccessTokenUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PersonalAccessTokenCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for PersonalAccessTokenCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *PersonalAccessTokenUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/ent/personalaccesstoken_delete.go b/ent/personalaccesstoken_delete.go
new file mode 100644
index 0000000..2c553a9
--- /dev/null
+++ b/ent/personalaccesstoken_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"registry-backend/ent/personalaccesstoken"
+	"registry-backend/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// PersonalAccessTokenDelete is the builder for deleting a PersonalAccessToken entity.
+type PersonalAccessTokenDelete struct {
+	config
+	hooks    []Hook
+	mutation *PersonalAccessTokenMutation
+}
+
+// Where appends a list of predicates to the PersonalAccessTokenDelete builder.
+func (patd *PersonalAccessTokenDelete) Where(ps ...predicate.PersonalAccessToken) *PersonalAccessTokenDelete {
+	patd.mutation.Where(ps...)
+	return patd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (patd *PersonalAccessTokenDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, patd.sqlExec, patd.mutation, patd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (patd *PersonalAccessTokenDelete) ExecX(ctx context.Context) int {
+	n, err := patd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (patd *PersonalAccessTokenDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(personalaccesstoken.Table, sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID))
+	if ps := patd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, patd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	patd.mutation.done = true
+	return affected, err
+}
+
+// PersonalAccessTokenDeleteOne is the builder for deleting a single PersonalAccessToken entity.
+type PersonalAccessTokenDeleteOne struct {
+	patd *PersonalAccessTokenDelete
+}
+
+// Where appends a list of predicates to the PersonalAccessTokenDelete builder.
+func (patdo *PersonalAccessTokenDeleteOne) Where(ps ...predicate.PersonalAccessToken) *PersonalAccessTokenDeleteOne {
+	patdo.patd.mutation.Where(ps...)
+	return patdo
+}
+
+// Exec executes the deletion query.
+func (patdo *PersonalAccessTokenDeleteOne) Exec(ctx context.Context) error {
+	n, err := patdo.patd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{personalaccesstoken.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (patdo *PersonalAccessTokenDeleteOne) ExecX(ctx context.Context) { + if err := patdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/personalaccesstoken_query.go b/ent/personalaccesstoken_query.go new file mode 100644 index 0000000..1bdce41 --- /dev/null +++ b/ent/personalaccesstoken_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// PersonalAccessTokenQuery is the builder for querying PersonalAccessToken entities. +type PersonalAccessTokenQuery struct { + config + ctx *QueryContext + order []personalaccesstoken.OrderOption + inters []Interceptor + predicates []predicate.PersonalAccessToken + withPublisher *PublisherQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PersonalAccessTokenQuery builder. +func (patq *PersonalAccessTokenQuery) Where(ps ...predicate.PersonalAccessToken) *PersonalAccessTokenQuery { + patq.predicates = append(patq.predicates, ps...) + return patq +} + +// Limit the number of records to be returned by this query. +func (patq *PersonalAccessTokenQuery) Limit(limit int) *PersonalAccessTokenQuery { + patq.ctx.Limit = &limit + return patq +} + +// Offset to start from. +func (patq *PersonalAccessTokenQuery) Offset(offset int) *PersonalAccessTokenQuery { + patq.ctx.Offset = &offset + return patq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
+func (patq *PersonalAccessTokenQuery) Unique(unique bool) *PersonalAccessTokenQuery { + patq.ctx.Unique = &unique + return patq +} + +// Order specifies how the records should be ordered. +func (patq *PersonalAccessTokenQuery) Order(o ...personalaccesstoken.OrderOption) *PersonalAccessTokenQuery { + patq.order = append(patq.order, o...) + return patq +} + +// QueryPublisher chains the current query on the "publisher" edge. +func (patq *PersonalAccessTokenQuery) QueryPublisher() *PublisherQuery { + query := (&PublisherClient{config: patq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := patq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := patq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(personalaccesstoken.Table, personalaccesstoken.FieldID, selector), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, personalaccesstoken.PublisherTable, personalaccesstoken.PublisherColumn), + ) + fromU = sqlgraph.SetNeighbors(patq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PersonalAccessToken entity from the query. +// Returns a *NotFoundError when no PersonalAccessToken was found. +func (patq *PersonalAccessTokenQuery) First(ctx context.Context) (*PersonalAccessToken, error) { + nodes, err := patq.Limit(1).All(setContextOp(ctx, patq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{personalaccesstoken.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) FirstX(ctx context.Context) *PersonalAccessToken { + node, err := patq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PersonalAccessToken ID from the query. 
+// Returns a *NotFoundError when no PersonalAccessToken ID was found. +func (patq *PersonalAccessTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = patq.Limit(1).IDs(setContextOp(ctx, patq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{personalaccesstoken.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := patq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PersonalAccessToken entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PersonalAccessToken entity is found. +// Returns a *NotFoundError when no PersonalAccessToken entities are found. +func (patq *PersonalAccessTokenQuery) Only(ctx context.Context) (*PersonalAccessToken, error) { + nodes, err := patq.Limit(2).All(setContextOp(ctx, patq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{personalaccesstoken.Label} + default: + return nil, &NotSingularError{personalaccesstoken.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) OnlyX(ctx context.Context) *PersonalAccessToken { + node, err := patq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PersonalAccessToken ID in the query. +// Returns a *NotSingularError when more than one PersonalAccessToken ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (patq *PersonalAccessTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = patq.Limit(2).IDs(setContextOp(ctx, patq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{personalaccesstoken.Label} + default: + err = &NotSingularError{personalaccesstoken.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := patq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PersonalAccessTokens. +func (patq *PersonalAccessTokenQuery) All(ctx context.Context) ([]*PersonalAccessToken, error) { + ctx = setContextOp(ctx, patq.ctx, "All") + if err := patq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PersonalAccessToken, *PersonalAccessTokenQuery]() + return withInterceptors[[]*PersonalAccessToken](ctx, patq, qr, patq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) AllX(ctx context.Context) []*PersonalAccessToken { + nodes, err := patq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PersonalAccessToken IDs. +func (patq *PersonalAccessTokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if patq.ctx.Unique == nil && patq.path != nil { + patq.Unique(true) + } + ctx = setContextOp(ctx, patq.ctx, "IDs") + if err = patq.Select(personalaccesstoken.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := patq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (patq *PersonalAccessTokenQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, patq.ctx, "Count") + if err := patq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, patq, querierCount[*PersonalAccessTokenQuery](), patq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) CountX(ctx context.Context) int { + count, err := patq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (patq *PersonalAccessTokenQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, patq.ctx, "Exist") + switch _, err := patq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (patq *PersonalAccessTokenQuery) ExistX(ctx context.Context) bool { + exist, err := patq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PersonalAccessTokenQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (patq *PersonalAccessTokenQuery) Clone() *PersonalAccessTokenQuery { + if patq == nil { + return nil + } + return &PersonalAccessTokenQuery{ + config: patq.config, + ctx: patq.ctx.Clone(), + order: append([]personalaccesstoken.OrderOption{}, patq.order...), + inters: append([]Interceptor{}, patq.inters...), + predicates: append([]predicate.PersonalAccessToken{}, patq.predicates...), + withPublisher: patq.withPublisher.Clone(), + // clone intermediate query. + sql: patq.sql.Clone(), + path: patq.path, + } +} + +// WithPublisher tells the query-builder to eager-load the nodes that are connected to +// the "publisher" edge. 
The optional arguments are used to configure the query builder of the edge. +func (patq *PersonalAccessTokenQuery) WithPublisher(opts ...func(*PublisherQuery)) *PersonalAccessTokenQuery { + query := (&PublisherClient{config: patq.config}).Query() + for _, opt := range opts { + opt(query) + } + patq.withPublisher = query + return patq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PersonalAccessToken.Query(). +// GroupBy(personalaccesstoken.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (patq *PersonalAccessTokenQuery) GroupBy(field string, fields ...string) *PersonalAccessTokenGroupBy { + patq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PersonalAccessTokenGroupBy{build: patq} + grbuild.flds = &patq.ctx.Fields + grbuild.label = personalaccesstoken.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.PersonalAccessToken.Query(). +// Select(personalaccesstoken.FieldCreateTime). +// Scan(ctx, &v) +func (patq *PersonalAccessTokenQuery) Select(fields ...string) *PersonalAccessTokenSelect { + patq.ctx.Fields = append(patq.ctx.Fields, fields...) + sbuild := &PersonalAccessTokenSelect{PersonalAccessTokenQuery: patq} + sbuild.label = personalaccesstoken.Label + sbuild.flds, sbuild.scan = &patq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PersonalAccessTokenSelect configured with the given aggregations. 
+func (patq *PersonalAccessTokenQuery) Aggregate(fns ...AggregateFunc) *PersonalAccessTokenSelect { + return patq.Select().Aggregate(fns...) +} + +func (patq *PersonalAccessTokenQuery) prepareQuery(ctx context.Context) error { + for _, inter := range patq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, patq); err != nil { + return err + } + } + } + for _, f := range patq.ctx.Fields { + if !personalaccesstoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if patq.path != nil { + prev, err := patq.path(ctx) + if err != nil { + return err + } + patq.sql = prev + } + return nil +} + +func (patq *PersonalAccessTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PersonalAccessToken, error) { + var ( + nodes = []*PersonalAccessToken{} + _spec = patq.querySpec() + loadedTypes = [1]bool{ + patq.withPublisher != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PersonalAccessToken).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PersonalAccessToken{config: patq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(patq.modifiers) > 0 { + _spec.Modifiers = patq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, patq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := patq.withPublisher; query != nil { + if err := patq.loadPublisher(ctx, query, nodes, nil, + func(n *PersonalAccessToken, e *Publisher) { n.Edges.Publisher = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (patq *PersonalAccessTokenQuery) loadPublisher(ctx context.Context, query 
*PublisherQuery, nodes []*PersonalAccessToken, init func(*PersonalAccessToken), assign func(*PersonalAccessToken, *Publisher)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*PersonalAccessToken) + for i := range nodes { + fk := nodes[i].PublisherID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(publisher.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "publisher_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (patq *PersonalAccessTokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := patq.querySpec() + if len(patq.modifiers) > 0 { + _spec.Modifiers = patq.modifiers + } + _spec.Node.Columns = patq.ctx.Fields + if len(patq.ctx.Fields) > 0 { + _spec.Unique = patq.ctx.Unique != nil && *patq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, patq.driver, _spec) +} + +func (patq *PersonalAccessTokenQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(personalaccesstoken.Table, personalaccesstoken.Columns, sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID)) + _spec.From = patq.sql + if unique := patq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if patq.path != nil { + _spec.Unique = true + } + if fields := patq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, personalaccesstoken.FieldID) + for i := range fields { + if fields[i] != personalaccesstoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if patq.withPublisher != nil { + _spec.Node.AddColumnOnce(personalaccesstoken.FieldPublisherID) + } + } + if ps := patq.predicates; len(ps) > 0 { 
+ _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := patq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := patq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := patq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (patq *PersonalAccessTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(patq.driver.Dialect()) + t1 := builder.Table(personalaccesstoken.Table) + columns := patq.ctx.Fields + if len(columns) == 0 { + columns = personalaccesstoken.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if patq.sql != nil { + selector = patq.sql + selector.Select(selector.Columns(columns...)...) + } + if patq.ctx.Unique != nil && *patq.ctx.Unique { + selector.Distinct() + } + for _, m := range patq.modifiers { + m(selector) + } + for _, p := range patq.predicates { + p(selector) + } + for _, p := range patq.order { + p(selector) + } + if offset := patq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := patq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (patq *PersonalAccessTokenQuery) ForUpdate(opts ...sql.LockOption) *PersonalAccessTokenQuery { + if patq.driver.Dialect() == dialect.Postgres { + patq.Unique(false) + } + patq.modifiers = append(patq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) 
+ }) + return patq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (patq *PersonalAccessTokenQuery) ForShare(opts ...sql.LockOption) *PersonalAccessTokenQuery { + if patq.driver.Dialect() == dialect.Postgres { + patq.Unique(false) + } + patq.modifiers = append(patq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return patq +} + +// PersonalAccessTokenGroupBy is the group-by builder for PersonalAccessToken entities. +type PersonalAccessTokenGroupBy struct { + selector + build *PersonalAccessTokenQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (patgb *PersonalAccessTokenGroupBy) Aggregate(fns ...AggregateFunc) *PersonalAccessTokenGroupBy { + patgb.fns = append(patgb.fns, fns...) + return patgb +} + +// Scan applies the selector query and scans the result into the given value. +func (patgb *PersonalAccessTokenGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, patgb.build.ctx, "GroupBy") + if err := patgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PersonalAccessTokenQuery, *PersonalAccessTokenGroupBy](ctx, patgb.build, patgb, patgb.build.inters, v) +} + +func (patgb *PersonalAccessTokenGroupBy) sqlScan(ctx context.Context, root *PersonalAccessTokenQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(patgb.fns)) + for _, fn := range patgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*patgb.flds)+len(patgb.fns)) + for _, f := range *patgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*patgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := patgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PersonalAccessTokenSelect is the builder for selecting fields of PersonalAccessToken entities. +type PersonalAccessTokenSelect struct { + *PersonalAccessTokenQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pats *PersonalAccessTokenSelect) Aggregate(fns ...AggregateFunc) *PersonalAccessTokenSelect { + pats.fns = append(pats.fns, fns...) + return pats +} + +// Scan applies the selector query and scans the result into the given value. +func (pats *PersonalAccessTokenSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pats.ctx, "Select") + if err := pats.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PersonalAccessTokenQuery, *PersonalAccessTokenSelect](ctx, pats.PersonalAccessTokenQuery, pats, pats.inters, v) +} + +func (pats *PersonalAccessTokenSelect) sqlScan(ctx context.Context, root *PersonalAccessTokenQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pats.fns)) + for _, fn := range pats.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*pats.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pats.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/personalaccesstoken_update.go b/ent/personalaccesstoken_update.go new file mode 100644 index 0000000..1ab2945 --- /dev/null +++ b/ent/personalaccesstoken_update.go @@ -0,0 +1,445 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PersonalAccessTokenUpdate is the builder for updating PersonalAccessToken entities. +type PersonalAccessTokenUpdate struct { + config + hooks []Hook + mutation *PersonalAccessTokenMutation +} + +// Where appends a list predicates to the PersonalAccessTokenUpdate builder. +func (patu *PersonalAccessTokenUpdate) Where(ps ...predicate.PersonalAccessToken) *PersonalAccessTokenUpdate { + patu.mutation.Where(ps...) + return patu +} + +// SetUpdateTime sets the "update_time" field. +func (patu *PersonalAccessTokenUpdate) SetUpdateTime(t time.Time) *PersonalAccessTokenUpdate { + patu.mutation.SetUpdateTime(t) + return patu +} + +// SetName sets the "name" field. +func (patu *PersonalAccessTokenUpdate) SetName(s string) *PersonalAccessTokenUpdate { + patu.mutation.SetName(s) + return patu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (patu *PersonalAccessTokenUpdate) SetNillableName(s *string) *PersonalAccessTokenUpdate { + if s != nil { + patu.SetName(*s) + } + return patu +} + +// SetDescription sets the "description" field. 
+func (patu *PersonalAccessTokenUpdate) SetDescription(s string) *PersonalAccessTokenUpdate { + patu.mutation.SetDescription(s) + return patu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (patu *PersonalAccessTokenUpdate) SetNillableDescription(s *string) *PersonalAccessTokenUpdate { + if s != nil { + patu.SetDescription(*s) + } + return patu +} + +// SetPublisherID sets the "publisher_id" field. +func (patu *PersonalAccessTokenUpdate) SetPublisherID(s string) *PersonalAccessTokenUpdate { + patu.mutation.SetPublisherID(s) + return patu +} + +// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil. +func (patu *PersonalAccessTokenUpdate) SetNillablePublisherID(s *string) *PersonalAccessTokenUpdate { + if s != nil { + patu.SetPublisherID(*s) + } + return patu +} + +// SetToken sets the "token" field. +func (patu *PersonalAccessTokenUpdate) SetToken(s string) *PersonalAccessTokenUpdate { + patu.mutation.SetToken(s) + return patu +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (patu *PersonalAccessTokenUpdate) SetNillableToken(s *string) *PersonalAccessTokenUpdate { + if s != nil { + patu.SetToken(*s) + } + return patu +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. +func (patu *PersonalAccessTokenUpdate) SetPublisher(p *Publisher) *PersonalAccessTokenUpdate { + return patu.SetPublisherID(p.ID) +} + +// Mutation returns the PersonalAccessTokenMutation object of the builder. +func (patu *PersonalAccessTokenUpdate) Mutation() *PersonalAccessTokenMutation { + return patu.mutation +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. +func (patu *PersonalAccessTokenUpdate) ClearPublisher() *PersonalAccessTokenUpdate { + patu.mutation.ClearPublisher() + return patu +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (patu *PersonalAccessTokenUpdate) Save(ctx context.Context) (int, error) { + patu.defaults() + return withHooks(ctx, patu.sqlSave, patu.mutation, patu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (patu *PersonalAccessTokenUpdate) SaveX(ctx context.Context) int { + affected, err := patu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (patu *PersonalAccessTokenUpdate) Exec(ctx context.Context) error { + _, err := patu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (patu *PersonalAccessTokenUpdate) ExecX(ctx context.Context) { + if err := patu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (patu *PersonalAccessTokenUpdate) defaults() { + if _, ok := patu.mutation.UpdateTime(); !ok { + v := personalaccesstoken.UpdateDefaultUpdateTime() + patu.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (patu *PersonalAccessTokenUpdate) check() error { + if _, ok := patu.mutation.PublisherID(); patu.mutation.PublisherCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "PersonalAccessToken.publisher"`) + } + return nil +} + +func (patu *PersonalAccessTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := patu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(personalaccesstoken.Table, personalaccesstoken.Columns, sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID)) + if ps := patu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := patu.mutation.UpdateTime(); ok { + _spec.SetField(personalaccesstoken.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := patu.mutation.Name(); ok { + _spec.SetField(personalaccesstoken.FieldName, field.TypeString, value) + } + if value, ok := patu.mutation.Description(); ok { + _spec.SetField(personalaccesstoken.FieldDescription, field.TypeString, value) + } + if value, ok := patu.mutation.Token(); ok { + _spec.SetField(personalaccesstoken.FieldToken, field.TypeString, value) + } + if patu.mutation.PublisherCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: personalaccesstoken.PublisherTable, + Columns: []string{personalaccesstoken.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := patu.mutation.PublisherIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: personalaccesstoken.PublisherTable, + Columns: []string{personalaccesstoken.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + for _, k := 
range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, patu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{personalaccesstoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + patu.mutation.done = true + return n, nil +} + +// PersonalAccessTokenUpdateOne is the builder for updating a single PersonalAccessToken entity. +type PersonalAccessTokenUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PersonalAccessTokenMutation +} + +// SetUpdateTime sets the "update_time" field. +func (patuo *PersonalAccessTokenUpdateOne) SetUpdateTime(t time.Time) *PersonalAccessTokenUpdateOne { + patuo.mutation.SetUpdateTime(t) + return patuo +} + +// SetName sets the "name" field. +func (patuo *PersonalAccessTokenUpdateOne) SetName(s string) *PersonalAccessTokenUpdateOne { + patuo.mutation.SetName(s) + return patuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (patuo *PersonalAccessTokenUpdateOne) SetNillableName(s *string) *PersonalAccessTokenUpdateOne { + if s != nil { + patuo.SetName(*s) + } + return patuo +} + +// SetDescription sets the "description" field. +func (patuo *PersonalAccessTokenUpdateOne) SetDescription(s string) *PersonalAccessTokenUpdateOne { + patuo.mutation.SetDescription(s) + return patuo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (patuo *PersonalAccessTokenUpdateOne) SetNillableDescription(s *string) *PersonalAccessTokenUpdateOne { + if s != nil { + patuo.SetDescription(*s) + } + return patuo +} + +// SetPublisherID sets the "publisher_id" field. 
+func (patuo *PersonalAccessTokenUpdateOne) SetPublisherID(s string) *PersonalAccessTokenUpdateOne { + patuo.mutation.SetPublisherID(s) + return patuo +} + +// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil. +func (patuo *PersonalAccessTokenUpdateOne) SetNillablePublisherID(s *string) *PersonalAccessTokenUpdateOne { + if s != nil { + patuo.SetPublisherID(*s) + } + return patuo +} + +// SetToken sets the "token" field. +func (patuo *PersonalAccessTokenUpdateOne) SetToken(s string) *PersonalAccessTokenUpdateOne { + patuo.mutation.SetToken(s) + return patuo +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (patuo *PersonalAccessTokenUpdateOne) SetNillableToken(s *string) *PersonalAccessTokenUpdateOne { + if s != nil { + patuo.SetToken(*s) + } + return patuo +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. +func (patuo *PersonalAccessTokenUpdateOne) SetPublisher(p *Publisher) *PersonalAccessTokenUpdateOne { + return patuo.SetPublisherID(p.ID) +} + +// Mutation returns the PersonalAccessTokenMutation object of the builder. +func (patuo *PersonalAccessTokenUpdateOne) Mutation() *PersonalAccessTokenMutation { + return patuo.mutation +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. +func (patuo *PersonalAccessTokenUpdateOne) ClearPublisher() *PersonalAccessTokenUpdateOne { + patuo.mutation.ClearPublisher() + return patuo +} + +// Where appends a list predicates to the PersonalAccessTokenUpdate builder. +func (patuo *PersonalAccessTokenUpdateOne) Where(ps ...predicate.PersonalAccessToken) *PersonalAccessTokenUpdateOne { + patuo.mutation.Where(ps...) + return patuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. 
+func (patuo *PersonalAccessTokenUpdateOne) Select(field string, fields ...string) *PersonalAccessTokenUpdateOne { + patuo.fields = append([]string{field}, fields...) + return patuo +} + +// Save executes the query and returns the updated PersonalAccessToken entity. +func (patuo *PersonalAccessTokenUpdateOne) Save(ctx context.Context) (*PersonalAccessToken, error) { + patuo.defaults() + return withHooks(ctx, patuo.sqlSave, patuo.mutation, patuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (patuo *PersonalAccessTokenUpdateOne) SaveX(ctx context.Context) *PersonalAccessToken { + node, err := patuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (patuo *PersonalAccessTokenUpdateOne) Exec(ctx context.Context) error { + _, err := patuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (patuo *PersonalAccessTokenUpdateOne) ExecX(ctx context.Context) { + if err := patuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (patuo *PersonalAccessTokenUpdateOne) defaults() { + if _, ok := patuo.mutation.UpdateTime(); !ok { + v := personalaccesstoken.UpdateDefaultUpdateTime() + patuo.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
func (patuo *PersonalAccessTokenUpdateOne) check() error {
	// "publisher" is a required unique edge: it may be replaced, but clearing it
	// without supplying a new publisher_id is rejected.
	if _, ok := patuo.mutation.PublisherID(); patuo.mutation.PublisherCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "PersonalAccessToken.publisher"`)
	}
	return nil
}

// sqlSave validates the builder, translates the mutation into an sqlgraph
// update-spec, executes the UPDATE, and returns the refreshed entity.
func (patuo *PersonalAccessTokenUpdateOne) sqlSave(ctx context.Context) (_node *PersonalAccessToken, err error) {
	if err := patuo.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(personalaccesstoken.Table, personalaccesstoken.Columns, sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID))
	id, ok := patuo.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PersonalAccessToken.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Restrict the returned columns when Select was used; the ID column is
	// always fetched so the entity can be identified.
	if fields := patuo.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, personalaccesstoken.FieldID)
		for _, f := range fields {
			if !personalaccesstoken.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != personalaccesstoken.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any predicates appended via Where to scope the UPDATE.
	if ps := patuo.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := patuo.mutation.UpdateTime(); ok {
		_spec.SetField(personalaccesstoken.FieldUpdateTime, field.TypeTime, value)
	}
	if value, ok := patuo.mutation.Name(); ok {
		_spec.SetField(personalaccesstoken.FieldName, field.TypeString, value)
	}
	if value, ok := patuo.mutation.Description(); ok {
		_spec.SetField(personalaccesstoken.FieldDescription, field.TypeString, value)
	}
	if value, ok := patuo.mutation.Token(); ok {
		_spec.SetField(personalaccesstoken.FieldToken, field.TypeString, value)
	}
	// Clearing the M2O "publisher" edge nulls the FK column on this row.
	if patuo.mutation.PublisherCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   personalaccesstoken.PublisherTable,
			Columns: []string{personalaccesstoken.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Setting a new publisher adds the edge back with the supplied ID(s).
	if nodes := patuo.mutation.PublisherIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   personalaccesstoken.PublisherTable,
			Columns: []string{personalaccesstoken.PublisherColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	_node = &PersonalAccessToken{config: patuo.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, patuo.driver, _spec); err != nil {
		// Map low-level sqlgraph errors onto ent's exported error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{personalaccesstoken.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	patuo.mutation.done = true
	return _node, nil
}
diff --git a/ent/predicate/predicate.go b/ent/predicate/predicate.go
new file mode 100644
index 0000000..c29ba05
--- /dev/null
+++ b/ent/predicate/predicate.go
@@ -0,0 +1,34 @@
// Code generated by ent, DO NOT EDIT.

package predicate

import (
	"entgo.io/ent/dialect/sql"
)

// CIWorkflowResult is the predicate function for ciworkflowresult builders.
type CIWorkflowResult func(*sql.Selector)

// GitCommit is the predicate function for gitcommit builders.
type GitCommit func(*sql.Selector)

// Node is the predicate function for node builders.
type Node func(*sql.Selector)

// NodeVersion is the predicate function for nodeversion builders.
type NodeVersion func(*sql.Selector)

// PersonalAccessToken is the predicate function for personalaccesstoken builders.
type PersonalAccessToken func(*sql.Selector)

// Publisher is the predicate function for publisher builders.
type Publisher func(*sql.Selector)

// PublisherPermission is the predicate function for publisherpermission builders.
type PublisherPermission func(*sql.Selector)

// StorageFile is the predicate function for storagefile builders.
type StorageFile func(*sql.Selector)

// User is the predicate function for user builders.
type User func(*sql.Selector)
diff --git a/ent/publisher.go b/ent/publisher.go
new file mode 100644
index 0000000..9c35eb8
--- /dev/null
+++ b/ent/publisher.go
@@ -0,0 +1,240 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"fmt"
	"registry-backend/ent/publisher"
	"strings"
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
)

// Publisher is the model entity for the Publisher schema.
type Publisher struct {
	config `json:"-"`
	// ID of the ent.
	// The unique identifier of the publisher. Cannot be changed.
	ID string `json:"id,omitempty"`
	// CreateTime holds the value of the "create_time" field.
	CreateTime time.Time `json:"create_time,omitempty"`
	// UpdateTime holds the value of the "update_time" field.
	UpdateTime time.Time `json:"update_time,omitempty"`
	// The publicly visible name of the publisher.
	Name string `json:"name,omitempty"`
	// Description holds the value of the "description" field.
	Description string `json:"description,omitempty"`
	// Website holds the value of the "website" field.
	Website string `json:"website,omitempty"`
	// SupportEmail holds the value of the "support_email" field.
	SupportEmail string `json:"support_email,omitempty"`
	// SourceCodeRepo holds the value of the "source_code_repo" field.
	SourceCodeRepo string `json:"source_code_repo,omitempty"`
	// LogoURL holds the value of the "logo_url" field.
	LogoURL string `json:"logo_url,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the PublisherQuery when eager-loading is set.
	Edges PublisherEdges `json:"edges"`
	// selectValues stores values of columns selected dynamically via modifiers.
	selectValues sql.SelectValues
}

// PublisherEdges holds the relations/edges for other nodes in the graph.
type PublisherEdges struct {
	// PublisherPermissions holds the value of the publisher_permissions edge.
	PublisherPermissions []*PublisherPermission `json:"publisher_permissions,omitempty"`
	// Nodes holds the value of the nodes edge.
	Nodes []*Node `json:"nodes,omitempty"`
	// PersonalAccessTokens holds the value of the personal_access_tokens edge.
	PersonalAccessTokens []*PersonalAccessToken `json:"personal_access_tokens,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [3]bool
}

// PublisherPermissionsOrErr returns the PublisherPermissions value or an error if the edge
// was not loaded in eager-loading.
func (e PublisherEdges) PublisherPermissionsOrErr() ([]*PublisherPermission, error) {
	if e.loadedTypes[0] {
		return e.PublisherPermissions, nil
	}
	return nil, &NotLoadedError{edge: "publisher_permissions"}
}

// NodesOrErr returns the Nodes value or an error if the edge
// was not loaded in eager-loading.
func (e PublisherEdges) NodesOrErr() ([]*Node, error) {
	if e.loadedTypes[1] {
		return e.Nodes, nil
	}
	return nil, &NotLoadedError{edge: "nodes"}
}

// PersonalAccessTokensOrErr returns the PersonalAccessTokens value or an error if the edge
// was not loaded in eager-loading.
func (e PublisherEdges) PersonalAccessTokensOrErr() ([]*PersonalAccessToken, error) {
	if e.loadedTypes[2] {
		return e.PersonalAccessTokens, nil
	}
	return nil, &NotLoadedError{edge: "personal_access_tokens"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*Publisher) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case publisher.FieldID, publisher.FieldName, publisher.FieldDescription, publisher.FieldWebsite, publisher.FieldSupportEmail, publisher.FieldSourceCodeRepo, publisher.FieldLogoURL:
			values[i] = new(sql.NullString)
		case publisher.FieldCreateTime, publisher.FieldUpdateTime:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. from modifiers) are scanned generically
			// and surfaced through selectValues.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Publisher fields.
func (pu *Publisher) assignValues(columns []string, values []any) error {
	// Fewer scanned values than requested columns is a programming error.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case publisher.FieldID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value.Valid {
				pu.ID = value.String
			}
		case publisher.FieldCreateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field create_time", values[i])
			} else if value.Valid {
				pu.CreateTime = value.Time
			}
		case publisher.FieldUpdateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field update_time", values[i])
			} else if value.Valid {
				pu.UpdateTime = value.Time
			}
		case publisher.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				pu.Name = value.String
			}
		case publisher.FieldDescription:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field description", values[i])
			} else if value.Valid {
				pu.Description = value.String
			}
		case publisher.FieldWebsite:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field website", values[i])
			} else if value.Valid {
				pu.Website = value.String
			}
		case publisher.FieldSupportEmail:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field support_email", values[i])
			} else if value.Valid {
				pu.SupportEmail = value.String
			}
		case publisher.FieldSourceCodeRepo:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field source_code_repo", values[i])
			} else if value.Valid {
				pu.SourceCodeRepo = value.String
			}
		case publisher.FieldLogoURL:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field logo_url", values[i])
			} else if value.Valid {
				pu.LogoURL = value.String
			}
		default:
			pu.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the Publisher.
// This includes values selected through modifiers, order, etc.
func (pu *Publisher) Value(name string) (ent.Value, error) {
	return pu.selectValues.Get(name)
}

// QueryPublisherPermissions queries the "publisher_permissions" edge of the Publisher entity.
func (pu *Publisher) QueryPublisherPermissions() *PublisherPermissionQuery {
	return NewPublisherClient(pu.config).QueryPublisherPermissions(pu)
}

// QueryNodes queries the "nodes" edge of the Publisher entity.
func (pu *Publisher) QueryNodes() *NodeQuery {
	return NewPublisherClient(pu.config).QueryNodes(pu)
}

// QueryPersonalAccessTokens queries the "personal_access_tokens" edge of the Publisher entity.
func (pu *Publisher) QueryPersonalAccessTokens() *PersonalAccessTokenQuery {
	return NewPublisherClient(pu.config).QueryPersonalAccessTokens(pu)
}

// Update returns a builder for updating this Publisher.
// Note that you need to call Publisher.Unwrap() before calling this method if this Publisher
// was returned from a transaction, and the transaction was committed or rolled back.
func (pu *Publisher) Update() *PublisherUpdateOne {
	return NewPublisherClient(pu.config).UpdateOne(pu)
}

// Unwrap unwraps the Publisher entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (pu *Publisher) Unwrap() *Publisher {
	_tx, ok := pu.config.driver.(*txDriver)
	if !ok {
		panic("ent: Publisher is not a transactional entity")
	}
	pu.config.driver = _tx.drv
	return pu
}

// String implements the fmt.Stringer.
func (pu *Publisher) String() string {
	var builder strings.Builder
	builder.WriteString("Publisher(")
	builder.WriteString(fmt.Sprintf("id=%v, ", pu.ID))
	builder.WriteString("create_time=")
	builder.WriteString(pu.CreateTime.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("update_time=")
	builder.WriteString(pu.UpdateTime.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(pu.Name)
	builder.WriteString(", ")
	builder.WriteString("description=")
	builder.WriteString(pu.Description)
	builder.WriteString(", ")
	builder.WriteString("website=")
	builder.WriteString(pu.Website)
	builder.WriteString(", ")
	builder.WriteString("support_email=")
	builder.WriteString(pu.SupportEmail)
	builder.WriteString(", ")
	builder.WriteString("source_code_repo=")
	builder.WriteString(pu.SourceCodeRepo)
	builder.WriteString(", ")
	builder.WriteString("logo_url=")
	builder.WriteString(pu.LogoURL)
	builder.WriteByte(')')
	return builder.String()
}

// Publishers is a parsable slice of Publisher.
type Publishers []*Publisher
diff --git a/ent/publisher/publisher.go b/ent/publisher/publisher.go
new file mode 100644
index 0000000..a905a76
--- /dev/null
+++ b/ent/publisher/publisher.go
@@ -0,0 +1,205 @@
// Code generated by ent, DO NOT EDIT.

package publisher

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

const (
	// Label holds the string label denoting the publisher type in the database.
	Label = "publisher"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreateTime holds the string denoting the create_time field in the database.
	FieldCreateTime = "create_time"
	// FieldUpdateTime holds the string denoting the update_time field in the database.
	FieldUpdateTime = "update_time"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldDescription holds the string denoting the description field in the database.
	FieldDescription = "description"
	// FieldWebsite holds the string denoting the website field in the database.
	FieldWebsite = "website"
	// FieldSupportEmail holds the string denoting the support_email field in the database.
	FieldSupportEmail = "support_email"
	// FieldSourceCodeRepo holds the string denoting the source_code_repo field in the database.
	FieldSourceCodeRepo = "source_code_repo"
	// FieldLogoURL holds the string denoting the logo_url field in the database.
	FieldLogoURL = "logo_url"
	// EdgePublisherPermissions holds the string denoting the publisher_permissions edge name in mutations.
	EdgePublisherPermissions = "publisher_permissions"
	// EdgeNodes holds the string denoting the nodes edge name in mutations.
	EdgeNodes = "nodes"
	// EdgePersonalAccessTokens holds the string denoting the personal_access_tokens edge name in mutations.
	EdgePersonalAccessTokens = "personal_access_tokens"
	// Table holds the table name of the publisher in the database.
	Table = "publishers"
	// PublisherPermissionsTable is the table that holds the publisher_permissions relation/edge.
	PublisherPermissionsTable = "publisher_permissions"
	// PublisherPermissionsInverseTable is the table name for the PublisherPermission entity.
	// It exists in this package in order to avoid circular dependency with the "publisherpermission" package.
	PublisherPermissionsInverseTable = "publisher_permissions"
	// PublisherPermissionsColumn is the table column denoting the publisher_permissions relation/edge.
	PublisherPermissionsColumn = "publisher_id"
	// NodesTable is the table that holds the nodes relation/edge.
	NodesTable = "nodes"
	// NodesInverseTable is the table name for the Node entity.
	// It exists in this package in order to avoid circular dependency with the "node" package.
	NodesInverseTable = "nodes"
	// NodesColumn is the table column denoting the nodes relation/edge.
	NodesColumn = "publisher_id"
	// PersonalAccessTokensTable is the table that holds the personal_access_tokens relation/edge.
	PersonalAccessTokensTable = "personal_access_tokens"
	// PersonalAccessTokensInverseTable is the table name for the PersonalAccessToken entity.
	// It exists in this package in order to avoid circular dependency with the "personalaccesstoken" package.
	PersonalAccessTokensInverseTable = "personal_access_tokens"
	// PersonalAccessTokensColumn is the table column denoting the personal_access_tokens relation/edge.
	PersonalAccessTokensColumn = "publisher_id"
)

// Columns holds all SQL columns for publisher fields.
var Columns = []string{
	FieldID,
	FieldCreateTime,
	FieldUpdateTime,
	FieldName,
	FieldDescription,
	FieldWebsite,
	FieldSupportEmail,
	FieldSourceCodeRepo,
	FieldLogoURL,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

var (
	// DefaultCreateTime holds the default value on creation for the "create_time" field.
	// Populated by the runtime package from the schema definition.
	DefaultCreateTime func() time.Time
	// DefaultUpdateTime holds the default value on creation for the "update_time" field.
	DefaultUpdateTime func() time.Time
	// UpdateDefaultUpdateTime holds the default value on update for the "update_time" field.
	UpdateDefaultUpdateTime func() time.Time
)

// OrderOption defines the ordering options for the Publisher queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreateTime orders the results by the create_time field.
func ByCreateTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreateTime, opts...).ToFunc()
}

// ByUpdateTime orders the results by the update_time field.
func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdateTime, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByWebsite orders the results by the website field.
func ByWebsite(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldWebsite, opts...).ToFunc()
}

// BySupportEmail orders the results by the support_email field.
func BySupportEmail(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSupportEmail, opts...).ToFunc()
}

// BySourceCodeRepo orders the results by the source_code_repo field.
func BySourceCodeRepo(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSourceCodeRepo, opts...).ToFunc()
}

// ByLogoURL orders the results by the logo_url field.
func ByLogoURL(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLogoURL, opts...).ToFunc()
}

// ByPublisherPermissionsCount orders the results by publisher_permissions count.
func ByPublisherPermissionsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newPublisherPermissionsStep(), opts...)
	}
}

// ByPublisherPermissions orders the results by publisher_permissions terms.
func ByPublisherPermissions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPublisherPermissionsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByNodesCount orders the results by nodes count.
func ByNodesCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newNodesStep(), opts...)
	}
}

// ByNodes orders the results by nodes terms.
func ByNodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newNodesStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByPersonalAccessTokensCount orders the results by personal_access_tokens count.
func ByPersonalAccessTokensCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newPersonalAccessTokensStep(), opts...)
	}
}

// ByPersonalAccessTokens orders the results by personal_access_tokens terms.
func ByPersonalAccessTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPersonalAccessTokensStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// newPublisherPermissionsStep describes the O2M step from publishers to publisher_permissions.
func newPublisherPermissionsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(PublisherPermissionsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, PublisherPermissionsTable, PublisherPermissionsColumn),
	)
}

// newNodesStep describes the O2M step from publishers to nodes.
func newNodesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(NodesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, NodesTable, NodesColumn),
	)
}

// newPersonalAccessTokensStep describes the O2M step from publishers to personal_access_tokens.
func newPersonalAccessTokensStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(PersonalAccessTokensInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, PersonalAccessTokensTable, PersonalAccessTokensColumn),
	)
}
diff --git a/ent/publisher/where.go b/ent/publisher/where.go
new file mode 100644
index 0000000..bb77b21
--- /dev/null
+++ b/ent/publisher/where.go
@@ -0,0 +1,710 @@
// Code generated by ent, DO NOT EDIT.

package publisher

import (
	"registry-backend/ent/predicate"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

// ID filters vertices based on their ID field.
func ID(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
// NOTE: ordering predicates on string fields (GT/LT etc.) compare
// per the database's string collation.
func IDGT(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldID, id))
}

// IDEqualFold applies the EqualFold predicate on the ID field.
func IDEqualFold(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEqualFold(FieldID, id))
}

// IDContainsFold applies the ContainsFold predicate on the ID field.
func IDContainsFold(id string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContainsFold(FieldID, id))
}

// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ.
func CreateTime(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldCreateTime, v))
}

// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ.
func UpdateTime(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldUpdateTime, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldName, v))
}

// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldDescription, v))
}

// Website applies equality check predicate on the "website" field. It's identical to WebsiteEQ.
func Website(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldWebsite, v))
}

// SupportEmail applies equality check predicate on the "support_email" field. It's identical to SupportEmailEQ.
func SupportEmail(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldSupportEmail, v))
}

// SourceCodeRepo applies equality check predicate on the "source_code_repo" field. It's identical to SourceCodeRepoEQ.
func SourceCodeRepo(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldSourceCodeRepo, v))
}

// LogoURL applies equality check predicate on the "logo_url" field. It's identical to LogoURLEQ.
func LogoURL(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldLogoURL, v))
}

// CreateTimeEQ applies the EQ predicate on the "create_time" field.
func CreateTimeEQ(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldCreateTime, v))
}

// CreateTimeNEQ applies the NEQ predicate on the "create_time" field.
func CreateTimeNEQ(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldCreateTime, v))
}

// CreateTimeIn applies the In predicate on the "create_time" field.
func CreateTimeIn(vs ...time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldCreateTime, vs...))
}

// CreateTimeNotIn applies the NotIn predicate on the "create_time" field.
func CreateTimeNotIn(vs ...time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldCreateTime, vs...))
}

// CreateTimeGT applies the GT predicate on the "create_time" field.
func CreateTimeGT(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldCreateTime, v))
}

// CreateTimeGTE applies the GTE predicate on the "create_time" field.
func CreateTimeGTE(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldCreateTime, v))
}

// CreateTimeLT applies the LT predicate on the "create_time" field.
func CreateTimeLT(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldCreateTime, v))
}

// CreateTimeLTE applies the LTE predicate on the "create_time" field.
func CreateTimeLTE(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldCreateTime, v))
}

// UpdateTimeEQ applies the EQ predicate on the "update_time" field.
func UpdateTimeEQ(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldUpdateTime, v))
}

// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field.
func UpdateTimeNEQ(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldUpdateTime, v))
}

// UpdateTimeIn applies the In predicate on the "update_time" field.
func UpdateTimeIn(vs ...time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldUpdateTime, vs...))
}

// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field.
func UpdateTimeNotIn(vs ...time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldUpdateTime, vs...))
}

// UpdateTimeGT applies the GT predicate on the "update_time" field.
func UpdateTimeGT(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldUpdateTime, v))
}

// UpdateTimeGTE applies the GTE predicate on the "update_time" field.
func UpdateTimeGTE(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldUpdateTime, v))
}

// UpdateTimeLT applies the LT predicate on the "update_time" field.
func UpdateTimeLT(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldUpdateTime, v))
}

// UpdateTimeLTE applies the LTE predicate on the "update_time" field.
func UpdateTimeLTE(v time.Time) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldUpdateTime, v))
}

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContainsFold(FieldName, v))
}

// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldDescription, v))
}

// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldDescription, v))
}

// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldDescription, vs...))
}

// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldDescription, vs...))
}

// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldDescription, v))
}

// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldDescription, v))
}

// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldDescription, v))
}

// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldDescription, v))
}

// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContains(FieldDescription, v))
}

// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasPrefix(FieldDescription, v))
}

// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasSuffix(FieldDescription, v))
}

// DescriptionIsNil applies the IsNil predicate on the "description" field.
// ("description" is an optional/nullable column.)
func DescriptionIsNil() predicate.Publisher {
	return predicate.Publisher(sql.FieldIsNull(FieldDescription))
}

// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.Publisher {
	return predicate.Publisher(sql.FieldNotNull(FieldDescription))
}

// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEqualFold(FieldDescription, v))
}

// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContainsFold(FieldDescription, v))
}

// WebsiteEQ applies the EQ predicate on the "website" field.
func WebsiteEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldEQ(FieldWebsite, v))
}

// WebsiteNEQ applies the NEQ predicate on the "website" field.
func WebsiteNEQ(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNEQ(FieldWebsite, v))
}

// WebsiteIn applies the In predicate on the "website" field.
func WebsiteIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldIn(FieldWebsite, vs...))
}

// WebsiteNotIn applies the NotIn predicate on the "website" field.
func WebsiteNotIn(vs ...string) predicate.Publisher {
	return predicate.Publisher(sql.FieldNotIn(FieldWebsite, vs...))
}

// WebsiteGT applies the GT predicate on the "website" field.
func WebsiteGT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGT(FieldWebsite, v))
}

// WebsiteGTE applies the GTE predicate on the "website" field.
func WebsiteGTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldGTE(FieldWebsite, v))
}

// WebsiteLT applies the LT predicate on the "website" field.
func WebsiteLT(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLT(FieldWebsite, v))
}

// WebsiteLTE applies the LTE predicate on the "website" field.
func WebsiteLTE(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldLTE(FieldWebsite, v))
}

// WebsiteContains applies the Contains predicate on the "website" field.
func WebsiteContains(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldContains(FieldWebsite, v))
}

// WebsiteHasPrefix applies the HasPrefix predicate on the "website" field.
func WebsiteHasPrefix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasPrefix(FieldWebsite, v))
}

// WebsiteHasSuffix applies the HasSuffix predicate on the "website" field.
func WebsiteHasSuffix(v string) predicate.Publisher {
	return predicate.Publisher(sql.FieldHasSuffix(FieldWebsite, v))
}

// WebsiteIsNil applies the IsNil predicate on the "website" field.
+func WebsiteIsNil() predicate.Publisher { + return predicate.Publisher(sql.FieldIsNull(FieldWebsite)) +} + +// WebsiteNotNil applies the NotNil predicate on the "website" field. +func WebsiteNotNil() predicate.Publisher { + return predicate.Publisher(sql.FieldNotNull(FieldWebsite)) +} + +// WebsiteEqualFold applies the EqualFold predicate on the "website" field. +func WebsiteEqualFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEqualFold(FieldWebsite, v)) +} + +// WebsiteContainsFold applies the ContainsFold predicate on the "website" field. +func WebsiteContainsFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContainsFold(FieldWebsite, v)) +} + +// SupportEmailEQ applies the EQ predicate on the "support_email" field. +func SupportEmailEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEQ(FieldSupportEmail, v)) +} + +// SupportEmailNEQ applies the NEQ predicate on the "support_email" field. +func SupportEmailNEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldNEQ(FieldSupportEmail, v)) +} + +// SupportEmailIn applies the In predicate on the "support_email" field. +func SupportEmailIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldIn(FieldSupportEmail, vs...)) +} + +// SupportEmailNotIn applies the NotIn predicate on the "support_email" field. +func SupportEmailNotIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldNotIn(FieldSupportEmail, vs...)) +} + +// SupportEmailGT applies the GT predicate on the "support_email" field. +func SupportEmailGT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGT(FieldSupportEmail, v)) +} + +// SupportEmailGTE applies the GTE predicate on the "support_email" field. +func SupportEmailGTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGTE(FieldSupportEmail, v)) +} + +// SupportEmailLT applies the LT predicate on the "support_email" field. 
+func SupportEmailLT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLT(FieldSupportEmail, v)) +} + +// SupportEmailLTE applies the LTE predicate on the "support_email" field. +func SupportEmailLTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLTE(FieldSupportEmail, v)) +} + +// SupportEmailContains applies the Contains predicate on the "support_email" field. +func SupportEmailContains(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContains(FieldSupportEmail, v)) +} + +// SupportEmailHasPrefix applies the HasPrefix predicate on the "support_email" field. +func SupportEmailHasPrefix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasPrefix(FieldSupportEmail, v)) +} + +// SupportEmailHasSuffix applies the HasSuffix predicate on the "support_email" field. +func SupportEmailHasSuffix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasSuffix(FieldSupportEmail, v)) +} + +// SupportEmailIsNil applies the IsNil predicate on the "support_email" field. +func SupportEmailIsNil() predicate.Publisher { + return predicate.Publisher(sql.FieldIsNull(FieldSupportEmail)) +} + +// SupportEmailNotNil applies the NotNil predicate on the "support_email" field. +func SupportEmailNotNil() predicate.Publisher { + return predicate.Publisher(sql.FieldNotNull(FieldSupportEmail)) +} + +// SupportEmailEqualFold applies the EqualFold predicate on the "support_email" field. +func SupportEmailEqualFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEqualFold(FieldSupportEmail, v)) +} + +// SupportEmailContainsFold applies the ContainsFold predicate on the "support_email" field. +func SupportEmailContainsFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContainsFold(FieldSupportEmail, v)) +} + +// SourceCodeRepoEQ applies the EQ predicate on the "source_code_repo" field. 
+func SourceCodeRepoEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEQ(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoNEQ applies the NEQ predicate on the "source_code_repo" field. +func SourceCodeRepoNEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldNEQ(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoIn applies the In predicate on the "source_code_repo" field. +func SourceCodeRepoIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldIn(FieldSourceCodeRepo, vs...)) +} + +// SourceCodeRepoNotIn applies the NotIn predicate on the "source_code_repo" field. +func SourceCodeRepoNotIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldNotIn(FieldSourceCodeRepo, vs...)) +} + +// SourceCodeRepoGT applies the GT predicate on the "source_code_repo" field. +func SourceCodeRepoGT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGT(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoGTE applies the GTE predicate on the "source_code_repo" field. +func SourceCodeRepoGTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGTE(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoLT applies the LT predicate on the "source_code_repo" field. +func SourceCodeRepoLT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLT(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoLTE applies the LTE predicate on the "source_code_repo" field. +func SourceCodeRepoLTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLTE(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoContains applies the Contains predicate on the "source_code_repo" field. +func SourceCodeRepoContains(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContains(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoHasPrefix applies the HasPrefix predicate on the "source_code_repo" field. 
+func SourceCodeRepoHasPrefix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasPrefix(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoHasSuffix applies the HasSuffix predicate on the "source_code_repo" field. +func SourceCodeRepoHasSuffix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasSuffix(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoIsNil applies the IsNil predicate on the "source_code_repo" field. +func SourceCodeRepoIsNil() predicate.Publisher { + return predicate.Publisher(sql.FieldIsNull(FieldSourceCodeRepo)) +} + +// SourceCodeRepoNotNil applies the NotNil predicate on the "source_code_repo" field. +func SourceCodeRepoNotNil() predicate.Publisher { + return predicate.Publisher(sql.FieldNotNull(FieldSourceCodeRepo)) +} + +// SourceCodeRepoEqualFold applies the EqualFold predicate on the "source_code_repo" field. +func SourceCodeRepoEqualFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEqualFold(FieldSourceCodeRepo, v)) +} + +// SourceCodeRepoContainsFold applies the ContainsFold predicate on the "source_code_repo" field. +func SourceCodeRepoContainsFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContainsFold(FieldSourceCodeRepo, v)) +} + +// LogoURLEQ applies the EQ predicate on the "logo_url" field. +func LogoURLEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEQ(FieldLogoURL, v)) +} + +// LogoURLNEQ applies the NEQ predicate on the "logo_url" field. +func LogoURLNEQ(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldNEQ(FieldLogoURL, v)) +} + +// LogoURLIn applies the In predicate on the "logo_url" field. +func LogoURLIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldIn(FieldLogoURL, vs...)) +} + +// LogoURLNotIn applies the NotIn predicate on the "logo_url" field. 
+func LogoURLNotIn(vs ...string) predicate.Publisher { + return predicate.Publisher(sql.FieldNotIn(FieldLogoURL, vs...)) +} + +// LogoURLGT applies the GT predicate on the "logo_url" field. +func LogoURLGT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGT(FieldLogoURL, v)) +} + +// LogoURLGTE applies the GTE predicate on the "logo_url" field. +func LogoURLGTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldGTE(FieldLogoURL, v)) +} + +// LogoURLLT applies the LT predicate on the "logo_url" field. +func LogoURLLT(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLT(FieldLogoURL, v)) +} + +// LogoURLLTE applies the LTE predicate on the "logo_url" field. +func LogoURLLTE(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldLTE(FieldLogoURL, v)) +} + +// LogoURLContains applies the Contains predicate on the "logo_url" field. +func LogoURLContains(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContains(FieldLogoURL, v)) +} + +// LogoURLHasPrefix applies the HasPrefix predicate on the "logo_url" field. +func LogoURLHasPrefix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasPrefix(FieldLogoURL, v)) +} + +// LogoURLHasSuffix applies the HasSuffix predicate on the "logo_url" field. +func LogoURLHasSuffix(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldHasSuffix(FieldLogoURL, v)) +} + +// LogoURLIsNil applies the IsNil predicate on the "logo_url" field. +func LogoURLIsNil() predicate.Publisher { + return predicate.Publisher(sql.FieldIsNull(FieldLogoURL)) +} + +// LogoURLNotNil applies the NotNil predicate on the "logo_url" field. +func LogoURLNotNil() predicate.Publisher { + return predicate.Publisher(sql.FieldNotNull(FieldLogoURL)) +} + +// LogoURLEqualFold applies the EqualFold predicate on the "logo_url" field. 
+func LogoURLEqualFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldEqualFold(FieldLogoURL, v)) +} + +// LogoURLContainsFold applies the ContainsFold predicate on the "logo_url" field. +func LogoURLContainsFold(v string) predicate.Publisher { + return predicate.Publisher(sql.FieldContainsFold(FieldLogoURL, v)) +} + +// HasPublisherPermissions applies the HasEdge predicate on the "publisher_permissions" edge. +func HasPublisherPermissions() predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PublisherPermissionsTable, PublisherPermissionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPublisherPermissionsWith applies the HasEdge predicate on the "publisher_permissions" edge with a given conditions (other predicates). +func HasPublisherPermissionsWith(preds ...predicate.PublisherPermission) predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := newPublisherPermissionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasNodes applies the HasEdge predicate on the "nodes" edge. +func HasNodes() predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, NodesTable, NodesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasNodesWith applies the HasEdge predicate on the "nodes" edge with a given conditions (other predicates). +func HasNodesWith(preds ...predicate.Node) predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := newNodesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasPersonalAccessTokens applies the HasEdge predicate on the "personal_access_tokens" edge. 
+func HasPersonalAccessTokens() predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PersonalAccessTokensTable, PersonalAccessTokensColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPersonalAccessTokensWith applies the HasEdge predicate on the "personal_access_tokens" edge with a given conditions (other predicates). +func HasPersonalAccessTokensWith(preds ...predicate.PersonalAccessToken) predicate.Publisher { + return predicate.Publisher(func(s *sql.Selector) { + step := newPersonalAccessTokensStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Publisher) predicate.Publisher { + return predicate.Publisher(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Publisher) predicate.Publisher { + return predicate.Publisher(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Publisher) predicate.Publisher { + return predicate.Publisher(sql.NotPredicates(p)) +} diff --git a/ent/publisher_create.go b/ent/publisher_create.go new file mode 100644 index 0000000..77be4ed --- /dev/null +++ b/ent/publisher_create.go @@ -0,0 +1,1079 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// PublisherCreate is the builder for creating a Publisher entity. 
+type PublisherCreate struct { + config + mutation *PublisherMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreateTime sets the "create_time" field. +func (pc *PublisherCreate) SetCreateTime(t time.Time) *PublisherCreate { + pc.mutation.SetCreateTime(t) + return pc +} + +// SetNillableCreateTime sets the "create_time" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableCreateTime(t *time.Time) *PublisherCreate { + if t != nil { + pc.SetCreateTime(*t) + } + return pc +} + +// SetUpdateTime sets the "update_time" field. +func (pc *PublisherCreate) SetUpdateTime(t time.Time) *PublisherCreate { + pc.mutation.SetUpdateTime(t) + return pc +} + +// SetNillableUpdateTime sets the "update_time" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableUpdateTime(t *time.Time) *PublisherCreate { + if t != nil { + pc.SetUpdateTime(*t) + } + return pc +} + +// SetName sets the "name" field. +func (pc *PublisherCreate) SetName(s string) *PublisherCreate { + pc.mutation.SetName(s) + return pc +} + +// SetDescription sets the "description" field. +func (pc *PublisherCreate) SetDescription(s string) *PublisherCreate { + pc.mutation.SetDescription(s) + return pc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableDescription(s *string) *PublisherCreate { + if s != nil { + pc.SetDescription(*s) + } + return pc +} + +// SetWebsite sets the "website" field. +func (pc *PublisherCreate) SetWebsite(s string) *PublisherCreate { + pc.mutation.SetWebsite(s) + return pc +} + +// SetNillableWebsite sets the "website" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableWebsite(s *string) *PublisherCreate { + if s != nil { + pc.SetWebsite(*s) + } + return pc +} + +// SetSupportEmail sets the "support_email" field. 
+func (pc *PublisherCreate) SetSupportEmail(s string) *PublisherCreate { + pc.mutation.SetSupportEmail(s) + return pc +} + +// SetNillableSupportEmail sets the "support_email" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableSupportEmail(s *string) *PublisherCreate { + if s != nil { + pc.SetSupportEmail(*s) + } + return pc +} + +// SetSourceCodeRepo sets the "source_code_repo" field. +func (pc *PublisherCreate) SetSourceCodeRepo(s string) *PublisherCreate { + pc.mutation.SetSourceCodeRepo(s) + return pc +} + +// SetNillableSourceCodeRepo sets the "source_code_repo" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableSourceCodeRepo(s *string) *PublisherCreate { + if s != nil { + pc.SetSourceCodeRepo(*s) + } + return pc +} + +// SetLogoURL sets the "logo_url" field. +func (pc *PublisherCreate) SetLogoURL(s string) *PublisherCreate { + pc.mutation.SetLogoURL(s) + return pc +} + +// SetNillableLogoURL sets the "logo_url" field if the given value is not nil. +func (pc *PublisherCreate) SetNillableLogoURL(s *string) *PublisherCreate { + if s != nil { + pc.SetLogoURL(*s) + } + return pc +} + +// SetID sets the "id" field. +func (pc *PublisherCreate) SetID(s string) *PublisherCreate { + pc.mutation.SetID(s) + return pc +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs. +func (pc *PublisherCreate) AddPublisherPermissionIDs(ids ...int) *PublisherCreate { + pc.mutation.AddPublisherPermissionIDs(ids...) + return pc +} + +// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity. +func (pc *PublisherCreate) AddPublisherPermissions(p ...*PublisherPermission) *PublisherCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pc.AddPublisherPermissionIDs(ids...) +} + +// AddNodeIDs adds the "nodes" edge to the Node entity by IDs. 
+func (pc *PublisherCreate) AddNodeIDs(ids ...string) *PublisherCreate { + pc.mutation.AddNodeIDs(ids...) + return pc +} + +// AddNodes adds the "nodes" edges to the Node entity. +func (pc *PublisherCreate) AddNodes(n ...*Node) *PublisherCreate { + ids := make([]string, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return pc.AddNodeIDs(ids...) +} + +// AddPersonalAccessTokenIDs adds the "personal_access_tokens" edge to the PersonalAccessToken entity by IDs. +func (pc *PublisherCreate) AddPersonalAccessTokenIDs(ids ...uuid.UUID) *PublisherCreate { + pc.mutation.AddPersonalAccessTokenIDs(ids...) + return pc +} + +// AddPersonalAccessTokens adds the "personal_access_tokens" edges to the PersonalAccessToken entity. +func (pc *PublisherCreate) AddPersonalAccessTokens(p ...*PersonalAccessToken) *PublisherCreate { + ids := make([]uuid.UUID, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pc.AddPersonalAccessTokenIDs(ids...) +} + +// Mutation returns the PublisherMutation object of the builder. +func (pc *PublisherCreate) Mutation() *PublisherMutation { + return pc.mutation +} + +// Save creates the Publisher in the database. +func (pc *PublisherCreate) Save(ctx context.Context) (*Publisher, error) { + pc.defaults() + return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pc *PublisherCreate) SaveX(ctx context.Context) *Publisher { + v, err := pc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pc *PublisherCreate) Exec(ctx context.Context) error { + _, err := pc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pc *PublisherCreate) ExecX(ctx context.Context) { + if err := pc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (pc *PublisherCreate) defaults() { + if _, ok := pc.mutation.CreateTime(); !ok { + v := publisher.DefaultCreateTime() + pc.mutation.SetCreateTime(v) + } + if _, ok := pc.mutation.UpdateTime(); !ok { + v := publisher.DefaultUpdateTime() + pc.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pc *PublisherCreate) check() error { + if _, ok := pc.mutation.CreateTime(); !ok { + return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "Publisher.create_time"`)} + } + if _, ok := pc.mutation.UpdateTime(); !ok { + return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "Publisher.update_time"`)} + } + if _, ok := pc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Publisher.name"`)} + } + return nil +} + +func (pc *PublisherCreate) sqlSave(ctx context.Context) (*Publisher, error) { + if err := pc.check(); err != nil { + return nil, err + } + _node, _spec := pc.createSpec() + if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Publisher.ID type: %T", _spec.ID.Value) + } + } + pc.mutation.id = &_node.ID + pc.mutation.done = true + return _node, nil +} + +func (pc *PublisherCreate) createSpec() (*Publisher, *sqlgraph.CreateSpec) { + var ( + _node = &Publisher{config: pc.config} + _spec = sqlgraph.NewCreateSpec(publisher.Table, sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString)) + ) + _spec.OnConflict = pc.conflict + if id, ok := pc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := pc.mutation.CreateTime(); ok { + _spec.SetField(publisher.FieldCreateTime, field.TypeTime, value) + 
_node.CreateTime = value + } + if value, ok := pc.mutation.UpdateTime(); ok { + _spec.SetField(publisher.FieldUpdateTime, field.TypeTime, value) + _node.UpdateTime = value + } + if value, ok := pc.mutation.Name(); ok { + _spec.SetField(publisher.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := pc.mutation.Description(); ok { + _spec.SetField(publisher.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := pc.mutation.Website(); ok { + _spec.SetField(publisher.FieldWebsite, field.TypeString, value) + _node.Website = value + } + if value, ok := pc.mutation.SupportEmail(); ok { + _spec.SetField(publisher.FieldSupportEmail, field.TypeString, value) + _node.SupportEmail = value + } + if value, ok := pc.mutation.SourceCodeRepo(); ok { + _spec.SetField(publisher.FieldSourceCodeRepo, field.TypeString, value) + _node.SourceCodeRepo = value + } + if value, ok := pc.mutation.LogoURL(); ok { + _spec.SetField(publisher.FieldLogoURL, field.TypeString, value) + _node.LogoURL = value + } + if nodes := pc.mutation.PublisherPermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PublisherPermissionsTable, + Columns: []string{publisher.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := pc.mutation.NodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.NodesTable, + Columns: []string{publisher.NodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, 
edge) + } + if nodes := pc.mutation.PersonalAccessTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PersonalAccessTokensTable, + Columns: []string{publisher.PersonalAccessTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Publisher.Create(). +// SetCreateTime(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PublisherUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (pc *PublisherCreate) OnConflict(opts ...sql.ConflictOption) *PublisherUpsertOne { + pc.conflict = opts + return &PublisherUpsertOne{ + create: pc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Publisher.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (pc *PublisherCreate) OnConflictColumns(columns ...string) *PublisherUpsertOne { + pc.conflict = append(pc.conflict, sql.ConflictColumns(columns...)) + return &PublisherUpsertOne{ + create: pc, + } +} + +type ( + // PublisherUpsertOne is the builder for "upsert"-ing + // one Publisher node. + PublisherUpsertOne struct { + create *PublisherCreate + } + + // PublisherUpsert is the "OnConflict" setter. + PublisherUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdateTime sets the "update_time" field. 
+func (u *PublisherUpsert) SetUpdateTime(v time.Time) *PublisherUpsert { + u.Set(publisher.FieldUpdateTime, v) + return u +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateUpdateTime() *PublisherUpsert { + u.SetExcluded(publisher.FieldUpdateTime) + return u +} + +// SetName sets the "name" field. +func (u *PublisherUpsert) SetName(v string) *PublisherUpsert { + u.Set(publisher.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateName() *PublisherUpsert { + u.SetExcluded(publisher.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *PublisherUpsert) SetDescription(v string) *PublisherUpsert { + u.Set(publisher.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateDescription() *PublisherUpsert { + u.SetExcluded(publisher.FieldDescription) + return u +} + +// ClearDescription clears the value of the "description" field. +func (u *PublisherUpsert) ClearDescription() *PublisherUpsert { + u.SetNull(publisher.FieldDescription) + return u +} + +// SetWebsite sets the "website" field. +func (u *PublisherUpsert) SetWebsite(v string) *PublisherUpsert { + u.Set(publisher.FieldWebsite, v) + return u +} + +// UpdateWebsite sets the "website" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateWebsite() *PublisherUpsert { + u.SetExcluded(publisher.FieldWebsite) + return u +} + +// ClearWebsite clears the value of the "website" field. +func (u *PublisherUpsert) ClearWebsite() *PublisherUpsert { + u.SetNull(publisher.FieldWebsite) + return u +} + +// SetSupportEmail sets the "support_email" field. 
+func (u *PublisherUpsert) SetSupportEmail(v string) *PublisherUpsert { + u.Set(publisher.FieldSupportEmail, v) + return u +} + +// UpdateSupportEmail sets the "support_email" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateSupportEmail() *PublisherUpsert { + u.SetExcluded(publisher.FieldSupportEmail) + return u +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (u *PublisherUpsert) ClearSupportEmail() *PublisherUpsert { + u.SetNull(publisher.FieldSupportEmail) + return u +} + +// SetSourceCodeRepo sets the "source_code_repo" field. +func (u *PublisherUpsert) SetSourceCodeRepo(v string) *PublisherUpsert { + u.Set(publisher.FieldSourceCodeRepo, v) + return u +} + +// UpdateSourceCodeRepo sets the "source_code_repo" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateSourceCodeRepo() *PublisherUpsert { + u.SetExcluded(publisher.FieldSourceCodeRepo) + return u +} + +// ClearSourceCodeRepo clears the value of the "source_code_repo" field. +func (u *PublisherUpsert) ClearSourceCodeRepo() *PublisherUpsert { + u.SetNull(publisher.FieldSourceCodeRepo) + return u +} + +// SetLogoURL sets the "logo_url" field. +func (u *PublisherUpsert) SetLogoURL(v string) *PublisherUpsert { + u.Set(publisher.FieldLogoURL, v) + return u +} + +// UpdateLogoURL sets the "logo_url" field to the value that was provided on create. +func (u *PublisherUpsert) UpdateLogoURL() *PublisherUpsert { + u.SetExcluded(publisher.FieldLogoURL) + return u +} + +// ClearLogoURL clears the value of the "logo_url" field. +func (u *PublisherUpsert) ClearLogoURL() *PublisherUpsert { + u.SetNull(publisher.FieldLogoURL) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Publisher.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(publisher.FieldID) +// }), +// ). +// Exec(ctx) +func (u *PublisherUpsertOne) UpdateNewValues() *PublisherUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(publisher.FieldID) + } + if _, exists := u.create.mutation.CreateTime(); exists { + s.SetIgnore(publisher.FieldCreateTime) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Publisher.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PublisherUpsertOne) Ignore() *PublisherUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PublisherUpsertOne) DoNothing() *PublisherUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PublisherCreate.OnConflict +// documentation for more info. +func (u *PublisherUpsertOne) Update(set func(*PublisherUpsert)) *PublisherUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PublisherUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *PublisherUpsertOne) SetUpdateTime(v time.Time) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. 
+func (u *PublisherUpsertOne) UpdateUpdateTime() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetName sets the "name" field. +func (u *PublisherUpsertOne) SetName(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateName() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *PublisherUpsertOne) SetDescription(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateDescription() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *PublisherUpsertOne) ClearDescription() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.ClearDescription() + }) +} + +// SetWebsite sets the "website" field. +func (u *PublisherUpsertOne) SetWebsite(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetWebsite(v) + }) +} + +// UpdateWebsite sets the "website" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateWebsite() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateWebsite() + }) +} + +// ClearWebsite clears the value of the "website" field. +func (u *PublisherUpsertOne) ClearWebsite() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.ClearWebsite() + }) +} + +// SetSupportEmail sets the "support_email" field. 
+func (u *PublisherUpsertOne) SetSupportEmail(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetSupportEmail(v) + }) +} + +// UpdateSupportEmail sets the "support_email" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateSupportEmail() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateSupportEmail() + }) +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (u *PublisherUpsertOne) ClearSupportEmail() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.ClearSupportEmail() + }) +} + +// SetSourceCodeRepo sets the "source_code_repo" field. +func (u *PublisherUpsertOne) SetSourceCodeRepo(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetSourceCodeRepo(v) + }) +} + +// UpdateSourceCodeRepo sets the "source_code_repo" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateSourceCodeRepo() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateSourceCodeRepo() + }) +} + +// ClearSourceCodeRepo clears the value of the "source_code_repo" field. +func (u *PublisherUpsertOne) ClearSourceCodeRepo() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.ClearSourceCodeRepo() + }) +} + +// SetLogoURL sets the "logo_url" field. +func (u *PublisherUpsertOne) SetLogoURL(v string) *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.SetLogoURL(v) + }) +} + +// UpdateLogoURL sets the "logo_url" field to the value that was provided on create. +func (u *PublisherUpsertOne) UpdateLogoURL() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.UpdateLogoURL() + }) +} + +// ClearLogoURL clears the value of the "logo_url" field. +func (u *PublisherUpsertOne) ClearLogoURL() *PublisherUpsertOne { + return u.Update(func(s *PublisherUpsert) { + s.ClearLogoURL() + }) +} + +// Exec executes the query. 
+func (u *PublisherUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PublisherCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PublisherUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *PublisherUpsertOne) ID(ctx context.Context) (id string, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: PublisherUpsertOne.ID is not supported by MySQL driver. Use PublisherUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *PublisherUpsertOne) IDX(ctx context.Context) string { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// PublisherCreateBulk is the builder for creating many Publisher entities in bulk. +type PublisherCreateBulk struct { + config + err error + builders []*PublisherCreate + conflict []sql.ConflictOption +} + +// Save creates the Publisher entities in the database. 
+func (pcb *PublisherCreateBulk) Save(ctx context.Context) ([]*Publisher, error) { + if pcb.err != nil { + return nil, pcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) + nodes := make([]*Publisher, len(pcb.builders)) + mutators := make([]Mutator, len(pcb.builders)) + for i := range pcb.builders { + func(i int, root context.Context) { + builder := pcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PublisherMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = pcb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (pcb *PublisherCreateBulk) SaveX(ctx context.Context) []*Publisher { + v, err := pcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pcb *PublisherCreateBulk) Exec(ctx context.Context) error { + _, err := pcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pcb *PublisherCreateBulk) ExecX(ctx context.Context) { + if err := pcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Publisher.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PublisherUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (pcb *PublisherCreateBulk) OnConflict(opts ...sql.ConflictOption) *PublisherUpsertBulk { + pcb.conflict = opts + return &PublisherUpsertBulk{ + create: pcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Publisher.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (pcb *PublisherCreateBulk) OnConflictColumns(columns ...string) *PublisherUpsertBulk { + pcb.conflict = append(pcb.conflict, sql.ConflictColumns(columns...)) + return &PublisherUpsertBulk{ + create: pcb, + } +} + +// PublisherUpsertBulk is the builder for "upsert"-ing +// a bulk of Publisher nodes. +type PublisherUpsertBulk struct { + create *PublisherCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Publisher.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(publisher.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *PublisherUpsertBulk) UpdateNewValues() *PublisherUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(publisher.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(publisher.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Publisher.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PublisherUpsertBulk) Ignore() *PublisherUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PublisherUpsertBulk) DoNothing() *PublisherUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PublisherCreateBulk.OnConflict +// documentation for more info. +func (u *PublisherUpsertBulk) Update(set func(*PublisherUpsert)) *PublisherUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PublisherUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *PublisherUpsertBulk) SetUpdateTime(v time.Time) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateUpdateTime() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetName sets the "name" field. 
+func (u *PublisherUpsertBulk) SetName(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateName() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *PublisherUpsertBulk) SetDescription(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateDescription() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *PublisherUpsertBulk) ClearDescription() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.ClearDescription() + }) +} + +// SetWebsite sets the "website" field. +func (u *PublisherUpsertBulk) SetWebsite(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetWebsite(v) + }) +} + +// UpdateWebsite sets the "website" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateWebsite() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateWebsite() + }) +} + +// ClearWebsite clears the value of the "website" field. +func (u *PublisherUpsertBulk) ClearWebsite() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.ClearWebsite() + }) +} + +// SetSupportEmail sets the "support_email" field. +func (u *PublisherUpsertBulk) SetSupportEmail(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetSupportEmail(v) + }) +} + +// UpdateSupportEmail sets the "support_email" field to the value that was provided on create. 
+func (u *PublisherUpsertBulk) UpdateSupportEmail() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateSupportEmail() + }) +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (u *PublisherUpsertBulk) ClearSupportEmail() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.ClearSupportEmail() + }) +} + +// SetSourceCodeRepo sets the "source_code_repo" field. +func (u *PublisherUpsertBulk) SetSourceCodeRepo(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetSourceCodeRepo(v) + }) +} + +// UpdateSourceCodeRepo sets the "source_code_repo" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateSourceCodeRepo() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateSourceCodeRepo() + }) +} + +// ClearSourceCodeRepo clears the value of the "source_code_repo" field. +func (u *PublisherUpsertBulk) ClearSourceCodeRepo() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.ClearSourceCodeRepo() + }) +} + +// SetLogoURL sets the "logo_url" field. +func (u *PublisherUpsertBulk) SetLogoURL(v string) *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.SetLogoURL(v) + }) +} + +// UpdateLogoURL sets the "logo_url" field to the value that was provided on create. +func (u *PublisherUpsertBulk) UpdateLogoURL() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.UpdateLogoURL() + }) +} + +// ClearLogoURL clears the value of the "logo_url" field. +func (u *PublisherUpsertBulk) ClearLogoURL() *PublisherUpsertBulk { + return u.Update(func(s *PublisherUpsert) { + s.ClearLogoURL() + }) +} + +// Exec executes the query. +func (u *PublisherUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. 
Set it on the PublisherCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PublisherCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PublisherUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/publisher_delete.go b/ent/publisher_delete.go new file mode 100644 index 0000000..7ca3028 --- /dev/null +++ b/ent/publisher_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherDelete is the builder for deleting a Publisher entity. +type PublisherDelete struct { + config + hooks []Hook + mutation *PublisherMutation +} + +// Where appends a list predicates to the PublisherDelete builder. +func (pd *PublisherDelete) Where(ps ...predicate.Publisher) *PublisherDelete { + pd.mutation.Where(ps...) + return pd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (pd *PublisherDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pd *PublisherDelete) ExecX(ctx context.Context) int { + n, err := pd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (pd *PublisherDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(publisher.Table, sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString)) + if ps := pd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pd.mutation.done = true + return affected, err +} + +// PublisherDeleteOne is the builder for deleting a single Publisher entity. +type PublisherDeleteOne struct { + pd *PublisherDelete +} + +// Where appends a list predicates to the PublisherDelete builder. +func (pdo *PublisherDeleteOne) Where(ps ...predicate.Publisher) *PublisherDeleteOne { + pdo.pd.mutation.Where(ps...) + return pdo +} + +// Exec executes the deletion query. +func (pdo *PublisherDeleteOne) Exec(ctx context.Context) error { + n, err := pdo.pd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{publisher.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (pdo *PublisherDeleteOne) ExecX(ctx context.Context) { + if err := pdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/publisher_query.go b/ent/publisher_query.go new file mode 100644 index 0000000..b3f8c79 --- /dev/null +++ b/ent/publisher_query.go @@ -0,0 +1,794 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "registry-backend/ent/node" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherQuery is the builder for querying Publisher entities. +type PublisherQuery struct { + config + ctx *QueryContext + order []publisher.OrderOption + inters []Interceptor + predicates []predicate.Publisher + withPublisherPermissions *PublisherPermissionQuery + withNodes *NodeQuery + withPersonalAccessTokens *PersonalAccessTokenQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PublisherQuery builder. +func (pq *PublisherQuery) Where(ps ...predicate.Publisher) *PublisherQuery { + pq.predicates = append(pq.predicates, ps...) + return pq +} + +// Limit the number of records to be returned by this query. +func (pq *PublisherQuery) Limit(limit int) *PublisherQuery { + pq.ctx.Limit = &limit + return pq +} + +// Offset to start from. +func (pq *PublisherQuery) Offset(offset int) *PublisherQuery { + pq.ctx.Offset = &offset + return pq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (pq *PublisherQuery) Unique(unique bool) *PublisherQuery { + pq.ctx.Unique = &unique + return pq +} + +// Order specifies how the records should be ordered. +func (pq *PublisherQuery) Order(o ...publisher.OrderOption) *PublisherQuery { + pq.order = append(pq.order, o...) + return pq +} + +// QueryPublisherPermissions chains the current query on the "publisher_permissions" edge. 
+func (pq *PublisherQuery) QueryPublisherPermissions() *PublisherPermissionQuery { + query := (&PublisherPermissionClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, selector), + sqlgraph.To(publisherpermission.Table, publisherpermission.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.PublisherPermissionsTable, publisher.PublisherPermissionsColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryNodes chains the current query on the "nodes" edge. +func (pq *PublisherQuery) QueryNodes() *NodeQuery { + query := (&NodeClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, selector), + sqlgraph.To(node.Table, node.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.NodesTable, publisher.NodesColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryPersonalAccessTokens chains the current query on the "personal_access_tokens" edge. 
+func (pq *PublisherQuery) QueryPersonalAccessTokens() *PersonalAccessTokenQuery { + query := (&PersonalAccessTokenClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(publisher.Table, publisher.FieldID, selector), + sqlgraph.To(personalaccesstoken.Table, personalaccesstoken.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, publisher.PersonalAccessTokensTable, publisher.PersonalAccessTokensColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Publisher entity from the query. +// Returns a *NotFoundError when no Publisher was found. +func (pq *PublisherQuery) First(ctx context.Context) (*Publisher, error) { + nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{publisher.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pq *PublisherQuery) FirstX(ctx context.Context) *Publisher { + node, err := pq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Publisher ID from the query. +// Returns a *NotFoundError when no Publisher ID was found. +func (pq *PublisherQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{publisher.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (pq *PublisherQuery) FirstIDX(ctx context.Context) string { + id, err := pq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Publisher entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Publisher entity is found. +// Returns a *NotFoundError when no Publisher entities are found. +func (pq *PublisherQuery) Only(ctx context.Context) (*Publisher, error) { + nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{publisher.Label} + default: + return nil, &NotSingularError{publisher.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pq *PublisherQuery) OnlyX(ctx context.Context) *Publisher { + node, err := pq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Publisher ID in the query. +// Returns a *NotSingularError when more than one Publisher ID is found. +// Returns a *NotFoundError when no entities are found. +func (pq *PublisherQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{publisher.Label} + default: + err = &NotSingularError{publisher.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (pq *PublisherQuery) OnlyIDX(ctx context.Context) string { + id, err := pq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Publishers. 
+func (pq *PublisherQuery) All(ctx context.Context) ([]*Publisher, error) { + ctx = setContextOp(ctx, pq.ctx, "All") + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Publisher, *PublisherQuery]() + return withInterceptors[[]*Publisher](ctx, pq, qr, pq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pq *PublisherQuery) AllX(ctx context.Context) []*Publisher { + nodes, err := pq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Publisher IDs. +func (pq *PublisherQuery) IDs(ctx context.Context) (ids []string, err error) { + if pq.ctx.Unique == nil && pq.path != nil { + pq.Unique(true) + } + ctx = setContextOp(ctx, pq.ctx, "IDs") + if err = pq.Select(publisher.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (pq *PublisherQuery) IDsX(ctx context.Context) []string { + ids, err := pq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pq *PublisherQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pq.ctx, "Count") + if err := pq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, pq, querierCount[*PublisherQuery](), pq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (pq *PublisherQuery) CountX(ctx context.Context) int { + count, err := pq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (pq *PublisherQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, pq.ctx, "Exist") + switch _, err := pq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (pq *PublisherQuery) ExistX(ctx context.Context) bool { + exist, err := pq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PublisherQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (pq *PublisherQuery) Clone() *PublisherQuery { + if pq == nil { + return nil + } + return &PublisherQuery{ + config: pq.config, + ctx: pq.ctx.Clone(), + order: append([]publisher.OrderOption{}, pq.order...), + inters: append([]Interceptor{}, pq.inters...), + predicates: append([]predicate.Publisher{}, pq.predicates...), + withPublisherPermissions: pq.withPublisherPermissions.Clone(), + withNodes: pq.withNodes.Clone(), + withPersonalAccessTokens: pq.withPersonalAccessTokens.Clone(), + // clone intermediate query. + sql: pq.sql.Clone(), + path: pq.path, + } +} + +// WithPublisherPermissions tells the query-builder to eager-load the nodes that are connected to +// the "publisher_permissions" edge. The optional arguments are used to configure the query builder of the edge. +func (pq *PublisherQuery) WithPublisherPermissions(opts ...func(*PublisherPermissionQuery)) *PublisherQuery { + query := (&PublisherPermissionClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) + } + pq.withPublisherPermissions = query + return pq +} + +// WithNodes tells the query-builder to eager-load the nodes that are connected to +// the "nodes" edge. The optional arguments are used to configure the query builder of the edge. 
+func (pq *PublisherQuery) WithNodes(opts ...func(*NodeQuery)) *PublisherQuery { + query := (&NodeClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) + } + pq.withNodes = query + return pq +} + +// WithPersonalAccessTokens tells the query-builder to eager-load the nodes that are connected to +// the "personal_access_tokens" edge. The optional arguments are used to configure the query builder of the edge. +func (pq *PublisherQuery) WithPersonalAccessTokens(opts ...func(*PersonalAccessTokenQuery)) *PublisherQuery { + query := (&PersonalAccessTokenClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) + } + pq.withPersonalAccessTokens = query + return pq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Publisher.Query(). +// GroupBy(publisher.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (pq *PublisherQuery) GroupBy(field string, fields ...string) *PublisherGroupBy { + pq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PublisherGroupBy{build: pq} + grbuild.flds = &pq.ctx.Fields + grbuild.label = publisher.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.Publisher.Query(). +// Select(publisher.FieldCreateTime). +// Scan(ctx, &v) +func (pq *PublisherQuery) Select(fields ...string) *PublisherSelect { + pq.ctx.Fields = append(pq.ctx.Fields, fields...) 
+ sbuild := &PublisherSelect{PublisherQuery: pq} + sbuild.label = publisher.Label + sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PublisherSelect configured with the given aggregations. +func (pq *PublisherQuery) Aggregate(fns ...AggregateFunc) *PublisherSelect { + return pq.Select().Aggregate(fns...) +} + +func (pq *PublisherQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pq); err != nil { + return err + } + } + } + for _, f := range pq.ctx.Fields { + if !publisher.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if pq.path != nil { + prev, err := pq.path(ctx) + if err != nil { + return err + } + pq.sql = prev + } + return nil +} + +func (pq *PublisherQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Publisher, error) { + var ( + nodes = []*Publisher{} + _spec = pq.querySpec() + loadedTypes = [3]bool{ + pq.withPublisherPermissions != nil, + pq.withNodes != nil, + pq.withPersonalAccessTokens != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Publisher).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Publisher{config: pq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(pq.modifiers) > 0 { + _spec.Modifiers = pq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := pq.withPublisherPermissions; query != nil { + if err := pq.loadPublisherPermissions(ctx, query, nodes, + func(n *Publisher) { 
n.Edges.PublisherPermissions = []*PublisherPermission{} }, + func(n *Publisher, e *PublisherPermission) { + n.Edges.PublisherPermissions = append(n.Edges.PublisherPermissions, e) + }); err != nil { + return nil, err + } + } + if query := pq.withNodes; query != nil { + if err := pq.loadNodes(ctx, query, nodes, + func(n *Publisher) { n.Edges.Nodes = []*Node{} }, + func(n *Publisher, e *Node) { n.Edges.Nodes = append(n.Edges.Nodes, e) }); err != nil { + return nil, err + } + } + if query := pq.withPersonalAccessTokens; query != nil { + if err := pq.loadPersonalAccessTokens(ctx, query, nodes, + func(n *Publisher) { n.Edges.PersonalAccessTokens = []*PersonalAccessToken{} }, + func(n *Publisher, e *PersonalAccessToken) { + n.Edges.PersonalAccessTokens = append(n.Edges.PersonalAccessTokens, e) + }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (pq *PublisherQuery) loadPublisherPermissions(ctx context.Context, query *PublisherPermissionQuery, nodes []*Publisher, init func(*Publisher), assign func(*Publisher, *PublisherPermission)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*Publisher) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(publisherpermission.FieldPublisherID) + } + query.Where(predicate.PublisherPermission(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(publisher.PublisherPermissionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.PublisherID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "publisher_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (pq *PublisherQuery) loadNodes(ctx context.Context, query *NodeQuery, nodes []*Publisher, init func(*Publisher), assign func(*Publisher, 
*Node)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*Publisher) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(node.FieldPublisherID) + } + query.Where(predicate.Node(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(publisher.NodesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.PublisherID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "publisher_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (pq *PublisherQuery) loadPersonalAccessTokens(ctx context.Context, query *PersonalAccessTokenQuery, nodes []*Publisher, init func(*Publisher), assign func(*Publisher, *PersonalAccessToken)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*Publisher) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(personalaccesstoken.FieldPublisherID) + } + query.Where(predicate.PersonalAccessToken(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(publisher.PersonalAccessTokensColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.PublisherID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "publisher_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (pq *PublisherQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pq.querySpec() + if len(pq.modifiers) > 0 { + _spec.Modifiers = pq.modifiers + } + _spec.Node.Columns = pq.ctx.Fields + if len(pq.ctx.Fields) > 0 { + _spec.Unique = 
pq.ctx.Unique != nil && *pq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pq.driver, _spec) +} + +func (pq *PublisherQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(publisher.Table, publisher.Columns, sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString)) + _spec.From = pq.sql + if unique := pq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pq.path != nil { + _spec.Unique = true + } + if fields := pq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, publisher.FieldID) + for i := range fields { + if fields[i] != publisher.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := pq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pq *PublisherQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pq.driver.Dialect()) + t1 := builder.Table(publisher.Table) + columns := pq.ctx.Fields + if len(columns) == 0 { + columns = publisher.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pq.sql != nil { + selector = pq.sql + selector.Select(selector.Columns(columns...)...) + } + if pq.ctx.Unique != nil && *pq.ctx.Unique { + selector.Distinct() + } + for _, m := range pq.modifiers { + m(selector) + } + for _, p := range pq.predicates { + p(selector) + } + for _, p := range pq.order { + p(selector) + } + if offset := pq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (pq *PublisherQuery) ForUpdate(opts ...sql.LockOption) *PublisherQuery { + if pq.driver.Dialect() == dialect.Postgres { + pq.Unique(false) + } + pq.modifiers = append(pq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return pq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (pq *PublisherQuery) ForShare(opts ...sql.LockOption) *PublisherQuery { + if pq.driver.Dialect() == dialect.Postgres { + pq.Unique(false) + } + pq.modifiers = append(pq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return pq +} + +// PublisherGroupBy is the group-by builder for Publisher entities. +type PublisherGroupBy struct { + selector + build *PublisherQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pgb *PublisherGroupBy) Aggregate(fns ...AggregateFunc) *PublisherGroupBy { + pgb.fns = append(pgb.fns, fns...) + return pgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (pgb *PublisherGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pgb.build.ctx, "GroupBy") + if err := pgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PublisherQuery, *PublisherGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) +} + +func (pgb *PublisherGroupBy) sqlScan(ctx context.Context, root *PublisherQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pgb.fns)) + for _, fn := range pgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) + for _, f := range *pgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PublisherSelect is the builder for selecting fields of Publisher entities. +type PublisherSelect struct { + *PublisherQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ps *PublisherSelect) Aggregate(fns ...AggregateFunc) *PublisherSelect { + ps.fns = append(ps.fns, fns...) + return ps +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ps *PublisherSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ps.ctx, "Select") + if err := ps.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PublisherQuery, *PublisherSelect](ctx, ps.PublisherQuery, ps, ps.inters, v) +} + +func (ps *PublisherSelect) sqlScan(ctx context.Context, root *PublisherQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ps.fns)) + for _, fn := range ps.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ps.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/publisher_update.go b/ent/publisher_update.go new file mode 100644 index 0000000..652a361 --- /dev/null +++ b/ent/publisher_update.go @@ -0,0 +1,996 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/node" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// PublisherUpdate is the builder for updating Publisher entities. +type PublisherUpdate struct { + config + hooks []Hook + mutation *PublisherMutation +} + +// Where appends a list predicates to the PublisherUpdate builder. +func (pu *PublisherUpdate) Where(ps ...predicate.Publisher) *PublisherUpdate { + pu.mutation.Where(ps...) + return pu +} + +// SetUpdateTime sets the "update_time" field. 
+func (pu *PublisherUpdate) SetUpdateTime(t time.Time) *PublisherUpdate { + pu.mutation.SetUpdateTime(t) + return pu +} + +// SetName sets the "name" field. +func (pu *PublisherUpdate) SetName(s string) *PublisherUpdate { + pu.mutation.SetName(s) + return pu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (pu *PublisherUpdate) SetNillableName(s *string) *PublisherUpdate { + if s != nil { + pu.SetName(*s) + } + return pu +} + +// SetDescription sets the "description" field. +func (pu *PublisherUpdate) SetDescription(s string) *PublisherUpdate { + pu.mutation.SetDescription(s) + return pu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (pu *PublisherUpdate) SetNillableDescription(s *string) *PublisherUpdate { + if s != nil { + pu.SetDescription(*s) + } + return pu +} + +// ClearDescription clears the value of the "description" field. +func (pu *PublisherUpdate) ClearDescription() *PublisherUpdate { + pu.mutation.ClearDescription() + return pu +} + +// SetWebsite sets the "website" field. +func (pu *PublisherUpdate) SetWebsite(s string) *PublisherUpdate { + pu.mutation.SetWebsite(s) + return pu +} + +// SetNillableWebsite sets the "website" field if the given value is not nil. +func (pu *PublisherUpdate) SetNillableWebsite(s *string) *PublisherUpdate { + if s != nil { + pu.SetWebsite(*s) + } + return pu +} + +// ClearWebsite clears the value of the "website" field. +func (pu *PublisherUpdate) ClearWebsite() *PublisherUpdate { + pu.mutation.ClearWebsite() + return pu +} + +// SetSupportEmail sets the "support_email" field. +func (pu *PublisherUpdate) SetSupportEmail(s string) *PublisherUpdate { + pu.mutation.SetSupportEmail(s) + return pu +} + +// SetNillableSupportEmail sets the "support_email" field if the given value is not nil. 
+func (pu *PublisherUpdate) SetNillableSupportEmail(s *string) *PublisherUpdate { + if s != nil { + pu.SetSupportEmail(*s) + } + return pu +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (pu *PublisherUpdate) ClearSupportEmail() *PublisherUpdate { + pu.mutation.ClearSupportEmail() + return pu +} + +// SetSourceCodeRepo sets the "source_code_repo" field. +func (pu *PublisherUpdate) SetSourceCodeRepo(s string) *PublisherUpdate { + pu.mutation.SetSourceCodeRepo(s) + return pu +} + +// SetNillableSourceCodeRepo sets the "source_code_repo" field if the given value is not nil. +func (pu *PublisherUpdate) SetNillableSourceCodeRepo(s *string) *PublisherUpdate { + if s != nil { + pu.SetSourceCodeRepo(*s) + } + return pu +} + +// ClearSourceCodeRepo clears the value of the "source_code_repo" field. +func (pu *PublisherUpdate) ClearSourceCodeRepo() *PublisherUpdate { + pu.mutation.ClearSourceCodeRepo() + return pu +} + +// SetLogoURL sets the "logo_url" field. +func (pu *PublisherUpdate) SetLogoURL(s string) *PublisherUpdate { + pu.mutation.SetLogoURL(s) + return pu +} + +// SetNillableLogoURL sets the "logo_url" field if the given value is not nil. +func (pu *PublisherUpdate) SetNillableLogoURL(s *string) *PublisherUpdate { + if s != nil { + pu.SetLogoURL(*s) + } + return pu +} + +// ClearLogoURL clears the value of the "logo_url" field. +func (pu *PublisherUpdate) ClearLogoURL() *PublisherUpdate { + pu.mutation.ClearLogoURL() + return pu +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs. +func (pu *PublisherUpdate) AddPublisherPermissionIDs(ids ...int) *PublisherUpdate { + pu.mutation.AddPublisherPermissionIDs(ids...) + return pu +} + +// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity. 
+func (pu *PublisherUpdate) AddPublisherPermissions(p ...*PublisherPermission) *PublisherUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.AddPublisherPermissionIDs(ids...) +} + +// AddNodeIDs adds the "nodes" edge to the Node entity by IDs. +func (pu *PublisherUpdate) AddNodeIDs(ids ...string) *PublisherUpdate { + pu.mutation.AddNodeIDs(ids...) + return pu +} + +// AddNodes adds the "nodes" edges to the Node entity. +func (pu *PublisherUpdate) AddNodes(n ...*Node) *PublisherUpdate { + ids := make([]string, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return pu.AddNodeIDs(ids...) +} + +// AddPersonalAccessTokenIDs adds the "personal_access_tokens" edge to the PersonalAccessToken entity by IDs. +func (pu *PublisherUpdate) AddPersonalAccessTokenIDs(ids ...uuid.UUID) *PublisherUpdate { + pu.mutation.AddPersonalAccessTokenIDs(ids...) + return pu +} + +// AddPersonalAccessTokens adds the "personal_access_tokens" edges to the PersonalAccessToken entity. +func (pu *PublisherUpdate) AddPersonalAccessTokens(p ...*PersonalAccessToken) *PublisherUpdate { + ids := make([]uuid.UUID, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.AddPersonalAccessTokenIDs(ids...) +} + +// Mutation returns the PublisherMutation object of the builder. +func (pu *PublisherUpdate) Mutation() *PublisherMutation { + return pu.mutation +} + +// ClearPublisherPermissions clears all "publisher_permissions" edges to the PublisherPermission entity. +func (pu *PublisherUpdate) ClearPublisherPermissions() *PublisherUpdate { + pu.mutation.ClearPublisherPermissions() + return pu +} + +// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to PublisherPermission entities by IDs. +func (pu *PublisherUpdate) RemovePublisherPermissionIDs(ids ...int) *PublisherUpdate { + pu.mutation.RemovePublisherPermissionIDs(ids...) 
+ return pu +} + +// RemovePublisherPermissions removes "publisher_permissions" edges to PublisherPermission entities. +func (pu *PublisherUpdate) RemovePublisherPermissions(p ...*PublisherPermission) *PublisherUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.RemovePublisherPermissionIDs(ids...) +} + +// ClearNodes clears all "nodes" edges to the Node entity. +func (pu *PublisherUpdate) ClearNodes() *PublisherUpdate { + pu.mutation.ClearNodes() + return pu +} + +// RemoveNodeIDs removes the "nodes" edge to Node entities by IDs. +func (pu *PublisherUpdate) RemoveNodeIDs(ids ...string) *PublisherUpdate { + pu.mutation.RemoveNodeIDs(ids...) + return pu +} + +// RemoveNodes removes "nodes" edges to Node entities. +func (pu *PublisherUpdate) RemoveNodes(n ...*Node) *PublisherUpdate { + ids := make([]string, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return pu.RemoveNodeIDs(ids...) +} + +// ClearPersonalAccessTokens clears all "personal_access_tokens" edges to the PersonalAccessToken entity. +func (pu *PublisherUpdate) ClearPersonalAccessTokens() *PublisherUpdate { + pu.mutation.ClearPersonalAccessTokens() + return pu +} + +// RemovePersonalAccessTokenIDs removes the "personal_access_tokens" edge to PersonalAccessToken entities by IDs. +func (pu *PublisherUpdate) RemovePersonalAccessTokenIDs(ids ...uuid.UUID) *PublisherUpdate { + pu.mutation.RemovePersonalAccessTokenIDs(ids...) + return pu +} + +// RemovePersonalAccessTokens removes "personal_access_tokens" edges to PersonalAccessToken entities. +func (pu *PublisherUpdate) RemovePersonalAccessTokens(p ...*PersonalAccessToken) *PublisherUpdate { + ids := make([]uuid.UUID, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.RemovePersonalAccessTokenIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (pu *PublisherUpdate) Save(ctx context.Context) (int, error) { + pu.defaults() + return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pu *PublisherUpdate) SaveX(ctx context.Context) int { + affected, err := pu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pu *PublisherUpdate) Exec(ctx context.Context) error { + _, err := pu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pu *PublisherUpdate) ExecX(ctx context.Context) { + if err := pu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pu *PublisherUpdate) defaults() { + if _, ok := pu.mutation.UpdateTime(); !ok { + v := publisher.UpdateDefaultUpdateTime() + pu.mutation.SetUpdateTime(v) + } +} + +func (pu *PublisherUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(publisher.Table, publisher.Columns, sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString)) + if ps := pu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pu.mutation.UpdateTime(); ok { + _spec.SetField(publisher.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := pu.mutation.Name(); ok { + _spec.SetField(publisher.FieldName, field.TypeString, value) + } + if value, ok := pu.mutation.Description(); ok { + _spec.SetField(publisher.FieldDescription, field.TypeString, value) + } + if pu.mutation.DescriptionCleared() { + _spec.ClearField(publisher.FieldDescription, field.TypeString) + } + if value, ok := pu.mutation.Website(); ok { + _spec.SetField(publisher.FieldWebsite, field.TypeString, value) + } + if pu.mutation.WebsiteCleared() { + _spec.ClearField(publisher.FieldWebsite, field.TypeString) + } + if value, ok := pu.mutation.SupportEmail(); ok { + 
_spec.SetField(publisher.FieldSupportEmail, field.TypeString, value) + } + if pu.mutation.SupportEmailCleared() { + _spec.ClearField(publisher.FieldSupportEmail, field.TypeString) + } + if value, ok := pu.mutation.SourceCodeRepo(); ok { + _spec.SetField(publisher.FieldSourceCodeRepo, field.TypeString, value) + } + if pu.mutation.SourceCodeRepoCleared() { + _spec.ClearField(publisher.FieldSourceCodeRepo, field.TypeString) + } + if value, ok := pu.mutation.LogoURL(); ok { + _spec.SetField(publisher.FieldLogoURL, field.TypeString, value) + } + if pu.mutation.LogoURLCleared() { + _spec.ClearField(publisher.FieldLogoURL, field.TypeString) + } + if pu.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PublisherPermissionsTable, + Columns: []string{publisher.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedPublisherPermissionsIDs(); len(nodes) > 0 && !pu.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PublisherPermissionsTable, + Columns: []string{publisher.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.PublisherPermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PublisherPermissionsTable, + Columns: []string{publisher.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), 
+ }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pu.mutation.NodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.NodesTable, + Columns: []string{publisher.NodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedNodesIDs(); len(nodes) > 0 && !pu.mutation.NodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.NodesTable, + Columns: []string{publisher.NodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.NodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.NodesTable, + Columns: []string{publisher.NodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pu.mutation.PersonalAccessTokensCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PersonalAccessTokensTable, + Columns: []string{publisher.PersonalAccessTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedPersonalAccessTokensIDs(); len(nodes) > 0 && !pu.mutation.PersonalAccessTokensCleared() { + 
edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PersonalAccessTokensTable, + Columns: []string{publisher.PersonalAccessTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.PersonalAccessTokensIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: publisher.PersonalAccessTokensTable, + Columns: []string{publisher.PersonalAccessTokensColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{publisher.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + pu.mutation.done = true + return n, nil +} + +// PublisherUpdateOne is the builder for updating a single Publisher entity. +type PublisherUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PublisherMutation +} + +// SetUpdateTime sets the "update_time" field. +func (puo *PublisherUpdateOne) SetUpdateTime(t time.Time) *PublisherUpdateOne { + puo.mutation.SetUpdateTime(t) + return puo +} + +// SetName sets the "name" field. +func (puo *PublisherUpdateOne) SetName(s string) *PublisherUpdateOne { + puo.mutation.SetName(s) + return puo +} + +// SetNillableName sets the "name" field if the given value is not nil. 
+func (puo *PublisherUpdateOne) SetNillableName(s *string) *PublisherUpdateOne { + if s != nil { + puo.SetName(*s) + } + return puo +} + +// SetDescription sets the "description" field. +func (puo *PublisherUpdateOne) SetDescription(s string) *PublisherUpdateOne { + puo.mutation.SetDescription(s) + return puo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (puo *PublisherUpdateOne) SetNillableDescription(s *string) *PublisherUpdateOne { + if s != nil { + puo.SetDescription(*s) + } + return puo +} + +// ClearDescription clears the value of the "description" field. +func (puo *PublisherUpdateOne) ClearDescription() *PublisherUpdateOne { + puo.mutation.ClearDescription() + return puo +} + +// SetWebsite sets the "website" field. +func (puo *PublisherUpdateOne) SetWebsite(s string) *PublisherUpdateOne { + puo.mutation.SetWebsite(s) + return puo +} + +// SetNillableWebsite sets the "website" field if the given value is not nil. +func (puo *PublisherUpdateOne) SetNillableWebsite(s *string) *PublisherUpdateOne { + if s != nil { + puo.SetWebsite(*s) + } + return puo +} + +// ClearWebsite clears the value of the "website" field. +func (puo *PublisherUpdateOne) ClearWebsite() *PublisherUpdateOne { + puo.mutation.ClearWebsite() + return puo +} + +// SetSupportEmail sets the "support_email" field. +func (puo *PublisherUpdateOne) SetSupportEmail(s string) *PublisherUpdateOne { + puo.mutation.SetSupportEmail(s) + return puo +} + +// SetNillableSupportEmail sets the "support_email" field if the given value is not nil. +func (puo *PublisherUpdateOne) SetNillableSupportEmail(s *string) *PublisherUpdateOne { + if s != nil { + puo.SetSupportEmail(*s) + } + return puo +} + +// ClearSupportEmail clears the value of the "support_email" field. +func (puo *PublisherUpdateOne) ClearSupportEmail() *PublisherUpdateOne { + puo.mutation.ClearSupportEmail() + return puo +} + +// SetSourceCodeRepo sets the "source_code_repo" field. 
+// NOTE(review): ent-generated edge builders for PublisherUpdateOne. Edge IDs use
+// int for PublisherPermission, string for Node, and uuid.UUID for
+// PersonalAccessToken — these must stay in sync with the generated schema.
+func (puo *PublisherUpdateOne) SetSourceCodeRepo(s string) *PublisherUpdateOne {
+	puo.mutation.SetSourceCodeRepo(s)
+	return puo
+}
+
+// SetNillableSourceCodeRepo sets the "source_code_repo" field if the given value is not nil.
+func (puo *PublisherUpdateOne) SetNillableSourceCodeRepo(s *string) *PublisherUpdateOne {
+	if s != nil {
+		puo.SetSourceCodeRepo(*s)
+	}
+	return puo
+}
+
+// ClearSourceCodeRepo clears the value of the "source_code_repo" field.
+func (puo *PublisherUpdateOne) ClearSourceCodeRepo() *PublisherUpdateOne {
+	puo.mutation.ClearSourceCodeRepo()
+	return puo
+}
+
+// SetLogoURL sets the "logo_url" field.
+func (puo *PublisherUpdateOne) SetLogoURL(s string) *PublisherUpdateOne {
+	puo.mutation.SetLogoURL(s)
+	return puo
+}
+
+// SetNillableLogoURL sets the "logo_url" field if the given value is not nil.
+func (puo *PublisherUpdateOne) SetNillableLogoURL(s *string) *PublisherUpdateOne {
+	if s != nil {
+		puo.SetLogoURL(*s)
+	}
+	return puo
+}
+
+// ClearLogoURL clears the value of the "logo_url" field.
+func (puo *PublisherUpdateOne) ClearLogoURL() *PublisherUpdateOne {
+	puo.mutation.ClearLogoURL()
+	return puo
+}
+
+// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs.
+func (puo *PublisherUpdateOne) AddPublisherPermissionIDs(ids ...int) *PublisherUpdateOne {
+	puo.mutation.AddPublisherPermissionIDs(ids...)
+	return puo
+}
+
+// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity.
+func (puo *PublisherUpdateOne) AddPublisherPermissions(p ...*PublisherPermission) *PublisherUpdateOne {
+	ids := make([]int, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return puo.AddPublisherPermissionIDs(ids...)
+}
+
+// AddNodeIDs adds the "nodes" edge to the Node entity by IDs.
+func (puo *PublisherUpdateOne) AddNodeIDs(ids ...string) *PublisherUpdateOne {
+	puo.mutation.AddNodeIDs(ids...)
+	return puo
+}
+
+// AddNodes adds the "nodes" edges to the Node entity.
+func (puo *PublisherUpdateOne) AddNodes(n ...*Node) *PublisherUpdateOne {
+	ids := make([]string, len(n))
+	for i := range n {
+		ids[i] = n[i].ID
+	}
+	return puo.AddNodeIDs(ids...)
+}
+
+// AddPersonalAccessTokenIDs adds the "personal_access_tokens" edge to the PersonalAccessToken entity by IDs.
+func (puo *PublisherUpdateOne) AddPersonalAccessTokenIDs(ids ...uuid.UUID) *PublisherUpdateOne {
+	puo.mutation.AddPersonalAccessTokenIDs(ids...)
+	return puo
+}
+
+// AddPersonalAccessTokens adds the "personal_access_tokens" edges to the PersonalAccessToken entity.
+func (puo *PublisherUpdateOne) AddPersonalAccessTokens(p ...*PersonalAccessToken) *PublisherUpdateOne {
+	ids := make([]uuid.UUID, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return puo.AddPersonalAccessTokenIDs(ids...)
+}
+
+// Mutation returns the PublisherMutation object of the builder.
+func (puo *PublisherUpdateOne) Mutation() *PublisherMutation {
+	return puo.mutation
+}
+
+// ClearPublisherPermissions clears all "publisher_permissions" edges to the PublisherPermission entity.
+func (puo *PublisherUpdateOne) ClearPublisherPermissions() *PublisherUpdateOne {
+	puo.mutation.ClearPublisherPermissions()
+	return puo
+}
+
+// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to PublisherPermission entities by IDs.
+func (puo *PublisherUpdateOne) RemovePublisherPermissionIDs(ids ...int) *PublisherUpdateOne {
+	puo.mutation.RemovePublisherPermissionIDs(ids...)
+	return puo
+}
+
+// RemovePublisherPermissions removes "publisher_permissions" edges to PublisherPermission entities.
+func (puo *PublisherUpdateOne) RemovePublisherPermissions(p ...*PublisherPermission) *PublisherUpdateOne {
+	ids := make([]int, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return puo.RemovePublisherPermissionIDs(ids...)
+}
+
+// ClearNodes clears all "nodes" edges to the Node entity.
+// NOTE(review): this span closes out the generated publisher_update.go (edge
+// removal, Save/Exec plumbing, and the sqlSave SQL materialization) and then the
+// patch opens a new generated file, ent/publisherpermission.go. All of it is
+// ent-generated; regenerate from the schema instead of hand-editing.
+func (puo *PublisherUpdateOne) ClearNodes() *PublisherUpdateOne {
+	puo.mutation.ClearNodes()
+	return puo
+}
+
+// RemoveNodeIDs removes the "nodes" edge to Node entities by IDs.
+func (puo *PublisherUpdateOne) RemoveNodeIDs(ids ...string) *PublisherUpdateOne {
+	puo.mutation.RemoveNodeIDs(ids...)
+	return puo
+}
+
+// RemoveNodes removes "nodes" edges to Node entities.
+func (puo *PublisherUpdateOne) RemoveNodes(n ...*Node) *PublisherUpdateOne {
+	ids := make([]string, len(n))
+	for i := range n {
+		ids[i] = n[i].ID
+	}
+	return puo.RemoveNodeIDs(ids...)
+}
+
+// ClearPersonalAccessTokens clears all "personal_access_tokens" edges to the PersonalAccessToken entity.
+func (puo *PublisherUpdateOne) ClearPersonalAccessTokens() *PublisherUpdateOne {
+	puo.mutation.ClearPersonalAccessTokens()
+	return puo
+}
+
+// RemovePersonalAccessTokenIDs removes the "personal_access_tokens" edge to PersonalAccessToken entities by IDs.
+func (puo *PublisherUpdateOne) RemovePersonalAccessTokenIDs(ids ...uuid.UUID) *PublisherUpdateOne {
+	puo.mutation.RemovePersonalAccessTokenIDs(ids...)
+	return puo
+}
+
+// RemovePersonalAccessTokens removes "personal_access_tokens" edges to PersonalAccessToken entities.
+func (puo *PublisherUpdateOne) RemovePersonalAccessTokens(p ...*PersonalAccessToken) *PublisherUpdateOne {
+	ids := make([]uuid.UUID, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return puo.RemovePersonalAccessTokenIDs(ids...)
+}
+
+// Where appends a list predicates to the PublisherUpdate builder.
+func (puo *PublisherUpdateOne) Where(ps ...predicate.Publisher) *PublisherUpdateOne {
+	puo.mutation.Where(ps...)
+	return puo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (puo *PublisherUpdateOne) Select(field string, fields ...string) *PublisherUpdateOne {
+	puo.fields = append([]string{field}, fields...)
+	return puo
+}
+
+// Save executes the query and returns the updated Publisher entity.
+func (puo *PublisherUpdateOne) Save(ctx context.Context) (*Publisher, error) {
+	puo.defaults()
+	return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (puo *PublisherUpdateOne) SaveX(ctx context.Context) *Publisher {
+	node, err := puo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (puo *PublisherUpdateOne) Exec(ctx context.Context) error {
+	_, err := puo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (puo *PublisherUpdateOne) ExecX(ctx context.Context) {
+	if err := puo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (puo *PublisherUpdateOne) defaults() {
+	if _, ok := puo.mutation.UpdateTime(); !ok {
+		v := publisher.UpdateDefaultUpdateTime()
+		puo.mutation.SetUpdateTime(v)
+	}
+}
+
+// sqlSave translates the accumulated mutation into a sqlgraph update spec:
+// scalar fields are set/cleared first, then each O2M edge (publisher_permissions,
+// nodes, personal_access_tokens) is appended to Edges.Clear or Edges.Add.
+func (puo *PublisherUpdateOne) sqlSave(ctx context.Context) (_node *Publisher, err error) {
+	_spec := sqlgraph.NewUpdateSpec(publisher.Table, publisher.Columns, sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString))
+	id, ok := puo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Publisher.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := puo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, publisher.FieldID)
+		for _, f := range fields {
+			if !publisher.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != publisher.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := puo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := puo.mutation.UpdateTime(); ok {
+		_spec.SetField(publisher.FieldUpdateTime, field.TypeTime, value)
+	}
+	if value, ok := puo.mutation.Name(); ok {
+		_spec.SetField(publisher.FieldName, field.TypeString, value)
+	}
+	if value, ok := puo.mutation.Description(); ok {
+		_spec.SetField(publisher.FieldDescription, field.TypeString, value)
+	}
+	if puo.mutation.DescriptionCleared() {
+		_spec.ClearField(publisher.FieldDescription, field.TypeString)
+	}
+	if value, ok := puo.mutation.Website(); ok {
+		_spec.SetField(publisher.FieldWebsite, field.TypeString, value)
+	}
+	if puo.mutation.WebsiteCleared() {
+		_spec.ClearField(publisher.FieldWebsite, field.TypeString)
+	}
+	if value, ok := puo.mutation.SupportEmail(); ok {
+		_spec.SetField(publisher.FieldSupportEmail, field.TypeString, value)
+	}
+	if puo.mutation.SupportEmailCleared() {
+		_spec.ClearField(publisher.FieldSupportEmail, field.TypeString)
+	}
+	if value, ok := puo.mutation.SourceCodeRepo(); ok {
+		_spec.SetField(publisher.FieldSourceCodeRepo, field.TypeString, value)
+	}
+	if puo.mutation.SourceCodeRepoCleared() {
+		_spec.ClearField(publisher.FieldSourceCodeRepo, field.TypeString)
+	}
+	if value, ok := puo.mutation.LogoURL(); ok {
+		_spec.SetField(publisher.FieldLogoURL, field.TypeString, value)
+	}
+	if puo.mutation.LogoURLCleared() {
+		_spec.ClearField(publisher.FieldLogoURL, field.TypeString)
+	}
+	if puo.mutation.PublisherPermissionsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PublisherPermissionsTable,
+			Columns: []string{publisher.PublisherPermissionsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.RemovedPublisherPermissionsIDs(); len(nodes) > 0 && !puo.mutation.PublisherPermissionsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PublisherPermissionsTable,
+			Columns: []string{publisher.PublisherPermissionsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.PublisherPermissionsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PublisherPermissionsTable,
+			Columns: []string{publisher.PublisherPermissionsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if puo.mutation.NodesCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.NodesTable,
+			Columns: []string{publisher.NodesColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.RemovedNodesIDs(); len(nodes) > 0 && !puo.mutation.NodesCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.NodesTable,
+			Columns: []string{publisher.NodesColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.NodesIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.NodesTable,
+			Columns: []string{publisher.NodesColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(node.FieldID, field.TypeString),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if puo.mutation.PersonalAccessTokensCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PersonalAccessTokensTable,
+			Columns: []string{publisher.PersonalAccessTokensColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.RemovedPersonalAccessTokensIDs(); len(nodes) > 0 && !puo.mutation.PersonalAccessTokensCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PersonalAccessTokensTable,
+			Columns: []string{publisher.PersonalAccessTokensColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := puo.mutation.PersonalAccessTokensIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   publisher.PersonalAccessTokensTable,
+			Columns: []string{publisher.PersonalAccessTokensColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(personalaccesstoken.FieldID, field.TypeUUID),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	_node = &Publisher{config: puo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{publisher.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	puo.mutation.done = true
+	return _node, nil
+}
diff --git a/ent/publisherpermission.go b/ent/publisherpermission.go
new file mode 100644
index 0000000..ab8d366
--- /dev/null
+++ b/ent/publisherpermission.go
@@ -0,0 +1,174 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"registry-backend/ent/publisher"
+	"registry-backend/ent/publisherpermission"
+	"registry-backend/ent/schema"
+	"registry-backend/ent/user"
+	"strings"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+)
+
+// PublisherPermission is the model entity for the PublisherPermission schema.
+// NOTE(review): it joins a user to a publisher with a permission level; UserID
+// and PublisherID are plain strings here, matching the string IDs used above.
+type PublisherPermission struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// Permission holds the value of the "permission" field.
+	Permission schema.PublisherPermissionType `json:"permission,omitempty"`
+	// UserID holds the value of the "user_id" field.
+	UserID string `json:"user_id,omitempty"`
+	// PublisherID holds the value of the "publisher_id" field.
+	PublisherID string `json:"publisher_id,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the PublisherPermissionQuery when eager-loading is set.
+	Edges        PublisherPermissionEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// PublisherPermissionEdges holds the relations/edges for other nodes in the graph.
+type PublisherPermissionEdges struct {
+	// User holds the value of the user edge.
+	User *User `json:"user,omitempty"`
+	// Publisher holds the value of the publisher edge.
+	Publisher *Publisher `json:"publisher,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [2]bool
+}
+
+// UserOrErr returns the User value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e PublisherPermissionEdges) UserOrErr() (*User, error) {
+	if e.User != nil {
+		return e.User, nil
+	} else if e.loadedTypes[0] {
+		return nil, &NotFoundError{label: user.Label}
+	}
+	return nil, &NotLoadedError{edge: "user"}
+}
+
+// PublisherOrErr returns the Publisher value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e PublisherPermissionEdges) PublisherOrErr() (*Publisher, error) {
+	if e.Publisher != nil {
+		return e.Publisher, nil
+	} else if e.loadedTypes[1] {
+		return nil, &NotFoundError{label: publisher.Label}
+	}
+	return nil, &NotLoadedError{edge: "publisher"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*PublisherPermission) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case publisherpermission.FieldID:
+			values[i] = new(sql.NullInt64)
+		case publisherpermission.FieldPermission, publisherpermission.FieldUserID, publisherpermission.FieldPublisherID:
+			values[i] = new(sql.NullString)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the PublisherPermission fields.
+// NOTE(review): ent-generated row-to-entity mapping and entity helpers; the
+// switch cases must mirror scanValues above (NullInt64 for id, NullString for
+// the three string-ish columns).
+func (pp *PublisherPermission) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case publisherpermission.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			pp.ID = int(value.Int64)
+		case publisherpermission.FieldPermission:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field permission", values[i])
+			} else if value.Valid {
+				pp.Permission = schema.PublisherPermissionType(value.String)
+			}
+		case publisherpermission.FieldUserID:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field user_id", values[i])
+			} else if value.Valid {
+				pp.UserID = value.String
+			}
+		case publisherpermission.FieldPublisherID:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field publisher_id", values[i])
+			} else if value.Valid {
+				pp.PublisherID = value.String
+			}
+		default:
+			pp.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the PublisherPermission.
+// This includes values selected through modifiers, order, etc.
+func (pp *PublisherPermission) Value(name string) (ent.Value, error) {
+	return pp.selectValues.Get(name)
+}
+
+// QueryUser queries the "user" edge of the PublisherPermission entity.
+func (pp *PublisherPermission) QueryUser() *UserQuery {
+	return NewPublisherPermissionClient(pp.config).QueryUser(pp)
+}
+
+// QueryPublisher queries the "publisher" edge of the PublisherPermission entity.
+func (pp *PublisherPermission) QueryPublisher() *PublisherQuery {
+	return NewPublisherPermissionClient(pp.config).QueryPublisher(pp)
+}
+
+// Update returns a builder for updating this PublisherPermission.
+// Note that you need to call PublisherPermission.Unwrap() before calling this method if this PublisherPermission
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (pp *PublisherPermission) Update() *PublisherPermissionUpdateOne {
+	return NewPublisherPermissionClient(pp.config).UpdateOne(pp)
+}
+
+// Unwrap unwraps the PublisherPermission entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (pp *PublisherPermission) Unwrap() *PublisherPermission {
+	_tx, ok := pp.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: PublisherPermission is not a transactional entity")
+	}
+	pp.config.driver = _tx.drv
+	return pp
+}
+
+// String implements the fmt.Stringer.
+func (pp *PublisherPermission) String() string {
+	var builder strings.Builder
+	builder.WriteString("PublisherPermission(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", pp.ID))
+	builder.WriteString("permission=")
+	builder.WriteString(fmt.Sprintf("%v", pp.Permission))
+	builder.WriteString(", ")
+	builder.WriteString("user_id=")
+	builder.WriteString(pp.UserID)
+	builder.WriteString(", ")
+	builder.WriteString("publisher_id=")
+	builder.WriteString(pp.PublisherID)
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// PublisherPermissions is a parsable slice of PublisherPermission.
+type PublisherPermissions []*PublisherPermission
diff --git a/ent/publisherpermission/publisherpermission.go b/ent/publisherpermission/publisherpermission.go
new file mode 100644
index 0000000..bb7bfb6
--- /dev/null
+++ b/ent/publisherpermission/publisherpermission.go
@@ -0,0 +1,123 @@
+// Code generated by ent, DO NOT EDIT.
+
+package publisherpermission
+
+import (
+	"fmt"
+	"registry-backend/ent/schema"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+// NOTE(review): ent-generated column/edge metadata. Both the user and publisher
+// M2O edges live on the publisher_permissions table itself (UserTable and
++// PublisherTable both equal Table), keyed by user_id / publisher_id.
+const (
+	// Label holds the string label denoting the publisherpermission type in the database.
+	Label = "publisher_permission"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldPermission holds the string denoting the permission field in the database.
+	FieldPermission = "permission"
+	// FieldUserID holds the string denoting the user_id field in the database.
+	FieldUserID = "user_id"
+	// FieldPublisherID holds the string denoting the publisher_id field in the database.
+	FieldPublisherID = "publisher_id"
+	// EdgeUser holds the string denoting the user edge name in mutations.
+	EdgeUser = "user"
+	// EdgePublisher holds the string denoting the publisher edge name in mutations.
+	EdgePublisher = "publisher"
+	// Table holds the table name of the publisherpermission in the database.
+	Table = "publisher_permissions"
+	// UserTable is the table that holds the user relation/edge.
+	UserTable = "publisher_permissions"
+	// UserInverseTable is the table name for the User entity.
+	// It exists in this package in order to avoid circular dependency with the "user" package.
+	UserInverseTable = "users"
+	// UserColumn is the table column denoting the user relation/edge.
+	UserColumn = "user_id"
+	// PublisherTable is the table that holds the publisher relation/edge.
+	PublisherTable = "publisher_permissions"
+	// PublisherInverseTable is the table name for the Publisher entity.
+	// It exists in this package in order to avoid circular dependency with the "publisher" package.
+	PublisherInverseTable = "publishers"
+	// PublisherColumn is the table column denoting the publisher relation/edge.
+	PublisherColumn = "publisher_id"
+)
+
+// Columns holds all SQL columns for publisherpermission fields.
+var Columns = []string{
+	FieldID,
+	FieldPermission,
+	FieldUserID,
+	FieldPublisherID,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+// PermissionValidator is a validator for the "permission" field enum values. It is called by the builders before save.
+// NOTE(review): the accepted values are hard-coded as "owner" and "member"; they
+// must track schema.PublisherPermissionType — regenerate if the enum changes.
+func PermissionValidator(pe schema.PublisherPermissionType) error {
+	switch pe {
+	case "owner", "member":
+		return nil
+	default:
+		return fmt.Errorf("publisherpermission: invalid enum value for permission field: %q", pe)
+	}
+}
+
+// OrderOption defines the ordering options for the PublisherPermission queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByPermission orders the results by the permission field.
+func ByPermission(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldPermission, opts...).ToFunc()
+}
+
+// ByUserID orders the results by the user_id field.
+func ByUserID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUserID, opts...).ToFunc()
+}
+
+// ByPublisherID orders the results by the publisher_id field.
+func ByPublisherID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldPublisherID, opts...).ToFunc()
+}
+
+// ByUserField orders the results by user field.
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
+	}
+}
+
+// ByPublisherField orders the results by publisher field.
+func ByPublisherField(field string, opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newPublisherStep(), sql.OrderByField(field, opts...))
+	}
+}
+
+// newUserStep builds the graph step for the inverse M2O "user" edge.
+func newUserStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(UserInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+	)
+}
+
+// newPublisherStep builds the graph step for the inverse M2O "publisher" edge.
+func newPublisherStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(PublisherInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn),
+	)
+}
diff --git a/ent/publisherpermission/where.go b/ent/publisherpermission/where.go
new file mode 100644
index 0000000..fd359d4
--- /dev/null
+++ b/ent/publisherpermission/where.go
@@ -0,0 +1,287 @@
+// Code generated by ent, DO NOT EDIT.
+
+package publisherpermission
+
+import (
+	"registry-backend/ent/predicate"
+	"registry-backend/ent/schema"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLTE(FieldID, id))
+}
+
+// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
+func UserID(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldUserID, v))
+}
+
+// PublisherID applies equality check predicate on the "publisher_id" field. It's identical to PublisherIDEQ.
+func PublisherID(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldPublisherID, v))
+}
+
+// PermissionEQ applies the EQ predicate on the "permission" field.
+func PermissionEQ(v schema.PublisherPermissionType) predicate.PublisherPermission {
+	vc := v
+	return predicate.PublisherPermission(sql.FieldEQ(FieldPermission, vc))
+}
+
+// PermissionNEQ applies the NEQ predicate on the "permission" field.
+func PermissionNEQ(v schema.PublisherPermissionType) predicate.PublisherPermission {
+	vc := v
+	return predicate.PublisherPermission(sql.FieldNEQ(FieldPermission, vc))
+}
+
+// PermissionIn applies the In predicate on the "permission" field.
+// NOTE(review): values are widened to []any because sql.FieldIn is variadic over any.
+func PermissionIn(vs ...schema.PublisherPermissionType) predicate.PublisherPermission {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.PublisherPermission(sql.FieldIn(FieldPermission, v...))
+}
+
+// PermissionNotIn applies the NotIn predicate on the "permission" field.
+func PermissionNotIn(vs ...schema.PublisherPermissionType) predicate.PublisherPermission {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.PublisherPermission(sql.FieldNotIn(FieldPermission, v...))
+}
+
+// UserIDEQ applies the EQ predicate on the "user_id" field.
+func UserIDEQ(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldUserID, v))
+}
+
+// UserIDNEQ applies the NEQ predicate on the "user_id" field.
+func UserIDNEQ(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNEQ(FieldUserID, v))
+}
+
+// UserIDIn applies the In predicate on the "user_id" field.
+func UserIDIn(vs ...string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldIn(FieldUserID, vs...))
+}
+
+// UserIDNotIn applies the NotIn predicate on the "user_id" field.
+func UserIDNotIn(vs ...string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNotIn(FieldUserID, vs...))
+}
+
+// UserIDGT applies the GT predicate on the "user_id" field.
+func UserIDGT(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGT(FieldUserID, v))
+}
+
+// UserIDGTE applies the GTE predicate on the "user_id" field.
+func UserIDGTE(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGTE(FieldUserID, v))
+}
+
+// UserIDLT applies the LT predicate on the "user_id" field.
+func UserIDLT(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLT(FieldUserID, v))
+}
+
+// UserIDLTE applies the LTE predicate on the "user_id" field.
+func UserIDLTE(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLTE(FieldUserID, v))
+}
+
+// UserIDContains applies the Contains predicate on the "user_id" field.
+func UserIDContains(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldContains(FieldUserID, v))
+}
+
+// UserIDHasPrefix applies the HasPrefix predicate on the "user_id" field.
+func UserIDHasPrefix(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldHasPrefix(FieldUserID, v))
+}
+
+// UserIDHasSuffix applies the HasSuffix predicate on the "user_id" field.
+func UserIDHasSuffix(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldHasSuffix(FieldUserID, v))
+}
+
+// UserIDEqualFold applies the EqualFold predicate on the "user_id" field.
+func UserIDEqualFold(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEqualFold(FieldUserID, v))
+}
+
+// UserIDContainsFold applies the ContainsFold predicate on the "user_id" field.
+func UserIDContainsFold(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldContainsFold(FieldUserID, v))
+}
+
+// PublisherIDEQ applies the EQ predicate on the "publisher_id" field.
+func PublisherIDEQ(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEQ(FieldPublisherID, v))
+}
+
+// PublisherIDNEQ applies the NEQ predicate on the "publisher_id" field.
+func PublisherIDNEQ(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNEQ(FieldPublisherID, v))
+}
+
+// PublisherIDIn applies the In predicate on the "publisher_id" field.
+func PublisherIDIn(vs ...string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldIn(FieldPublisherID, vs...))
+}
+
+// PublisherIDNotIn applies the NotIn predicate on the "publisher_id" field.
+func PublisherIDNotIn(vs ...string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldNotIn(FieldPublisherID, vs...))
+}
+
+// PublisherIDGT applies the GT predicate on the "publisher_id" field.
+func PublisherIDGT(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGT(FieldPublisherID, v))
+}
+
+// PublisherIDGTE applies the GTE predicate on the "publisher_id" field.
+func PublisherIDGTE(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldGTE(FieldPublisherID, v))
+}
+
+// PublisherIDLT applies the LT predicate on the "publisher_id" field.
+func PublisherIDLT(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLT(FieldPublisherID, v))
+}
+
+// PublisherIDLTE applies the LTE predicate on the "publisher_id" field.
+func PublisherIDLTE(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldLTE(FieldPublisherID, v))
+}
+
+// PublisherIDContains applies the Contains predicate on the "publisher_id" field.
+func PublisherIDContains(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldContains(FieldPublisherID, v))
+}
+
+// PublisherIDHasPrefix applies the HasPrefix predicate on the "publisher_id" field.
+func PublisherIDHasPrefix(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldHasPrefix(FieldPublisherID, v))
+}
+
+// PublisherIDHasSuffix applies the HasSuffix predicate on the "publisher_id" field.
+func PublisherIDHasSuffix(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldHasSuffix(FieldPublisherID, v))
+}
+
+// PublisherIDEqualFold applies the EqualFold predicate on the "publisher_id" field.
+func PublisherIDEqualFold(v string) predicate.PublisherPermission {
+	return predicate.PublisherPermission(sql.FieldEqualFold(FieldPublisherID, v))
+}
+
+// PublisherIDContainsFold applies the ContainsFold predicate on the "publisher_id" field.
+func PublisherIDContainsFold(v string) predicate.PublisherPermission { + return predicate.PublisherPermission(sql.FieldContainsFold(FieldPublisherID, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.PublisherPermission { + return predicate.PublisherPermission(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.PublisherPermission { + return predicate.PublisherPermission(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasPublisher applies the HasEdge predicate on the "publisher" edge. +func HasPublisher() predicate.PublisherPermission { + return predicate.PublisherPermission(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PublisherTable, PublisherColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPublisherWith applies the HasEdge predicate on the "publisher" edge with a given conditions (other predicates). +func HasPublisherWith(preds ...predicate.Publisher) predicate.PublisherPermission { + return predicate.PublisherPermission(func(s *sql.Selector) { + step := newPublisherStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PublisherPermission) predicate.PublisherPermission { + return predicate.PublisherPermission(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. 
+func Or(predicates ...predicate.PublisherPermission) predicate.PublisherPermission { + return predicate.PublisherPermission(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.PublisherPermission) predicate.PublisherPermission { + return predicate.PublisherPermission(sql.NotPredicates(p)) +} diff --git a/ent/publisherpermission_create.go b/ent/publisherpermission_create.go new file mode 100644 index 0000000..5e912db --- /dev/null +++ b/ent/publisherpermission_create.go @@ -0,0 +1,610 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/schema" + "registry-backend/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherPermissionCreate is the builder for creating a PublisherPermission entity. +type PublisherPermissionCreate struct { + config + mutation *PublisherPermissionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetPermission sets the "permission" field. +func (ppc *PublisherPermissionCreate) SetPermission(spt schema.PublisherPermissionType) *PublisherPermissionCreate { + ppc.mutation.SetPermission(spt) + return ppc +} + +// SetUserID sets the "user_id" field. +func (ppc *PublisherPermissionCreate) SetUserID(s string) *PublisherPermissionCreate { + ppc.mutation.SetUserID(s) + return ppc +} + +// SetPublisherID sets the "publisher_id" field. +func (ppc *PublisherPermissionCreate) SetPublisherID(s string) *PublisherPermissionCreate { + ppc.mutation.SetPublisherID(s) + return ppc +} + +// SetUser sets the "user" edge to the User entity. +func (ppc *PublisherPermissionCreate) SetUser(u *User) *PublisherPermissionCreate { + return ppc.SetUserID(u.ID) +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. 
+func (ppc *PublisherPermissionCreate) SetPublisher(p *Publisher) *PublisherPermissionCreate { + return ppc.SetPublisherID(p.ID) +} + +// Mutation returns the PublisherPermissionMutation object of the builder. +func (ppc *PublisherPermissionCreate) Mutation() *PublisherPermissionMutation { + return ppc.mutation +} + +// Save creates the PublisherPermission in the database. +func (ppc *PublisherPermissionCreate) Save(ctx context.Context) (*PublisherPermission, error) { + return withHooks(ctx, ppc.sqlSave, ppc.mutation, ppc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (ppc *PublisherPermissionCreate) SaveX(ctx context.Context) *PublisherPermission { + v, err := ppc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ppc *PublisherPermissionCreate) Exec(ctx context.Context) error { + _, err := ppc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ppc *PublisherPermissionCreate) ExecX(ctx context.Context) { + if err := ppc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ppc *PublisherPermissionCreate) check() error { + if _, ok := ppc.mutation.Permission(); !ok { + return &ValidationError{Name: "permission", err: errors.New(`ent: missing required field "PublisherPermission.permission"`)} + } + if v, ok := ppc.mutation.Permission(); ok { + if err := publisherpermission.PermissionValidator(v); err != nil { + return &ValidationError{Name: "permission", err: fmt.Errorf(`ent: validator failed for field "PublisherPermission.permission": %w`, err)} + } + } + if _, ok := ppc.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "PublisherPermission.user_id"`)} + } + if _, ok := ppc.mutation.PublisherID(); !ok { + return &ValidationError{Name: "publisher_id", err: errors.New(`ent: missing required field "PublisherPermission.publisher_id"`)} + } + if _, ok := ppc.mutation.UserID(); !ok { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "PublisherPermission.user"`)} + } + if _, ok := ppc.mutation.PublisherID(); !ok { + return &ValidationError{Name: "publisher", err: errors.New(`ent: missing required edge "PublisherPermission.publisher"`)} + } + return nil +} + +func (ppc *PublisherPermissionCreate) sqlSave(ctx context.Context) (*PublisherPermission, error) { + if err := ppc.check(); err != nil { + return nil, err + } + _node, _spec := ppc.createSpec() + if err := sqlgraph.CreateNode(ctx, ppc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + ppc.mutation.id = &_node.ID + ppc.mutation.done = true + return _node, nil +} + +func (ppc *PublisherPermissionCreate) createSpec() (*PublisherPermission, *sqlgraph.CreateSpec) { + var ( + _node = &PublisherPermission{config: ppc.config} + _spec = sqlgraph.NewCreateSpec(publisherpermission.Table, sqlgraph.NewFieldSpec(publisherpermission.FieldID, 
field.TypeInt)) + ) + _spec.OnConflict = ppc.conflict + if value, ok := ppc.mutation.Permission(); ok { + _spec.SetField(publisherpermission.FieldPermission, field.TypeEnum, value) + _node.Permission = value + } + if nodes := ppc.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.UserTable, + Columns: []string{publisherpermission.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := ppc.mutation.PublisherIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.PublisherTable, + Columns: []string{publisherpermission.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.PublisherID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PublisherPermission.Create(). +// SetPermission(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PublisherPermissionUpsert) { +// SetPermission(v+v). +// }). 
+// Exec(ctx) +func (ppc *PublisherPermissionCreate) OnConflict(opts ...sql.ConflictOption) *PublisherPermissionUpsertOne { + ppc.conflict = opts + return &PublisherPermissionUpsertOne{ + create: ppc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ppc *PublisherPermissionCreate) OnConflictColumns(columns ...string) *PublisherPermissionUpsertOne { + ppc.conflict = append(ppc.conflict, sql.ConflictColumns(columns...)) + return &PublisherPermissionUpsertOne{ + create: ppc, + } +} + +type ( + // PublisherPermissionUpsertOne is the builder for "upsert"-ing + // one PublisherPermission node. + PublisherPermissionUpsertOne struct { + create *PublisherPermissionCreate + } + + // PublisherPermissionUpsert is the "OnConflict" setter. + PublisherPermissionUpsert struct { + *sql.UpdateSet + } +) + +// SetPermission sets the "permission" field. +func (u *PublisherPermissionUpsert) SetPermission(v schema.PublisherPermissionType) *PublisherPermissionUpsert { + u.Set(publisherpermission.FieldPermission, v) + return u +} + +// UpdatePermission sets the "permission" field to the value that was provided on create. +func (u *PublisherPermissionUpsert) UpdatePermission() *PublisherPermissionUpsert { + u.SetExcluded(publisherpermission.FieldPermission) + return u +} + +// SetUserID sets the "user_id" field. +func (u *PublisherPermissionUpsert) SetUserID(v string) *PublisherPermissionUpsert { + u.Set(publisherpermission.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PublisherPermissionUpsert) UpdateUserID() *PublisherPermissionUpsert { + u.SetExcluded(publisherpermission.FieldUserID) + return u +} + +// SetPublisherID sets the "publisher_id" field. 
+func (u *PublisherPermissionUpsert) SetPublisherID(v string) *PublisherPermissionUpsert { + u.Set(publisherpermission.FieldPublisherID, v) + return u +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. +func (u *PublisherPermissionUpsert) UpdatePublisherID() *PublisherPermissionUpsert { + u.SetExcluded(publisherpermission.FieldPublisherID) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PublisherPermissionUpsertOne) UpdateNewValues() *PublisherPermissionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PublisherPermissionUpsertOne) Ignore() *PublisherPermissionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PublisherPermissionUpsertOne) DoNothing() *PublisherPermissionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PublisherPermissionCreate.OnConflict +// documentation for more info. +func (u *PublisherPermissionUpsertOne) Update(set func(*PublisherPermissionUpsert)) *PublisherPermissionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PublisherPermissionUpsert{UpdateSet: update}) + })) + return u +} + +// SetPermission sets the "permission" field. 
+func (u *PublisherPermissionUpsertOne) SetPermission(v schema.PublisherPermissionType) *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetPermission(v) + }) +} + +// UpdatePermission sets the "permission" field to the value that was provided on create. +func (u *PublisherPermissionUpsertOne) UpdatePermission() *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdatePermission() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PublisherPermissionUpsertOne) SetUserID(v string) *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PublisherPermissionUpsertOne) UpdateUserID() *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdateUserID() + }) +} + +// SetPublisherID sets the "publisher_id" field. +func (u *PublisherPermissionUpsertOne) SetPublisherID(v string) *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetPublisherID(v) + }) +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. +func (u *PublisherPermissionUpsertOne) UpdatePublisherID() *PublisherPermissionUpsertOne { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdatePublisherID() + }) +} + +// Exec executes the query. +func (u *PublisherPermissionUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PublisherPermissionCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PublisherPermissionUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
+func (u *PublisherPermissionUpsertOne) ID(ctx context.Context) (id int, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *PublisherPermissionUpsertOne) IDX(ctx context.Context) int { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// PublisherPermissionCreateBulk is the builder for creating many PublisherPermission entities in bulk. +type PublisherPermissionCreateBulk struct { + config + err error + builders []*PublisherPermissionCreate + conflict []sql.ConflictOption +} + +// Save creates the PublisherPermission entities in the database. +func (ppcb *PublisherPermissionCreateBulk) Save(ctx context.Context) ([]*PublisherPermission, error) { + if ppcb.err != nil { + return nil, ppcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ppcb.builders)) + nodes := make([]*PublisherPermission, len(ppcb.builders)) + mutators := make([]Mutator, len(ppcb.builders)) + for i := range ppcb.builders { + func(i int, root context.Context) { + builder := ppcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PublisherPermissionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ppcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ppcb.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, ppcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ppcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ppcb *PublisherPermissionCreateBulk) SaveX(ctx context.Context) []*PublisherPermission { + v, err := ppcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ppcb *PublisherPermissionCreateBulk) Exec(ctx context.Context) error { + _, err := ppcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ppcb *PublisherPermissionCreateBulk) ExecX(ctx context.Context) { + if err := ppcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PublisherPermission.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PublisherPermissionUpsert) { +// SetPermission(v+v). +// }). 
+// Exec(ctx) +func (ppcb *PublisherPermissionCreateBulk) OnConflict(opts ...sql.ConflictOption) *PublisherPermissionUpsertBulk { + ppcb.conflict = opts + return &PublisherPermissionUpsertBulk{ + create: ppcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ppcb *PublisherPermissionCreateBulk) OnConflictColumns(columns ...string) *PublisherPermissionUpsertBulk { + ppcb.conflict = append(ppcb.conflict, sql.ConflictColumns(columns...)) + return &PublisherPermissionUpsertBulk{ + create: ppcb, + } +} + +// PublisherPermissionUpsertBulk is the builder for "upsert"-ing +// a bulk of PublisherPermission nodes. +type PublisherPermissionUpsertBulk struct { + create *PublisherPermissionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PublisherPermissionUpsertBulk) UpdateNewValues() *PublisherPermissionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PublisherPermission.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PublisherPermissionUpsertBulk) Ignore() *PublisherPermissionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *PublisherPermissionUpsertBulk) DoNothing() *PublisherPermissionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PublisherPermissionCreateBulk.OnConflict +// documentation for more info. +func (u *PublisherPermissionUpsertBulk) Update(set func(*PublisherPermissionUpsert)) *PublisherPermissionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PublisherPermissionUpsert{UpdateSet: update}) + })) + return u +} + +// SetPermission sets the "permission" field. +func (u *PublisherPermissionUpsertBulk) SetPermission(v schema.PublisherPermissionType) *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetPermission(v) + }) +} + +// UpdatePermission sets the "permission" field to the value that was provided on create. +func (u *PublisherPermissionUpsertBulk) UpdatePermission() *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdatePermission() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PublisherPermissionUpsertBulk) SetUserID(v string) *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PublisherPermissionUpsertBulk) UpdateUserID() *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdateUserID() + }) +} + +// SetPublisherID sets the "publisher_id" field. +func (u *PublisherPermissionUpsertBulk) SetPublisherID(v string) *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.SetPublisherID(v) + }) +} + +// UpdatePublisherID sets the "publisher_id" field to the value that was provided on create. 
+func (u *PublisherPermissionUpsertBulk) UpdatePublisherID() *PublisherPermissionUpsertBulk { + return u.Update(func(s *PublisherPermissionUpsert) { + s.UpdatePublisherID() + }) +} + +// Exec executes the query. +func (u *PublisherPermissionUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PublisherPermissionCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PublisherPermissionCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PublisherPermissionUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/publisherpermission_delete.go b/ent/publisherpermission_delete.go new file mode 100644 index 0000000..b5796a1 --- /dev/null +++ b/ent/publisherpermission_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/predicate" + "registry-backend/ent/publisherpermission" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherPermissionDelete is the builder for deleting a PublisherPermission entity. +type PublisherPermissionDelete struct { + config + hooks []Hook + mutation *PublisherPermissionMutation +} + +// Where appends a list predicates to the PublisherPermissionDelete builder. +func (ppd *PublisherPermissionDelete) Where(ps ...predicate.PublisherPermission) *PublisherPermissionDelete { + ppd.mutation.Where(ps...) + return ppd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (ppd *PublisherPermissionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ppd.sqlExec, ppd.mutation, ppd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ppd *PublisherPermissionDelete) ExecX(ctx context.Context) int { + n, err := ppd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ppd *PublisherPermissionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(publisherpermission.Table, sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt)) + if ps := ppd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ppd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ppd.mutation.done = true + return affected, err +} + +// PublisherPermissionDeleteOne is the builder for deleting a single PublisherPermission entity. +type PublisherPermissionDeleteOne struct { + ppd *PublisherPermissionDelete +} + +// Where appends a list predicates to the PublisherPermissionDelete builder. +func (ppdo *PublisherPermissionDeleteOne) Where(ps ...predicate.PublisherPermission) *PublisherPermissionDeleteOne { + ppdo.ppd.mutation.Where(ps...) + return ppdo +} + +// Exec executes the deletion query. +func (ppdo *PublisherPermissionDeleteOne) Exec(ctx context.Context) error { + n, err := ppdo.ppd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{publisherpermission.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ppdo *PublisherPermissionDeleteOne) ExecX(ctx context.Context) { + if err := ppdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/publisherpermission_query.go b/ent/publisherpermission_query.go new file mode 100644 index 0000000..00e51d0 --- /dev/null +++ b/ent/publisherpermission_query.go @@ -0,0 +1,717 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/user" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherPermissionQuery is the builder for querying PublisherPermission entities. +type PublisherPermissionQuery struct { + config + ctx *QueryContext + order []publisherpermission.OrderOption + inters []Interceptor + predicates []predicate.PublisherPermission + withUser *UserQuery + withPublisher *PublisherQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PublisherPermissionQuery builder. +func (ppq *PublisherPermissionQuery) Where(ps ...predicate.PublisherPermission) *PublisherPermissionQuery { + ppq.predicates = append(ppq.predicates, ps...) + return ppq +} + +// Limit the number of records to be returned by this query. +func (ppq *PublisherPermissionQuery) Limit(limit int) *PublisherPermissionQuery { + ppq.ctx.Limit = &limit + return ppq +} + +// Offset to start from. +func (ppq *PublisherPermissionQuery) Offset(offset int) *PublisherPermissionQuery { + ppq.ctx.Offset = &offset + return ppq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
+func (ppq *PublisherPermissionQuery) Unique(unique bool) *PublisherPermissionQuery { + ppq.ctx.Unique = &unique + return ppq +} + +// Order specifies how the records should be ordered. +func (ppq *PublisherPermissionQuery) Order(o ...publisherpermission.OrderOption) *PublisherPermissionQuery { + ppq.order = append(ppq.order, o...) + return ppq +} + +// QueryUser chains the current query on the "user" edge. +func (ppq *PublisherPermissionQuery) QueryUser() *UserQuery { + query := (&UserClient{config: ppq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := ppq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := ppq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(publisherpermission.Table, publisherpermission.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, publisherpermission.UserTable, publisherpermission.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(ppq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryPublisher chains the current query on the "publisher" edge. 
+func (ppq *PublisherPermissionQuery) QueryPublisher() *PublisherQuery { + query := (&PublisherClient{config: ppq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := ppq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := ppq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(publisherpermission.Table, publisherpermission.FieldID, selector), + sqlgraph.To(publisher.Table, publisher.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, publisherpermission.PublisherTable, publisherpermission.PublisherColumn), + ) + fromU = sqlgraph.SetNeighbors(ppq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PublisherPermission entity from the query. +// Returns a *NotFoundError when no PublisherPermission was found. +func (ppq *PublisherPermissionQuery) First(ctx context.Context) (*PublisherPermission, error) { + nodes, err := ppq.Limit(1).All(setContextOp(ctx, ppq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{publisherpermission.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) FirstX(ctx context.Context) *PublisherPermission { + node, err := ppq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PublisherPermission ID from the query. +// Returns a *NotFoundError when no PublisherPermission ID was found. +func (ppq *PublisherPermissionQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = ppq.Limit(1).IDs(setContextOp(ctx, ppq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{publisherpermission.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (ppq *PublisherPermissionQuery) FirstIDX(ctx context.Context) int { + id, err := ppq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PublisherPermission entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PublisherPermission entity is found. +// Returns a *NotFoundError when no PublisherPermission entities are found. +func (ppq *PublisherPermissionQuery) Only(ctx context.Context) (*PublisherPermission, error) { + nodes, err := ppq.Limit(2).All(setContextOp(ctx, ppq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{publisherpermission.Label} + default: + return nil, &NotSingularError{publisherpermission.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) OnlyX(ctx context.Context) *PublisherPermission { + node, err := ppq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PublisherPermission ID in the query. +// Returns a *NotSingularError when more than one PublisherPermission ID is found. +// Returns a *NotFoundError when no entities are found. +func (ppq *PublisherPermissionQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = ppq.Limit(2).IDs(setContextOp(ctx, ppq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{publisherpermission.Label} + default: + err = &NotSingularError{publisherpermission.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) OnlyIDX(ctx context.Context) int { + id, err := ppq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PublisherPermissions. 
+func (ppq *PublisherPermissionQuery) All(ctx context.Context) ([]*PublisherPermission, error) { + ctx = setContextOp(ctx, ppq.ctx, "All") + if err := ppq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PublisherPermission, *PublisherPermissionQuery]() + return withInterceptors[[]*PublisherPermission](ctx, ppq, qr, ppq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) AllX(ctx context.Context) []*PublisherPermission { + nodes, err := ppq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PublisherPermission IDs. +func (ppq *PublisherPermissionQuery) IDs(ctx context.Context) (ids []int, err error) { + if ppq.ctx.Unique == nil && ppq.path != nil { + ppq.Unique(true) + } + ctx = setContextOp(ctx, ppq.ctx, "IDs") + if err = ppq.Select(publisherpermission.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) IDsX(ctx context.Context) []int { + ids, err := ppq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (ppq *PublisherPermissionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, ppq.ctx, "Count") + if err := ppq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, ppq, querierCount[*PublisherPermissionQuery](), ppq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) CountX(ctx context.Context) int { + count, err := ppq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (ppq *PublisherPermissionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, ppq.ctx, "Exist") + switch _, err := ppq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (ppq *PublisherPermissionQuery) ExistX(ctx context.Context) bool { + exist, err := ppq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PublisherPermissionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (ppq *PublisherPermissionQuery) Clone() *PublisherPermissionQuery { + if ppq == nil { + return nil + } + return &PublisherPermissionQuery{ + config: ppq.config, + ctx: ppq.ctx.Clone(), + order: append([]publisherpermission.OrderOption{}, ppq.order...), + inters: append([]Interceptor{}, ppq.inters...), + predicates: append([]predicate.PublisherPermission{}, ppq.predicates...), + withUser: ppq.withUser.Clone(), + withPublisher: ppq.withPublisher.Clone(), + // clone intermediate query. + sql: ppq.sql.Clone(), + path: ppq.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (ppq *PublisherPermissionQuery) WithUser(opts ...func(*UserQuery)) *PublisherPermissionQuery { + query := (&UserClient{config: ppq.config}).Query() + for _, opt := range opts { + opt(query) + } + ppq.withUser = query + return ppq +} + +// WithPublisher tells the query-builder to eager-load the nodes that are connected to +// the "publisher" edge. The optional arguments are used to configure the query builder of the edge. 
+func (ppq *PublisherPermissionQuery) WithPublisher(opts ...func(*PublisherQuery)) *PublisherPermissionQuery { + query := (&PublisherClient{config: ppq.config}).Query() + for _, opt := range opts { + opt(query) + } + ppq.withPublisher = query + return ppq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Permission schema.PublisherPermissionType `json:"permission,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PublisherPermission.Query(). +// GroupBy(publisherpermission.FieldPermission). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (ppq *PublisherPermissionQuery) GroupBy(field string, fields ...string) *PublisherPermissionGroupBy { + ppq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PublisherPermissionGroupBy{build: ppq} + grbuild.flds = &ppq.ctx.Fields + grbuild.label = publisherpermission.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Permission schema.PublisherPermissionType `json:"permission,omitempty"` +// } +// +// client.PublisherPermission.Query(). +// Select(publisherpermission.FieldPermission). +// Scan(ctx, &v) +func (ppq *PublisherPermissionQuery) Select(fields ...string) *PublisherPermissionSelect { + ppq.ctx.Fields = append(ppq.ctx.Fields, fields...) + sbuild := &PublisherPermissionSelect{PublisherPermissionQuery: ppq} + sbuild.label = publisherpermission.Label + sbuild.flds, sbuild.scan = &ppq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PublisherPermissionSelect configured with the given aggregations. +func (ppq *PublisherPermissionQuery) Aggregate(fns ...AggregateFunc) *PublisherPermissionSelect { + return ppq.Select().Aggregate(fns...) 
+} + +func (ppq *PublisherPermissionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range ppq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, ppq); err != nil { + return err + } + } + } + for _, f := range ppq.ctx.Fields { + if !publisherpermission.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if ppq.path != nil { + prev, err := ppq.path(ctx) + if err != nil { + return err + } + ppq.sql = prev + } + return nil +} + +func (ppq *PublisherPermissionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PublisherPermission, error) { + var ( + nodes = []*PublisherPermission{} + _spec = ppq.querySpec() + loadedTypes = [2]bool{ + ppq.withUser != nil, + ppq.withPublisher != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PublisherPermission).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PublisherPermission{config: ppq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(ppq.modifiers) > 0 { + _spec.Modifiers = ppq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, ppq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := ppq.withUser; query != nil { + if err := ppq.loadUser(ctx, query, nodes, nil, + func(n *PublisherPermission, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := ppq.withPublisher; query != nil { + if err := ppq.loadPublisher(ctx, query, nodes, nil, + func(n *PublisherPermission, e *Publisher) { n.Edges.Publisher = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (ppq 
*PublisherPermissionQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*PublisherPermission, init func(*PublisherPermission), assign func(*PublisherPermission, *User)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*PublisherPermission) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (ppq *PublisherPermissionQuery) loadPublisher(ctx context.Context, query *PublisherQuery, nodes []*PublisherPermission, init func(*PublisherPermission), assign func(*PublisherPermission, *Publisher)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*PublisherPermission) + for i := range nodes { + fk := nodes[i].PublisherID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(publisher.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "publisher_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (ppq *PublisherPermissionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := ppq.querySpec() + if len(ppq.modifiers) > 0 { + _spec.Modifiers = ppq.modifiers + } + _spec.Node.Columns = ppq.ctx.Fields + if len(ppq.ctx.Fields) > 0 { + _spec.Unique = ppq.ctx.Unique != nil && *ppq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, 
ppq.driver, _spec) +} + +func (ppq *PublisherPermissionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(publisherpermission.Table, publisherpermission.Columns, sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt)) + _spec.From = ppq.sql + if unique := ppq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if ppq.path != nil { + _spec.Unique = true + } + if fields := ppq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, publisherpermission.FieldID) + for i := range fields { + if fields[i] != publisherpermission.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if ppq.withUser != nil { + _spec.Node.AddColumnOnce(publisherpermission.FieldUserID) + } + if ppq.withPublisher != nil { + _spec.Node.AddColumnOnce(publisherpermission.FieldPublisherID) + } + } + if ps := ppq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := ppq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := ppq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := ppq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (ppq *PublisherPermissionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(ppq.driver.Dialect()) + t1 := builder.Table(publisherpermission.Table) + columns := ppq.ctx.Fields + if len(columns) == 0 { + columns = publisherpermission.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if ppq.sql != nil { + selector = ppq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if ppq.ctx.Unique != nil && *ppq.ctx.Unique { + selector.Distinct() + } + for _, m := range ppq.modifiers { + m(selector) + } + for _, p := range ppq.predicates { + p(selector) + } + for _, p := range ppq.order { + p(selector) + } + if offset := ppq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := ppq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (ppq *PublisherPermissionQuery) ForUpdate(opts ...sql.LockOption) *PublisherPermissionQuery { + if ppq.driver.Dialect() == dialect.Postgres { + ppq.Unique(false) + } + ppq.modifiers = append(ppq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return ppq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (ppq *PublisherPermissionQuery) ForShare(opts ...sql.LockOption) *PublisherPermissionQuery { + if ppq.driver.Dialect() == dialect.Postgres { + ppq.Unique(false) + } + ppq.modifiers = append(ppq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return ppq +} + +// PublisherPermissionGroupBy is the group-by builder for PublisherPermission entities. +type PublisherPermissionGroupBy struct { + selector + build *PublisherPermissionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ppgb *PublisherPermissionGroupBy) Aggregate(fns ...AggregateFunc) *PublisherPermissionGroupBy { + ppgb.fns = append(ppgb.fns, fns...) 
+ return ppgb +} + +// Scan applies the selector query and scans the result into the given value. +func (ppgb *PublisherPermissionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ppgb.build.ctx, "GroupBy") + if err := ppgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PublisherPermissionQuery, *PublisherPermissionGroupBy](ctx, ppgb.build, ppgb, ppgb.build.inters, v) +} + +func (ppgb *PublisherPermissionGroupBy) sqlScan(ctx context.Context, root *PublisherPermissionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ppgb.fns)) + for _, fn := range ppgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ppgb.flds)+len(ppgb.fns)) + for _, f := range *ppgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ppgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ppgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PublisherPermissionSelect is the builder for selecting fields of PublisherPermission entities. +type PublisherPermissionSelect struct { + *PublisherPermissionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pps *PublisherPermissionSelect) Aggregate(fns ...AggregateFunc) *PublisherPermissionSelect { + pps.fns = append(pps.fns, fns...) + return pps +} + +// Scan applies the selector query and scans the result into the given value. 
+func (pps *PublisherPermissionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pps.ctx, "Select") + if err := pps.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PublisherPermissionQuery, *PublisherPermissionSelect](ctx, pps.PublisherPermissionQuery, pps, pps.inters, v) +} + +func (pps *PublisherPermissionSelect) sqlScan(ctx context.Context, root *PublisherPermissionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pps.fns)) + for _, fn := range pps.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*pps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pps.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/publisherpermission_update.go b/ent/publisherpermission_update.go new file mode 100644 index 0000000..5610f42 --- /dev/null +++ b/ent/publisherpermission_update.go @@ -0,0 +1,466 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/predicate" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/schema" + "registry-backend/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PublisherPermissionUpdate is the builder for updating PublisherPermission entities. +type PublisherPermissionUpdate struct { + config + hooks []Hook + mutation *PublisherPermissionMutation +} + +// Where appends a list predicates to the PublisherPermissionUpdate builder. 
+func (ppu *PublisherPermissionUpdate) Where(ps ...predicate.PublisherPermission) *PublisherPermissionUpdate { + ppu.mutation.Where(ps...) + return ppu +} + +// SetPermission sets the "permission" field. +func (ppu *PublisherPermissionUpdate) SetPermission(spt schema.PublisherPermissionType) *PublisherPermissionUpdate { + ppu.mutation.SetPermission(spt) + return ppu +} + +// SetNillablePermission sets the "permission" field if the given value is not nil. +func (ppu *PublisherPermissionUpdate) SetNillablePermission(spt *schema.PublisherPermissionType) *PublisherPermissionUpdate { + if spt != nil { + ppu.SetPermission(*spt) + } + return ppu +} + +// SetUserID sets the "user_id" field. +func (ppu *PublisherPermissionUpdate) SetUserID(s string) *PublisherPermissionUpdate { + ppu.mutation.SetUserID(s) + return ppu +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (ppu *PublisherPermissionUpdate) SetNillableUserID(s *string) *PublisherPermissionUpdate { + if s != nil { + ppu.SetUserID(*s) + } + return ppu +} + +// SetPublisherID sets the "publisher_id" field. +func (ppu *PublisherPermissionUpdate) SetPublisherID(s string) *PublisherPermissionUpdate { + ppu.mutation.SetPublisherID(s) + return ppu +} + +// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil. +func (ppu *PublisherPermissionUpdate) SetNillablePublisherID(s *string) *PublisherPermissionUpdate { + if s != nil { + ppu.SetPublisherID(*s) + } + return ppu +} + +// SetUser sets the "user" edge to the User entity. +func (ppu *PublisherPermissionUpdate) SetUser(u *User) *PublisherPermissionUpdate { + return ppu.SetUserID(u.ID) +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. +func (ppu *PublisherPermissionUpdate) SetPublisher(p *Publisher) *PublisherPermissionUpdate { + return ppu.SetPublisherID(p.ID) +} + +// Mutation returns the PublisherPermissionMutation object of the builder. 
+func (ppu *PublisherPermissionUpdate) Mutation() *PublisherPermissionMutation { + return ppu.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (ppu *PublisherPermissionUpdate) ClearUser() *PublisherPermissionUpdate { + ppu.mutation.ClearUser() + return ppu +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. +func (ppu *PublisherPermissionUpdate) ClearPublisher() *PublisherPermissionUpdate { + ppu.mutation.ClearPublisher() + return ppu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (ppu *PublisherPermissionUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, ppu.sqlSave, ppu.mutation, ppu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ppu *PublisherPermissionUpdate) SaveX(ctx context.Context) int { + affected, err := ppu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (ppu *PublisherPermissionUpdate) Exec(ctx context.Context) error { + _, err := ppu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ppu *PublisherPermissionUpdate) ExecX(ctx context.Context) { + if err := ppu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ppu *PublisherPermissionUpdate) check() error { + if v, ok := ppu.mutation.Permission(); ok { + if err := publisherpermission.PermissionValidator(v); err != nil { + return &ValidationError{Name: "permission", err: fmt.Errorf(`ent: validator failed for field "PublisherPermission.permission": %w`, err)} + } + } + if _, ok := ppu.mutation.UserID(); ppu.mutation.UserCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "PublisherPermission.user"`) + } + if _, ok := ppu.mutation.PublisherID(); ppu.mutation.PublisherCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "PublisherPermission.publisher"`) + } + return nil +} + +func (ppu *PublisherPermissionUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := ppu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(publisherpermission.Table, publisherpermission.Columns, sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt)) + if ps := ppu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ppu.mutation.Permission(); ok { + _spec.SetField(publisherpermission.FieldPermission, field.TypeEnum, value) + } + if ppu.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.UserTable, + Columns: []string{publisherpermission.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ppu.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.UserTable, + Columns: []string{publisherpermission.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeString), + }, + } + for _, k := range 
nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if ppu.mutation.PublisherCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.PublisherTable, + Columns: []string{publisherpermission.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ppu.mutation.PublisherIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.PublisherTable, + Columns: []string{publisherpermission.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, ppu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{publisherpermission.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + ppu.mutation.done = true + return n, nil +} + +// PublisherPermissionUpdateOne is the builder for updating a single PublisherPermission entity. +type PublisherPermissionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PublisherPermissionMutation +} + +// SetPermission sets the "permission" field. +func (ppuo *PublisherPermissionUpdateOne) SetPermission(spt schema.PublisherPermissionType) *PublisherPermissionUpdateOne { + ppuo.mutation.SetPermission(spt) + return ppuo +} + +// SetNillablePermission sets the "permission" field if the given value is not nil. 
+func (ppuo *PublisherPermissionUpdateOne) SetNillablePermission(spt *schema.PublisherPermissionType) *PublisherPermissionUpdateOne { + if spt != nil { + ppuo.SetPermission(*spt) + } + return ppuo +} + +// SetUserID sets the "user_id" field. +func (ppuo *PublisherPermissionUpdateOne) SetUserID(s string) *PublisherPermissionUpdateOne { + ppuo.mutation.SetUserID(s) + return ppuo +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (ppuo *PublisherPermissionUpdateOne) SetNillableUserID(s *string) *PublisherPermissionUpdateOne { + if s != nil { + ppuo.SetUserID(*s) + } + return ppuo +} + +// SetPublisherID sets the "publisher_id" field. +func (ppuo *PublisherPermissionUpdateOne) SetPublisherID(s string) *PublisherPermissionUpdateOne { + ppuo.mutation.SetPublisherID(s) + return ppuo +} + +// SetNillablePublisherID sets the "publisher_id" field if the given value is not nil. +func (ppuo *PublisherPermissionUpdateOne) SetNillablePublisherID(s *string) *PublisherPermissionUpdateOne { + if s != nil { + ppuo.SetPublisherID(*s) + } + return ppuo +} + +// SetUser sets the "user" edge to the User entity. +func (ppuo *PublisherPermissionUpdateOne) SetUser(u *User) *PublisherPermissionUpdateOne { + return ppuo.SetUserID(u.ID) +} + +// SetPublisher sets the "publisher" edge to the Publisher entity. +func (ppuo *PublisherPermissionUpdateOne) SetPublisher(p *Publisher) *PublisherPermissionUpdateOne { + return ppuo.SetPublisherID(p.ID) +} + +// Mutation returns the PublisherPermissionMutation object of the builder. +func (ppuo *PublisherPermissionUpdateOne) Mutation() *PublisherPermissionMutation { + return ppuo.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (ppuo *PublisherPermissionUpdateOne) ClearUser() *PublisherPermissionUpdateOne { + ppuo.mutation.ClearUser() + return ppuo +} + +// ClearPublisher clears the "publisher" edge to the Publisher entity. 
+func (ppuo *PublisherPermissionUpdateOne) ClearPublisher() *PublisherPermissionUpdateOne { + ppuo.mutation.ClearPublisher() + return ppuo +} + +// Where appends a list predicates to the PublisherPermissionUpdate builder. +func (ppuo *PublisherPermissionUpdateOne) Where(ps ...predicate.PublisherPermission) *PublisherPermissionUpdateOne { + ppuo.mutation.Where(ps...) + return ppuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (ppuo *PublisherPermissionUpdateOne) Select(field string, fields ...string) *PublisherPermissionUpdateOne { + ppuo.fields = append([]string{field}, fields...) + return ppuo +} + +// Save executes the query and returns the updated PublisherPermission entity. +func (ppuo *PublisherPermissionUpdateOne) Save(ctx context.Context) (*PublisherPermission, error) { + return withHooks(ctx, ppuo.sqlSave, ppuo.mutation, ppuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ppuo *PublisherPermissionUpdateOne) SaveX(ctx context.Context) *PublisherPermission { + node, err := ppuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (ppuo *PublisherPermissionUpdateOne) Exec(ctx context.Context) error { + _, err := ppuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ppuo *PublisherPermissionUpdateOne) ExecX(ctx context.Context) { + if err := ppuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ppuo *PublisherPermissionUpdateOne) check() error { + if v, ok := ppuo.mutation.Permission(); ok { + if err := publisherpermission.PermissionValidator(v); err != nil { + return &ValidationError{Name: "permission", err: fmt.Errorf(`ent: validator failed for field "PublisherPermission.permission": %w`, err)} + } + } + if _, ok := ppuo.mutation.UserID(); ppuo.mutation.UserCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "PublisherPermission.user"`) + } + if _, ok := ppuo.mutation.PublisherID(); ppuo.mutation.PublisherCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "PublisherPermission.publisher"`) + } + return nil +} + +func (ppuo *PublisherPermissionUpdateOne) sqlSave(ctx context.Context) (_node *PublisherPermission, err error) { + if err := ppuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(publisherpermission.Table, publisherpermission.Columns, sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt)) + id, ok := ppuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PublisherPermission.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := ppuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, publisherpermission.FieldID) + for _, f := range fields { + if !publisherpermission.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != publisherpermission.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := ppuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ppuo.mutation.Permission(); ok { + _spec.SetField(publisherpermission.FieldPermission, field.TypeEnum, value) + } + if ppuo.mutation.UserCleared() { + edge := 
&sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.UserTable, + Columns: []string{publisherpermission.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ppuo.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.UserTable, + Columns: []string{publisherpermission.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if ppuo.mutation.PublisherCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.PublisherTable, + Columns: []string{publisherpermission.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ppuo.mutation.PublisherIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: publisherpermission.PublisherTable, + Columns: []string{publisherpermission.PublisherColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisher.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PublisherPermission{config: ppuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, ppuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{publisherpermission.Label} + } else 
if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + ppuo.mutation.done = true + return _node, nil +} diff --git a/ent/runtime.go b/ent/runtime.go new file mode 100644 index 0000000..63e4103 --- /dev/null +++ b/ent/runtime.go @@ -0,0 +1,196 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "registry-backend/ent/schema" + "registry-backend/ent/storagefile" + "registry-backend/ent/user" + "time" + + "github.com/google/uuid" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + ciworkflowresultMixin := schema.CIWorkflowResult{}.Mixin() + ciworkflowresultMixinFields0 := ciworkflowresultMixin[0].Fields() + _ = ciworkflowresultMixinFields0 + ciworkflowresultFields := schema.CIWorkflowResult{}.Fields() + _ = ciworkflowresultFields + // ciworkflowresultDescCreateTime is the schema descriptor for create_time field. + ciworkflowresultDescCreateTime := ciworkflowresultMixinFields0[0].Descriptor() + // ciworkflowresult.DefaultCreateTime holds the default value on creation for the create_time field. + ciworkflowresult.DefaultCreateTime = ciworkflowresultDescCreateTime.Default.(func() time.Time) + // ciworkflowresultDescUpdateTime is the schema descriptor for update_time field. + ciworkflowresultDescUpdateTime := ciworkflowresultMixinFields0[1].Descriptor() + // ciworkflowresult.DefaultUpdateTime holds the default value on creation for the update_time field. 
+ ciworkflowresult.DefaultUpdateTime = ciworkflowresultDescUpdateTime.Default.(func() time.Time) + // ciworkflowresult.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + ciworkflowresult.UpdateDefaultUpdateTime = ciworkflowresultDescUpdateTime.UpdateDefault.(func() time.Time) + // ciworkflowresultDescID is the schema descriptor for id field. + ciworkflowresultDescID := ciworkflowresultFields[0].Descriptor() + // ciworkflowresult.DefaultID holds the default value on creation for the id field. + ciworkflowresult.DefaultID = ciworkflowresultDescID.Default.(func() uuid.UUID) + gitcommitMixin := schema.GitCommit{}.Mixin() + gitcommitMixinFields0 := gitcommitMixin[0].Fields() + _ = gitcommitMixinFields0 + gitcommitFields := schema.GitCommit{}.Fields() + _ = gitcommitFields + // gitcommitDescCreateTime is the schema descriptor for create_time field. + gitcommitDescCreateTime := gitcommitMixinFields0[0].Descriptor() + // gitcommit.DefaultCreateTime holds the default value on creation for the create_time field. + gitcommit.DefaultCreateTime = gitcommitDescCreateTime.Default.(func() time.Time) + // gitcommitDescUpdateTime is the schema descriptor for update_time field. + gitcommitDescUpdateTime := gitcommitMixinFields0[1].Descriptor() + // gitcommit.DefaultUpdateTime holds the default value on creation for the update_time field. + gitcommit.DefaultUpdateTime = gitcommitDescUpdateTime.Default.(func() time.Time) + // gitcommit.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + gitcommit.UpdateDefaultUpdateTime = gitcommitDescUpdateTime.UpdateDefault.(func() time.Time) + // gitcommitDescID is the schema descriptor for id field. + gitcommitDescID := gitcommitFields[0].Descriptor() + // gitcommit.DefaultID holds the default value on creation for the id field. 
+ gitcommit.DefaultID = gitcommitDescID.Default.(func() uuid.UUID) + nodeMixin := schema.Node{}.Mixin() + nodeMixinFields0 := nodeMixin[0].Fields() + _ = nodeMixinFields0 + nodeFields := schema.Node{}.Fields() + _ = nodeFields + // nodeDescCreateTime is the schema descriptor for create_time field. + nodeDescCreateTime := nodeMixinFields0[0].Descriptor() + // node.DefaultCreateTime holds the default value on creation for the create_time field. + node.DefaultCreateTime = nodeDescCreateTime.Default.(func() time.Time) + // nodeDescUpdateTime is the schema descriptor for update_time field. + nodeDescUpdateTime := nodeMixinFields0[1].Descriptor() + // node.DefaultUpdateTime holds the default value on creation for the update_time field. + node.DefaultUpdateTime = nodeDescUpdateTime.Default.(func() time.Time) + // node.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + node.UpdateDefaultUpdateTime = nodeDescUpdateTime.UpdateDefault.(func() time.Time) + // nodeDescTags is the schema descriptor for tags field. + nodeDescTags := nodeFields[8].Descriptor() + // node.DefaultTags holds the default value on creation for the tags field. + node.DefaultTags = nodeDescTags.Default.([]string) + nodeversionMixin := schema.NodeVersion{}.Mixin() + nodeversionMixinFields0 := nodeversionMixin[0].Fields() + _ = nodeversionMixinFields0 + nodeversionFields := schema.NodeVersion{}.Fields() + _ = nodeversionFields + // nodeversionDescCreateTime is the schema descriptor for create_time field. + nodeversionDescCreateTime := nodeversionMixinFields0[0].Descriptor() + // nodeversion.DefaultCreateTime holds the default value on creation for the create_time field. + nodeversion.DefaultCreateTime = nodeversionDescCreateTime.Default.(func() time.Time) + // nodeversionDescUpdateTime is the schema descriptor for update_time field. 
+ nodeversionDescUpdateTime := nodeversionMixinFields0[1].Descriptor() + // nodeversion.DefaultUpdateTime holds the default value on creation for the update_time field. + nodeversion.DefaultUpdateTime = nodeversionDescUpdateTime.Default.(func() time.Time) + // nodeversion.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + nodeversion.UpdateDefaultUpdateTime = nodeversionDescUpdateTime.UpdateDefault.(func() time.Time) + // nodeversionDescDeprecated is the schema descriptor for deprecated field. + nodeversionDescDeprecated := nodeversionFields[5].Descriptor() + // nodeversion.DefaultDeprecated holds the default value on creation for the deprecated field. + nodeversion.DefaultDeprecated = nodeversionDescDeprecated.Default.(bool) + // nodeversionDescID is the schema descriptor for id field. + nodeversionDescID := nodeversionFields[0].Descriptor() + // nodeversion.DefaultID holds the default value on creation for the id field. + nodeversion.DefaultID = nodeversionDescID.Default.(func() uuid.UUID) + personalaccesstokenMixin := schema.PersonalAccessToken{}.Mixin() + personalaccesstokenMixinFields0 := personalaccesstokenMixin[0].Fields() + _ = personalaccesstokenMixinFields0 + personalaccesstokenFields := schema.PersonalAccessToken{}.Fields() + _ = personalaccesstokenFields + // personalaccesstokenDescCreateTime is the schema descriptor for create_time field. + personalaccesstokenDescCreateTime := personalaccesstokenMixinFields0[0].Descriptor() + // personalaccesstoken.DefaultCreateTime holds the default value on creation for the create_time field. + personalaccesstoken.DefaultCreateTime = personalaccesstokenDescCreateTime.Default.(func() time.Time) + // personalaccesstokenDescUpdateTime is the schema descriptor for update_time field. + personalaccesstokenDescUpdateTime := personalaccesstokenMixinFields0[1].Descriptor() + // personalaccesstoken.DefaultUpdateTime holds the default value on creation for the update_time field. 
+ personalaccesstoken.DefaultUpdateTime = personalaccesstokenDescUpdateTime.Default.(func() time.Time) + // personalaccesstoken.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + personalaccesstoken.UpdateDefaultUpdateTime = personalaccesstokenDescUpdateTime.UpdateDefault.(func() time.Time) + // personalaccesstokenDescID is the schema descriptor for id field. + personalaccesstokenDescID := personalaccesstokenFields[0].Descriptor() + // personalaccesstoken.DefaultID holds the default value on creation for the id field. + personalaccesstoken.DefaultID = personalaccesstokenDescID.Default.(func() uuid.UUID) + publisherMixin := schema.Publisher{}.Mixin() + publisherMixinFields0 := publisherMixin[0].Fields() + _ = publisherMixinFields0 + publisherFields := schema.Publisher{}.Fields() + _ = publisherFields + // publisherDescCreateTime is the schema descriptor for create_time field. + publisherDescCreateTime := publisherMixinFields0[0].Descriptor() + // publisher.DefaultCreateTime holds the default value on creation for the create_time field. + publisher.DefaultCreateTime = publisherDescCreateTime.Default.(func() time.Time) + // publisherDescUpdateTime is the schema descriptor for update_time field. + publisherDescUpdateTime := publisherMixinFields0[1].Descriptor() + // publisher.DefaultUpdateTime holds the default value on creation for the update_time field. + publisher.DefaultUpdateTime = publisherDescUpdateTime.Default.(func() time.Time) + // publisher.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + publisher.UpdateDefaultUpdateTime = publisherDescUpdateTime.UpdateDefault.(func() time.Time) + storagefileMixin := schema.StorageFile{}.Mixin() + storagefileMixinFields0 := storagefileMixin[0].Fields() + _ = storagefileMixinFields0 + storagefileFields := schema.StorageFile{}.Fields() + _ = storagefileFields + // storagefileDescCreateTime is the schema descriptor for create_time field. 
+ storagefileDescCreateTime := storagefileMixinFields0[0].Descriptor() + // storagefile.DefaultCreateTime holds the default value on creation for the create_time field. + storagefile.DefaultCreateTime = storagefileDescCreateTime.Default.(func() time.Time) + // storagefileDescUpdateTime is the schema descriptor for update_time field. + storagefileDescUpdateTime := storagefileMixinFields0[1].Descriptor() + // storagefile.DefaultUpdateTime holds the default value on creation for the update_time field. + storagefile.DefaultUpdateTime = storagefileDescUpdateTime.Default.(func() time.Time) + // storagefile.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + storagefile.UpdateDefaultUpdateTime = storagefileDescUpdateTime.UpdateDefault.(func() time.Time) + // storagefileDescBucketName is the schema descriptor for bucket_name field. + storagefileDescBucketName := storagefileFields[1].Descriptor() + // storagefile.BucketNameValidator is a validator for the "bucket_name" field. It is called by the builders before save. + storagefile.BucketNameValidator = storagefileDescBucketName.Validators[0].(func(string) error) + // storagefileDescObjectName is the schema descriptor for object_name field. + storagefileDescObjectName := storagefileFields[2].Descriptor() + // storagefile.ObjectNameValidator is a validator for the "object_name" field. It is called by the builders before save. + storagefile.ObjectNameValidator = storagefileDescObjectName.Validators[0].(func(string) error) + // storagefileDescFilePath is the schema descriptor for file_path field. + storagefileDescFilePath := storagefileFields[3].Descriptor() + // storagefile.FilePathValidator is a validator for the "file_path" field. It is called by the builders before save. + storagefile.FilePathValidator = storagefileDescFilePath.Validators[0].(func(string) error) + // storagefileDescFileType is the schema descriptor for file_type field. 
+ storagefileDescFileType := storagefileFields[4].Descriptor() + // storagefile.FileTypeValidator is a validator for the "file_type" field. It is called by the builders before save. + storagefile.FileTypeValidator = storagefileDescFileType.Validators[0].(func(string) error) + // storagefileDescID is the schema descriptor for id field. + storagefileDescID := storagefileFields[0].Descriptor() + // storagefile.DefaultID holds the default value on creation for the id field. + storagefile.DefaultID = storagefileDescID.Default.(func() uuid.UUID) + userMixin := schema.User{}.Mixin() + userMixinFields0 := userMixin[0].Fields() + _ = userMixinFields0 + userFields := schema.User{}.Fields() + _ = userFields + // userDescCreateTime is the schema descriptor for create_time field. + userDescCreateTime := userMixinFields0[0].Descriptor() + // user.DefaultCreateTime holds the default value on creation for the create_time field. + user.DefaultCreateTime = userDescCreateTime.Default.(func() time.Time) + // userDescUpdateTime is the schema descriptor for update_time field. + userDescUpdateTime := userMixinFields0[1].Descriptor() + // user.DefaultUpdateTime holds the default value on creation for the update_time field. + user.DefaultUpdateTime = userDescUpdateTime.Default.(func() time.Time) + // user.UpdateDefaultUpdateTime holds the default value on update for the update_time field. + user.UpdateDefaultUpdateTime = userDescUpdateTime.UpdateDefault.(func() time.Time) + // userDescIsApproved is the schema descriptor for is_approved field. + userDescIsApproved := userFields[3].Descriptor() + // user.DefaultIsApproved holds the default value on creation for the is_approved field. + user.DefaultIsApproved = userDescIsApproved.Default.(bool) + // userDescIsAdmin is the schema descriptor for is_admin field. + userDescIsAdmin := userFields[4].Descriptor() + // user.DefaultIsAdmin holds the default value on creation for the is_admin field. 
+ user.DefaultIsAdmin = userDescIsAdmin.Default.(bool) +} diff --git a/ent/runtime/runtime.go b/ent/runtime/runtime.go new file mode 100644 index 0000000..df3812a --- /dev/null +++ b/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in registry-backend/ent/runtime.go + +const ( + Version = "v0.13.1" // Version of ent codegen. + Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. +) diff --git a/ent/schema/ci_workflow_result.go b/ent/schema/ci_workflow_result.go new file mode 100644 index 0000000..3bd3f96 --- /dev/null +++ b/ent/schema/ci_workflow_result.go @@ -0,0 +1,56 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +// CIWorkflowResult holds the schema definition for the CIWorkflowResult entity. +type CIWorkflowResult struct { + ent.Schema +} + +// Fields of the CIWorkflowResult. +func (CIWorkflowResult) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}).Default(uuid.New).Unique(), + field.String("operating_system").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("gpu_type").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.String("pytorch_version").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.String("workflow_name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.String("run_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.String("status").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.Int64("start_time").Optional(), + field.Int64("end_time").Optional(), + } +} + +// Edges of the CIWorkflowResult. 
+func (CIWorkflowResult) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("gitcommit", GitCommit.Type).Ref("results").Unique(), + edge.To("storage_file", StorageFile.Type).Unique(), + } +} + +func (CIWorkflowResult) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} diff --git a/ent/schema/git_commit.go b/ent/schema/git_commit.go new file mode 100644 index 0000000..6bac90c --- /dev/null +++ b/ent/schema/git_commit.go @@ -0,0 +1,60 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +// GitCommit holds the schema definition for the GitCommit entity. +type GitCommit struct { + ent.Schema +} + +// Fields of the GitCommit. +func (GitCommit) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}).Default(uuid.New), + field.String("commit_hash").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("branch_name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("repo_name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("commit_message").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.Time("commit_timestamp"), + field.String("author").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.Time("timestamp").Optional(), + } +} + +func (GitCommit) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("results", CIWorkflowResult.Type), + } +} + +func (GitCommit) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} + +func (GitCommit) Indexes() []ent.Index { + // Hashes within the same repo should be unique. + return []ent.Index{ + index.Fields("repo_name", "commit_hash"). 
+ Unique(), + } +} diff --git a/ent/schema/mixins/createdBy.go b/ent/schema/mixins/createdBy.go new file mode 100644 index 0000000..2651a43 --- /dev/null +++ b/ent/schema/mixins/createdBy.go @@ -0,0 +1,24 @@ +package dripmixins + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +// Write a ent Mixin that adds a "created_by" field to all entities. +// The field should be of type "string" and should be optional. +// The field should be named "created_by". +// The field should have a default value of "system". + +type EditedByMixin struct { + mixin.Schema +} + +func (EditedByMixin) Fields() []ent.Field { + return []ent.Field{ + field.UUID("created_by", uuid.UUID{}).Optional(), + field.UUID("updated_by", uuid.UUID{}).Optional(), + } +} diff --git a/ent/schema/node.go b/ent/schema/node.go new file mode 100644 index 0000000..43efc2b --- /dev/null +++ b/ent/schema/node.go @@ -0,0 +1,60 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// Contains information about a custom node on the Comfy Registry. 
+ +type Node struct { + ent.Schema +} + +func (Node) Fields() []ent.Field { + return []ent.Field{ + field.String("id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Unique(), + field.String("publisher_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("description").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("author").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.String("license").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("repository_url").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("icon_url").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + field.Strings("tags").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Default([]string{}), + } +} + +func (Node) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} + +func (Node) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("publisher", Publisher.Type).Field("publisher_id").Ref("nodes").Required().Unique(), + edge.To("versions", NodeVersion.Type), + } +} diff --git a/ent/schema/node_version.go b/ent/schema/node_version.go new file mode 100644 index 0000000..26e95bd --- /dev/null +++ b/ent/schema/node_version.go @@ -0,0 +1,55 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +// Contains information about a specific version of a custom node on the Comfy Registry. 
+ +type NodeVersion struct { + ent.Schema +} + +func (NodeVersion) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}).Default(uuid.New), + field.String("node_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("version").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Comment("Must be SemVer compliant"), + field.String("changelog").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.Strings("pip_dependencies").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.Bool("deprecated").Default(false), + } +} + +func (NodeVersion) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} + +func (NodeVersion) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("node", Node.Type).Field("node_id").Ref("versions").Required().Unique(), + edge.To("storage_file", StorageFile.Type).Unique(), + } +} + +func (NodeVersion) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("node_id", "version").Unique(), + } +} diff --git a/ent/schema/personal_access_token.go b/ent/schema/personal_access_token.go new file mode 100644 index 0000000..eb6d374 --- /dev/null +++ b/ent/schema/personal_access_token.go @@ -0,0 +1,52 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +type PersonalAccessToken struct { + ent.Schema +} + +func (PersonalAccessToken) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}).Default(uuid.New), + field.String("name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("description").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("publisher_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + 
field.String("token").Sensitive().Unique().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + } +} + +func (PersonalAccessToken) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("token").Unique(), + } +} + +func (PersonalAccessToken) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("publisher", Publisher.Type).Ref("personal_access_tokens"). + Field("publisher_id").Unique().Required(), + } +} + +func (PersonalAccessToken) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} diff --git a/ent/schema/publisher.go b/ent/schema/publisher.go new file mode 100644 index 0000000..95c4e98 --- /dev/null +++ b/ent/schema/publisher.go @@ -0,0 +1,54 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// Contains information about a publisher on the Comfy Registry. +type Publisher struct { + ent.Schema +} + +func (Publisher) Fields() []ent.Field { + return []ent.Field{ + field.String("id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Unique().Comment("The unique identifier of the publisher. 
Cannot be changed."), + field.String("name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Comment("The publicly visible name of the publisher."), + field.String("description").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("website").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("support_email").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("source_code_repo").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("logo_url").Optional().SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional(), + } +} + +func (Publisher) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} + +func (Publisher) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("publisher_permissions", PublisherPermission.Type), + edge.To("nodes", Node.Type), + edge.To("personal_access_tokens", PersonalAccessToken.Type), + } +} diff --git a/ent/schema/publisherpermission.go b/ent/schema/publisherpermission.go new file mode 100644 index 0000000..af8464a --- /dev/null +++ b/ent/schema/publisherpermission.go @@ -0,0 +1,48 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" +) + +// This type describes which user has which permission on a publisher. +type PublisherPermission struct { + ent.Schema +} + +func (PublisherPermission) Fields() []ent.Field { + return []ent.Field{ + field.Enum("permission").GoType(PublisherPermissionType("")), + field.String("user_id").StorageKey("user_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.String("publisher_id").SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + } +} + +func (PublisherPermission) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type).Ref("publisher_permissions"). 
+ Field("user_id").Unique().Required(), + edge.From("publisher", Publisher.Type).Ref("publisher_permissions"). + Field("publisher_id").Unique().Required(), + } +} + +type PublisherPermissionType string + +const ( + PublisherPermissionTypeOwner PublisherPermissionType = "owner" + PublisherPermissionTypeMember PublisherPermissionType = "member" +) + +func (PublisherPermissionType) Values() (types []string) { + return []string{ + string(PublisherPermissionTypeOwner), + string(PublisherPermissionTypeMember), + } +} diff --git a/ent/schema/storage_file.go b/ent/schema/storage_file.go new file mode 100644 index 0000000..979a36f --- /dev/null +++ b/ent/schema/storage_file.go @@ -0,0 +1,47 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" +) + +// Holds a generic GCP Storage Object +type StorageFile struct { + ent.Schema +} + +// Fields of the StorageFile. +func (StorageFile) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}).Default(uuid.New).Unique(), + field.String("bucket_name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).NotEmpty(), + field.String("object_name").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).NotEmpty().Optional(), + field.String("file_path").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).NotEmpty(), + field.String("file_type").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).NotEmpty().Comment("e.g., image, video"), + field.String("file_url").SchemaType(map[string]string{ + dialect.Postgres: "text", + }).Optional().Comment("Publicly accessible URL of the file, if available"), + } +} + +// Edges of the StorageFile. 
+func (StorageFile) Edges() []ent.Edge { + return []ent.Edge{} +} + +func (StorageFile) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} diff --git a/ent/schema/user.go b/ent/schema/user.go new file mode 100644 index 0000000..67ec683 --- /dev/null +++ b/ent/schema/user.go @@ -0,0 +1,37 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// User holds the schema definition for the User entity. +type User struct { + ent.Schema +} + +// Fields of the User. +func (User) Fields() []ent.Field { + return []ent.Field{ + field.String("id").Comment("The firebase UID of the user"), + field.String("email").Optional(), + field.String("name").Optional(), + field.Bool("is_approved").Default(false).Comment("Whether the user is approved to use the platform"), + field.Bool("is_admin").Default(false).Comment("Whether the user is approved to use the platform"), + } +} + +func (User) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixin.Time{}, + } +} + +// Edges of the User. +func (User) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("publisher_permissions", PublisherPermission.Type), + } +} diff --git a/ent/storagefile.go b/ent/storagefile.go new file mode 100644 index 0000000..df85918 --- /dev/null +++ b/ent/storagefile.go @@ -0,0 +1,173 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "registry-backend/ent/storagefile" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +// StorageFile is the model entity for the StorageFile schema. +type StorageFile struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. 
+ UpdateTime time.Time `json:"update_time,omitempty"` + // BucketName holds the value of the "bucket_name" field. + BucketName string `json:"bucket_name,omitempty"` + // ObjectName holds the value of the "object_name" field. + ObjectName string `json:"object_name,omitempty"` + // FilePath holds the value of the "file_path" field. + FilePath string `json:"file_path,omitempty"` + // e.g., image, video + FileType string `json:"file_type,omitempty"` + // Publicly accessible URL of the file, if available + FileURL string `json:"file_url,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*StorageFile) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case storagefile.FieldBucketName, storagefile.FieldObjectName, storagefile.FieldFilePath, storagefile.FieldFileType, storagefile.FieldFileURL: + values[i] = new(sql.NullString) + case storagefile.FieldCreateTime, storagefile.FieldUpdateTime: + values[i] = new(sql.NullTime) + case storagefile.FieldID: + values[i] = new(uuid.UUID) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the StorageFile fields. 
+func (sf *StorageFile) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case storagefile.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + sf.ID = *value + } + case storagefile.FieldCreateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field create_time", values[i]) + } else if value.Valid { + sf.CreateTime = value.Time + } + case storagefile.FieldUpdateTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field update_time", values[i]) + } else if value.Valid { + sf.UpdateTime = value.Time + } + case storagefile.FieldBucketName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field bucket_name", values[i]) + } else if value.Valid { + sf.BucketName = value.String + } + case storagefile.FieldObjectName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field object_name", values[i]) + } else if value.Valid { + sf.ObjectName = value.String + } + case storagefile.FieldFilePath: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field file_path", values[i]) + } else if value.Valid { + sf.FilePath = value.String + } + case storagefile.FieldFileType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field file_type", values[i]) + } else if value.Valid { + sf.FileType = value.String + } + case storagefile.FieldFileURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field file_url", values[i]) + } else if value.Valid { + sf.FileURL = value.String + } + default: + 
sf.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the StorageFile. +// This includes values selected through modifiers, order, etc. +func (sf *StorageFile) Value(name string) (ent.Value, error) { + return sf.selectValues.Get(name) +} + +// Update returns a builder for updating this StorageFile. +// Note that you need to call StorageFile.Unwrap() before calling this method if this StorageFile +// was returned from a transaction, and the transaction was committed or rolled back. +func (sf *StorageFile) Update() *StorageFileUpdateOne { + return NewStorageFileClient(sf.config).UpdateOne(sf) +} + +// Unwrap unwraps the StorageFile entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (sf *StorageFile) Unwrap() *StorageFile { + _tx, ok := sf.config.driver.(*txDriver) + if !ok { + panic("ent: StorageFile is not a transactional entity") + } + sf.config.driver = _tx.drv + return sf +} + +// String implements the fmt.Stringer. 
+func (sf *StorageFile) String() string { + var builder strings.Builder + builder.WriteString("StorageFile(") + builder.WriteString(fmt.Sprintf("id=%v, ", sf.ID)) + builder.WriteString("create_time=") + builder.WriteString(sf.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(sf.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("bucket_name=") + builder.WriteString(sf.BucketName) + builder.WriteString(", ") + builder.WriteString("object_name=") + builder.WriteString(sf.ObjectName) + builder.WriteString(", ") + builder.WriteString("file_path=") + builder.WriteString(sf.FilePath) + builder.WriteString(", ") + builder.WriteString("file_type=") + builder.WriteString(sf.FileType) + builder.WriteString(", ") + builder.WriteString("file_url=") + builder.WriteString(sf.FileURL) + builder.WriteByte(')') + return builder.String() +} + +// StorageFiles is a parsable slice of StorageFile. +type StorageFiles []*StorageFile diff --git a/ent/storagefile/storagefile.go b/ent/storagefile/storagefile.go new file mode 100644 index 0000000..c73252c --- /dev/null +++ b/ent/storagefile/storagefile.go @@ -0,0 +1,117 @@ +// Code generated by ent, DO NOT EDIT. + +package storagefile + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the storagefile type in the database. + Label = "storage_file" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldBucketName holds the string denoting the bucket_name field in the database. 
+ FieldBucketName = "bucket_name" + // FieldObjectName holds the string denoting the object_name field in the database. + FieldObjectName = "object_name" + // FieldFilePath holds the string denoting the file_path field in the database. + FieldFilePath = "file_path" + // FieldFileType holds the string denoting the file_type field in the database. + FieldFileType = "file_type" + // FieldFileURL holds the string denoting the file_url field in the database. + FieldFileURL = "file_url" + // Table holds the table name of the storagefile in the database. + Table = "storage_files" +) + +// Columns holds all SQL columns for storagefile fields. +var Columns = []string{ + FieldID, + FieldCreateTime, + FieldUpdateTime, + FieldBucketName, + FieldObjectName, + FieldFilePath, + FieldFileType, + FieldFileURL, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreateTime holds the default value on creation for the "create_time" field. + DefaultCreateTime func() time.Time + // DefaultUpdateTime holds the default value on creation for the "update_time" field. + DefaultUpdateTime func() time.Time + // UpdateDefaultUpdateTime holds the default value on update for the "update_time" field. + UpdateDefaultUpdateTime func() time.Time + // BucketNameValidator is a validator for the "bucket_name" field. It is called by the builders before save. + BucketNameValidator func(string) error + // ObjectNameValidator is a validator for the "object_name" field. It is called by the builders before save. + ObjectNameValidator func(string) error + // FilePathValidator is a validator for the "file_path" field. It is called by the builders before save. + FilePathValidator func(string) error + // FileTypeValidator is a validator for the "file_type" field. It is called by the builders before save. 
+ FileTypeValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the StorageFile queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreateTime orders the results by the create_time field. +func ByCreateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreateTime, opts...).ToFunc() +} + +// ByUpdateTime orders the results by the update_time field. +func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdateTime, opts...).ToFunc() +} + +// ByBucketName orders the results by the bucket_name field. +func ByBucketName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBucketName, opts...).ToFunc() +} + +// ByObjectName orders the results by the object_name field. +func ByObjectName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldObjectName, opts...).ToFunc() +} + +// ByFilePath orders the results by the file_path field. +func ByFilePath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFilePath, opts...).ToFunc() +} + +// ByFileType orders the results by the file_type field. +func ByFileType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFileType, opts...).ToFunc() +} + +// ByFileURL orders the results by the file_url field. +func ByFileURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFileURL, opts...).ToFunc() +} diff --git a/ent/storagefile/where.go b/ent/storagefile/where.go new file mode 100644 index 0000000..4471976 --- /dev/null +++ b/ent/storagefile/where.go @@ -0,0 +1,531 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package storagefile + +import ( + "registry-backend/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldID, id)) +} + +// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ. +func CreateTime(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldCreateTime, v)) +} + +// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ. 
+func UpdateTime(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldUpdateTime, v)) +} + +// BucketName applies equality check predicate on the "bucket_name" field. It's identical to BucketNameEQ. +func BucketName(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldBucketName, v)) +} + +// ObjectName applies equality check predicate on the "object_name" field. It's identical to ObjectNameEQ. +func ObjectName(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldObjectName, v)) +} + +// FilePath applies equality check predicate on the "file_path" field. It's identical to FilePathEQ. +func FilePath(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFilePath, v)) +} + +// FileType applies equality check predicate on the "file_type" field. It's identical to FileTypeEQ. +func FileType(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFileType, v)) +} + +// FileURL applies equality check predicate on the "file_url" field. It's identical to FileURLEQ. +func FileURL(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFileURL, v)) +} + +// CreateTimeEQ applies the EQ predicate on the "create_time" field. +func CreateTimeEQ(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldCreateTime, v)) +} + +// CreateTimeNEQ applies the NEQ predicate on the "create_time" field. +func CreateTimeNEQ(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldCreateTime, v)) +} + +// CreateTimeIn applies the In predicate on the "create_time" field. +func CreateTimeIn(vs ...time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldCreateTime, vs...)) +} + +// CreateTimeNotIn applies the NotIn predicate on the "create_time" field. 
+func CreateTimeNotIn(vs ...time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldCreateTime, vs...)) +} + +// CreateTimeGT applies the GT predicate on the "create_time" field. +func CreateTimeGT(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldCreateTime, v)) +} + +// CreateTimeGTE applies the GTE predicate on the "create_time" field. +func CreateTimeGTE(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldCreateTime, v)) +} + +// CreateTimeLT applies the LT predicate on the "create_time" field. +func CreateTimeLT(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldCreateTime, v)) +} + +// CreateTimeLTE applies the LTE predicate on the "create_time" field. +func CreateTimeLTE(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldCreateTime, v)) +} + +// UpdateTimeEQ applies the EQ predicate on the "update_time" field. +func UpdateTimeEQ(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldUpdateTime, v)) +} + +// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field. +func UpdateTimeNEQ(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldUpdateTime, v)) +} + +// UpdateTimeIn applies the In predicate on the "update_time" field. +func UpdateTimeIn(vs ...time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field. +func UpdateTimeNotIn(vs ...time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldUpdateTime, vs...)) +} + +// UpdateTimeGT applies the GT predicate on the "update_time" field. 
+func UpdateTimeGT(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldUpdateTime, v)) +} + +// UpdateTimeGTE applies the GTE predicate on the "update_time" field. +func UpdateTimeGTE(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldUpdateTime, v)) +} + +// UpdateTimeLT applies the LT predicate on the "update_time" field. +func UpdateTimeLT(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldUpdateTime, v)) +} + +// UpdateTimeLTE applies the LTE predicate on the "update_time" field. +func UpdateTimeLTE(v time.Time) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldUpdateTime, v)) +} + +// BucketNameEQ applies the EQ predicate on the "bucket_name" field. +func BucketNameEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldBucketName, v)) +} + +// BucketNameNEQ applies the NEQ predicate on the "bucket_name" field. +func BucketNameNEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldBucketName, v)) +} + +// BucketNameIn applies the In predicate on the "bucket_name" field. +func BucketNameIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldBucketName, vs...)) +} + +// BucketNameNotIn applies the NotIn predicate on the "bucket_name" field. +func BucketNameNotIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldBucketName, vs...)) +} + +// BucketNameGT applies the GT predicate on the "bucket_name" field. +func BucketNameGT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldBucketName, v)) +} + +// BucketNameGTE applies the GTE predicate on the "bucket_name" field. +func BucketNameGTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldBucketName, v)) +} + +// BucketNameLT applies the LT predicate on the "bucket_name" field. 
+func BucketNameLT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldBucketName, v)) +} + +// BucketNameLTE applies the LTE predicate on the "bucket_name" field. +func BucketNameLTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldBucketName, v)) +} + +// BucketNameContains applies the Contains predicate on the "bucket_name" field. +func BucketNameContains(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContains(FieldBucketName, v)) +} + +// BucketNameHasPrefix applies the HasPrefix predicate on the "bucket_name" field. +func BucketNameHasPrefix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasPrefix(FieldBucketName, v)) +} + +// BucketNameHasSuffix applies the HasSuffix predicate on the "bucket_name" field. +func BucketNameHasSuffix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasSuffix(FieldBucketName, v)) +} + +// BucketNameEqualFold applies the EqualFold predicate on the "bucket_name" field. +func BucketNameEqualFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEqualFold(FieldBucketName, v)) +} + +// BucketNameContainsFold applies the ContainsFold predicate on the "bucket_name" field. +func BucketNameContainsFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContainsFold(FieldBucketName, v)) +} + +// ObjectNameEQ applies the EQ predicate on the "object_name" field. +func ObjectNameEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldObjectName, v)) +} + +// ObjectNameNEQ applies the NEQ predicate on the "object_name" field. +func ObjectNameNEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldObjectName, v)) +} + +// ObjectNameIn applies the In predicate on the "object_name" field. 
+func ObjectNameIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldObjectName, vs...)) +} + +// ObjectNameNotIn applies the NotIn predicate on the "object_name" field. +func ObjectNameNotIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldObjectName, vs...)) +} + +// ObjectNameGT applies the GT predicate on the "object_name" field. +func ObjectNameGT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldObjectName, v)) +} + +// ObjectNameGTE applies the GTE predicate on the "object_name" field. +func ObjectNameGTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldObjectName, v)) +} + +// ObjectNameLT applies the LT predicate on the "object_name" field. +func ObjectNameLT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldObjectName, v)) +} + +// ObjectNameLTE applies the LTE predicate on the "object_name" field. +func ObjectNameLTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldObjectName, v)) +} + +// ObjectNameContains applies the Contains predicate on the "object_name" field. +func ObjectNameContains(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContains(FieldObjectName, v)) +} + +// ObjectNameHasPrefix applies the HasPrefix predicate on the "object_name" field. +func ObjectNameHasPrefix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasPrefix(FieldObjectName, v)) +} + +// ObjectNameHasSuffix applies the HasSuffix predicate on the "object_name" field. +func ObjectNameHasSuffix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasSuffix(FieldObjectName, v)) +} + +// ObjectNameIsNil applies the IsNil predicate on the "object_name" field. 
+func ObjectNameIsNil() predicate.StorageFile { + return predicate.StorageFile(sql.FieldIsNull(FieldObjectName)) +} + +// ObjectNameNotNil applies the NotNil predicate on the "object_name" field. +func ObjectNameNotNil() predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotNull(FieldObjectName)) +} + +// ObjectNameEqualFold applies the EqualFold predicate on the "object_name" field. +func ObjectNameEqualFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEqualFold(FieldObjectName, v)) +} + +// ObjectNameContainsFold applies the ContainsFold predicate on the "object_name" field. +func ObjectNameContainsFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContainsFold(FieldObjectName, v)) +} + +// FilePathEQ applies the EQ predicate on the "file_path" field. +func FilePathEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFilePath, v)) +} + +// FilePathNEQ applies the NEQ predicate on the "file_path" field. +func FilePathNEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldFilePath, v)) +} + +// FilePathIn applies the In predicate on the "file_path" field. +func FilePathIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldFilePath, vs...)) +} + +// FilePathNotIn applies the NotIn predicate on the "file_path" field. +func FilePathNotIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldFilePath, vs...)) +} + +// FilePathGT applies the GT predicate on the "file_path" field. +func FilePathGT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldFilePath, v)) +} + +// FilePathGTE applies the GTE predicate on the "file_path" field. +func FilePathGTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldFilePath, v)) +} + +// FilePathLT applies the LT predicate on the "file_path" field. 
+func FilePathLT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldFilePath, v)) +} + +// FilePathLTE applies the LTE predicate on the "file_path" field. +func FilePathLTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldFilePath, v)) +} + +// FilePathContains applies the Contains predicate on the "file_path" field. +func FilePathContains(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContains(FieldFilePath, v)) +} + +// FilePathHasPrefix applies the HasPrefix predicate on the "file_path" field. +func FilePathHasPrefix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasPrefix(FieldFilePath, v)) +} + +// FilePathHasSuffix applies the HasSuffix predicate on the "file_path" field. +func FilePathHasSuffix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasSuffix(FieldFilePath, v)) +} + +// FilePathEqualFold applies the EqualFold predicate on the "file_path" field. +func FilePathEqualFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEqualFold(FieldFilePath, v)) +} + +// FilePathContainsFold applies the ContainsFold predicate on the "file_path" field. +func FilePathContainsFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContainsFold(FieldFilePath, v)) +} + +// FileTypeEQ applies the EQ predicate on the "file_type" field. +func FileTypeEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFileType, v)) +} + +// FileTypeNEQ applies the NEQ predicate on the "file_type" field. +func FileTypeNEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldFileType, v)) +} + +// FileTypeIn applies the In predicate on the "file_type" field. 
+func FileTypeIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldFileType, vs...)) +} + +// FileTypeNotIn applies the NotIn predicate on the "file_type" field. +func FileTypeNotIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldFileType, vs...)) +} + +// FileTypeGT applies the GT predicate on the "file_type" field. +func FileTypeGT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldFileType, v)) +} + +// FileTypeGTE applies the GTE predicate on the "file_type" field. +func FileTypeGTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldFileType, v)) +} + +// FileTypeLT applies the LT predicate on the "file_type" field. +func FileTypeLT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldFileType, v)) +} + +// FileTypeLTE applies the LTE predicate on the "file_type" field. +func FileTypeLTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldFileType, v)) +} + +// FileTypeContains applies the Contains predicate on the "file_type" field. +func FileTypeContains(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContains(FieldFileType, v)) +} + +// FileTypeHasPrefix applies the HasPrefix predicate on the "file_type" field. +func FileTypeHasPrefix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasPrefix(FieldFileType, v)) +} + +// FileTypeHasSuffix applies the HasSuffix predicate on the "file_type" field. +func FileTypeHasSuffix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasSuffix(FieldFileType, v)) +} + +// FileTypeEqualFold applies the EqualFold predicate on the "file_type" field. 
+func FileTypeEqualFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEqualFold(FieldFileType, v)) +} + +// FileTypeContainsFold applies the ContainsFold predicate on the "file_type" field. +func FileTypeContainsFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContainsFold(FieldFileType, v)) +} + +// FileURLEQ applies the EQ predicate on the "file_url" field. +func FileURLEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEQ(FieldFileURL, v)) +} + +// FileURLNEQ applies the NEQ predicate on the "file_url" field. +func FileURLNEQ(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNEQ(FieldFileURL, v)) +} + +// FileURLIn applies the In predicate on the "file_url" field. +func FileURLIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldIn(FieldFileURL, vs...)) +} + +// FileURLNotIn applies the NotIn predicate on the "file_url" field. +func FileURLNotIn(vs ...string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotIn(FieldFileURL, vs...)) +} + +// FileURLGT applies the GT predicate on the "file_url" field. +func FileURLGT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGT(FieldFileURL, v)) +} + +// FileURLGTE applies the GTE predicate on the "file_url" field. +func FileURLGTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldGTE(FieldFileURL, v)) +} + +// FileURLLT applies the LT predicate on the "file_url" field. +func FileURLLT(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLT(FieldFileURL, v)) +} + +// FileURLLTE applies the LTE predicate on the "file_url" field. +func FileURLLTE(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldLTE(FieldFileURL, v)) +} + +// FileURLContains applies the Contains predicate on the "file_url" field. 
+func FileURLContains(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContains(FieldFileURL, v)) +} + +// FileURLHasPrefix applies the HasPrefix predicate on the "file_url" field. +func FileURLHasPrefix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasPrefix(FieldFileURL, v)) +} + +// FileURLHasSuffix applies the HasSuffix predicate on the "file_url" field. +func FileURLHasSuffix(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldHasSuffix(FieldFileURL, v)) +} + +// FileURLIsNil applies the IsNil predicate on the "file_url" field. +func FileURLIsNil() predicate.StorageFile { + return predicate.StorageFile(sql.FieldIsNull(FieldFileURL)) +} + +// FileURLNotNil applies the NotNil predicate on the "file_url" field. +func FileURLNotNil() predicate.StorageFile { + return predicate.StorageFile(sql.FieldNotNull(FieldFileURL)) +} + +// FileURLEqualFold applies the EqualFold predicate on the "file_url" field. +func FileURLEqualFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldEqualFold(FieldFileURL, v)) +} + +// FileURLContainsFold applies the ContainsFold predicate on the "file_url" field. +func FileURLContainsFold(v string) predicate.StorageFile { + return predicate.StorageFile(sql.FieldContainsFold(FieldFileURL, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.StorageFile) predicate.StorageFile { + return predicate.StorageFile(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.StorageFile) predicate.StorageFile { + return predicate.StorageFile(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.StorageFile) predicate.StorageFile { + return predicate.StorageFile(sql.NotPredicates(p)) +} diff --git a/ent/storagefile_create.go b/ent/storagefile_create.go new file mode 100644 index 0000000..f786940 --- /dev/null +++ b/ent/storagefile_create.go @@ -0,0 +1,887 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/storagefile" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// StorageFileCreate is the builder for creating a StorageFile entity. +type StorageFileCreate struct { + config + mutation *StorageFileMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreateTime sets the "create_time" field. +func (sfc *StorageFileCreate) SetCreateTime(t time.Time) *StorageFileCreate { + sfc.mutation.SetCreateTime(t) + return sfc +} + +// SetNillableCreateTime sets the "create_time" field if the given value is not nil. +func (sfc *StorageFileCreate) SetNillableCreateTime(t *time.Time) *StorageFileCreate { + if t != nil { + sfc.SetCreateTime(*t) + } + return sfc +} + +// SetUpdateTime sets the "update_time" field. +func (sfc *StorageFileCreate) SetUpdateTime(t time.Time) *StorageFileCreate { + sfc.mutation.SetUpdateTime(t) + return sfc +} + +// SetNillableUpdateTime sets the "update_time" field if the given value is not nil. +func (sfc *StorageFileCreate) SetNillableUpdateTime(t *time.Time) *StorageFileCreate { + if t != nil { + sfc.SetUpdateTime(*t) + } + return sfc +} + +// SetBucketName sets the "bucket_name" field. +func (sfc *StorageFileCreate) SetBucketName(s string) *StorageFileCreate { + sfc.mutation.SetBucketName(s) + return sfc +} + +// SetObjectName sets the "object_name" field. 
+func (sfc *StorageFileCreate) SetObjectName(s string) *StorageFileCreate { + sfc.mutation.SetObjectName(s) + return sfc +} + +// SetNillableObjectName sets the "object_name" field if the given value is not nil. +func (sfc *StorageFileCreate) SetNillableObjectName(s *string) *StorageFileCreate { + if s != nil { + sfc.SetObjectName(*s) + } + return sfc +} + +// SetFilePath sets the "file_path" field. +func (sfc *StorageFileCreate) SetFilePath(s string) *StorageFileCreate { + sfc.mutation.SetFilePath(s) + return sfc +} + +// SetFileType sets the "file_type" field. +func (sfc *StorageFileCreate) SetFileType(s string) *StorageFileCreate { + sfc.mutation.SetFileType(s) + return sfc +} + +// SetFileURL sets the "file_url" field. +func (sfc *StorageFileCreate) SetFileURL(s string) *StorageFileCreate { + sfc.mutation.SetFileURL(s) + return sfc +} + +// SetNillableFileURL sets the "file_url" field if the given value is not nil. +func (sfc *StorageFileCreate) SetNillableFileURL(s *string) *StorageFileCreate { + if s != nil { + sfc.SetFileURL(*s) + } + return sfc +} + +// SetID sets the "id" field. +func (sfc *StorageFileCreate) SetID(u uuid.UUID) *StorageFileCreate { + sfc.mutation.SetID(u) + return sfc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (sfc *StorageFileCreate) SetNillableID(u *uuid.UUID) *StorageFileCreate { + if u != nil { + sfc.SetID(*u) + } + return sfc +} + +// Mutation returns the StorageFileMutation object of the builder. +func (sfc *StorageFileCreate) Mutation() *StorageFileMutation { + return sfc.mutation +} + +// Save creates the StorageFile in the database. +func (sfc *StorageFileCreate) Save(ctx context.Context) (*StorageFile, error) { + sfc.defaults() + return withHooks(ctx, sfc.sqlSave, sfc.mutation, sfc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (sfc *StorageFileCreate) SaveX(ctx context.Context) *StorageFile { + v, err := sfc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (sfc *StorageFileCreate) Exec(ctx context.Context) error { + _, err := sfc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sfc *StorageFileCreate) ExecX(ctx context.Context) { + if err := sfc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (sfc *StorageFileCreate) defaults() { + if _, ok := sfc.mutation.CreateTime(); !ok { + v := storagefile.DefaultCreateTime() + sfc.mutation.SetCreateTime(v) + } + if _, ok := sfc.mutation.UpdateTime(); !ok { + v := storagefile.DefaultUpdateTime() + sfc.mutation.SetUpdateTime(v) + } + if _, ok := sfc.mutation.ID(); !ok { + v := storagefile.DefaultID() + sfc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (sfc *StorageFileCreate) check() error { + if _, ok := sfc.mutation.CreateTime(); !ok { + return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "StorageFile.create_time"`)} + } + if _, ok := sfc.mutation.UpdateTime(); !ok { + return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "StorageFile.update_time"`)} + } + if _, ok := sfc.mutation.BucketName(); !ok { + return &ValidationError{Name: "bucket_name", err: errors.New(`ent: missing required field "StorageFile.bucket_name"`)} + } + if v, ok := sfc.mutation.BucketName(); ok { + if err := storagefile.BucketNameValidator(v); err != nil { + return &ValidationError{Name: "bucket_name", err: fmt.Errorf(`ent: validator failed for field "StorageFile.bucket_name": %w`, err)} + } + } + if v, ok := sfc.mutation.ObjectName(); ok { + if err := storagefile.ObjectNameValidator(v); err != nil { + return &ValidationError{Name: "object_name", err: fmt.Errorf(`ent: validator failed for 
field "StorageFile.object_name": %w`, err)} + } + } + if _, ok := sfc.mutation.FilePath(); !ok { + return &ValidationError{Name: "file_path", err: errors.New(`ent: missing required field "StorageFile.file_path"`)} + } + if v, ok := sfc.mutation.FilePath(); ok { + if err := storagefile.FilePathValidator(v); err != nil { + return &ValidationError{Name: "file_path", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_path": %w`, err)} + } + } + if _, ok := sfc.mutation.FileType(); !ok { + return &ValidationError{Name: "file_type", err: errors.New(`ent: missing required field "StorageFile.file_type"`)} + } + if v, ok := sfc.mutation.FileType(); ok { + if err := storagefile.FileTypeValidator(v); err != nil { + return &ValidationError{Name: "file_type", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_type": %w`, err)} + } + } + return nil +} + +func (sfc *StorageFileCreate) sqlSave(ctx context.Context) (*StorageFile, error) { + if err := sfc.check(); err != nil { + return nil, err + } + _node, _spec := sfc.createSpec() + if err := sqlgraph.CreateNode(ctx, sfc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + sfc.mutation.id = &_node.ID + sfc.mutation.done = true + return _node, nil +} + +func (sfc *StorageFileCreate) createSpec() (*StorageFile, *sqlgraph.CreateSpec) { + var ( + _node = &StorageFile{config: sfc.config} + _spec = sqlgraph.NewCreateSpec(storagefile.Table, sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = sfc.conflict + if id, ok := sfc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := sfc.mutation.CreateTime(); ok { + _spec.SetField(storagefile.FieldCreateTime, field.TypeTime, 
value) + _node.CreateTime = value + } + if value, ok := sfc.mutation.UpdateTime(); ok { + _spec.SetField(storagefile.FieldUpdateTime, field.TypeTime, value) + _node.UpdateTime = value + } + if value, ok := sfc.mutation.BucketName(); ok { + _spec.SetField(storagefile.FieldBucketName, field.TypeString, value) + _node.BucketName = value + } + if value, ok := sfc.mutation.ObjectName(); ok { + _spec.SetField(storagefile.FieldObjectName, field.TypeString, value) + _node.ObjectName = value + } + if value, ok := sfc.mutation.FilePath(); ok { + _spec.SetField(storagefile.FieldFilePath, field.TypeString, value) + _node.FilePath = value + } + if value, ok := sfc.mutation.FileType(); ok { + _spec.SetField(storagefile.FieldFileType, field.TypeString, value) + _node.FileType = value + } + if value, ok := sfc.mutation.FileURL(); ok { + _spec.SetField(storagefile.FieldFileURL, field.TypeString, value) + _node.FileURL = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.StorageFile.Create(). +// SetCreateTime(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.StorageFileUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (sfc *StorageFileCreate) OnConflict(opts ...sql.ConflictOption) *StorageFileUpsertOne { + sfc.conflict = opts + return &StorageFileUpsertOne{ + create: sfc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.StorageFile.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (sfc *StorageFileCreate) OnConflictColumns(columns ...string) *StorageFileUpsertOne { + sfc.conflict = append(sfc.conflict, sql.ConflictColumns(columns...)) + return &StorageFileUpsertOne{ + create: sfc, + } +} + +type ( + // StorageFileUpsertOne is the builder for "upsert"-ing + // one StorageFile node. + StorageFileUpsertOne struct { + create *StorageFileCreate + } + + // StorageFileUpsert is the "OnConflict" setter. + StorageFileUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdateTime sets the "update_time" field. +func (u *StorageFileUpsert) SetUpdateTime(v time.Time) *StorageFileUpsert { + u.Set(storagefile.FieldUpdateTime, v) + return u +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateUpdateTime() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldUpdateTime) + return u +} + +// SetBucketName sets the "bucket_name" field. +func (u *StorageFileUpsert) SetBucketName(v string) *StorageFileUpsert { + u.Set(storagefile.FieldBucketName, v) + return u +} + +// UpdateBucketName sets the "bucket_name" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateBucketName() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldBucketName) + return u +} + +// SetObjectName sets the "object_name" field. +func (u *StorageFileUpsert) SetObjectName(v string) *StorageFileUpsert { + u.Set(storagefile.FieldObjectName, v) + return u +} + +// UpdateObjectName sets the "object_name" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateObjectName() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldObjectName) + return u +} + +// ClearObjectName clears the value of the "object_name" field. +func (u *StorageFileUpsert) ClearObjectName() *StorageFileUpsert { + u.SetNull(storagefile.FieldObjectName) + return u +} + +// SetFilePath sets the "file_path" field. 
+func (u *StorageFileUpsert) SetFilePath(v string) *StorageFileUpsert { + u.Set(storagefile.FieldFilePath, v) + return u +} + +// UpdateFilePath sets the "file_path" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateFilePath() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldFilePath) + return u +} + +// SetFileType sets the "file_type" field. +func (u *StorageFileUpsert) SetFileType(v string) *StorageFileUpsert { + u.Set(storagefile.FieldFileType, v) + return u +} + +// UpdateFileType sets the "file_type" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateFileType() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldFileType) + return u +} + +// SetFileURL sets the "file_url" field. +func (u *StorageFileUpsert) SetFileURL(v string) *StorageFileUpsert { + u.Set(storagefile.FieldFileURL, v) + return u +} + +// UpdateFileURL sets the "file_url" field to the value that was provided on create. +func (u *StorageFileUpsert) UpdateFileURL() *StorageFileUpsert { + u.SetExcluded(storagefile.FieldFileURL) + return u +} + +// ClearFileURL clears the value of the "file_url" field. +func (u *StorageFileUpsert) ClearFileURL() *StorageFileUpsert { + u.SetNull(storagefile.FieldFileURL) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.StorageFile.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(storagefile.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *StorageFileUpsertOne) UpdateNewValues() *StorageFileUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(storagefile.FieldID) + } + if _, exists := u.create.mutation.CreateTime(); exists { + s.SetIgnore(storagefile.FieldCreateTime) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.StorageFile.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *StorageFileUpsertOne) Ignore() *StorageFileUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *StorageFileUpsertOne) DoNothing() *StorageFileUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the StorageFileCreate.OnConflict +// documentation for more info. +func (u *StorageFileUpsertOne) Update(set func(*StorageFileUpsert)) *StorageFileUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&StorageFileUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *StorageFileUpsertOne) SetUpdateTime(v time.Time) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateUpdateTime() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetBucketName sets the "bucket_name" field. 
+func (u *StorageFileUpsertOne) SetBucketName(v string) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetBucketName(v) + }) +} + +// UpdateBucketName sets the "bucket_name" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateBucketName() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateBucketName() + }) +} + +// SetObjectName sets the "object_name" field. +func (u *StorageFileUpsertOne) SetObjectName(v string) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetObjectName(v) + }) +} + +// UpdateObjectName sets the "object_name" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateObjectName() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateObjectName() + }) +} + +// ClearObjectName clears the value of the "object_name" field. +func (u *StorageFileUpsertOne) ClearObjectName() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.ClearObjectName() + }) +} + +// SetFilePath sets the "file_path" field. +func (u *StorageFileUpsertOne) SetFilePath(v string) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetFilePath(v) + }) +} + +// UpdateFilePath sets the "file_path" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateFilePath() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFilePath() + }) +} + +// SetFileType sets the "file_type" field. +func (u *StorageFileUpsertOne) SetFileType(v string) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetFileType(v) + }) +} + +// UpdateFileType sets the "file_type" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateFileType() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFileType() + }) +} + +// SetFileURL sets the "file_url" field. 
+func (u *StorageFileUpsertOne) SetFileURL(v string) *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.SetFileURL(v) + }) +} + +// UpdateFileURL sets the "file_url" field to the value that was provided on create. +func (u *StorageFileUpsertOne) UpdateFileURL() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFileURL() + }) +} + +// ClearFileURL clears the value of the "file_url" field. +func (u *StorageFileUpsertOne) ClearFileURL() *StorageFileUpsertOne { + return u.Update(func(s *StorageFileUpsert) { + s.ClearFileURL() + }) +} + +// Exec executes the query. +func (u *StorageFileUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for StorageFileCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *StorageFileUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *StorageFileUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: StorageFileUpsertOne.ID is not supported by MySQL driver. Use StorageFileUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *StorageFileUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// StorageFileCreateBulk is the builder for creating many StorageFile entities in bulk. 
type StorageFileCreateBulk struct {
	config
	// err defers a builder-construction error until Save/Exec is called.
	err      error
	// builders holds one StorageFileCreate per entity to be inserted.
	builders []*StorageFileCreate
	// conflict carries the ON CONFLICT / ON DUPLICATE KEY options for the batch.
	conflict []sql.ConflictOption
}

// Save creates the StorageFile entities in the database.
//
// NOTE(review): ent-generated code — do not hand-edit; regenerate via
// `go generate ./ent` after changing the schema.
//
// Each builder's hooks wrap a Mutator; the mutators are chained so that
// mutator i triggers mutator i+1, and only the last one in the chain
// performs the single batched INSERT via sqlgraph.BatchCreate.
func (sfcb *StorageFileCreateBulk) Save(ctx context.Context) ([]*StorageFile, error) {
	if sfcb.err != nil {
		return nil, sfcb.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(sfcb.builders))
	nodes := make([]*StorageFile, len(sfcb.builders))
	mutators := make([]Mutator, len(sfcb.builders))
	for i := range sfcb.builders {
		func(i int, root context.Context) {
			builder := sfcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*StorageFileMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: hand off to the next mutator in the chain.
					_, err = mutators[i+1].Mutate(root, sfcb.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = sfcb.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, sfcb.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				mutation.done = true
				return nodes[i], nil
			})
			// Apply this builder's hooks, outermost hook first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator.
		if _, err := mutators[0].Mutate(ctx, sfcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// SaveX is like Save, but panics if an error occurs.
+func (sfcb *StorageFileCreateBulk) SaveX(ctx context.Context) []*StorageFile { + v, err := sfcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (sfcb *StorageFileCreateBulk) Exec(ctx context.Context) error { + _, err := sfcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sfcb *StorageFileCreateBulk) ExecX(ctx context.Context) { + if err := sfcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.StorageFile.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.StorageFileUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (sfcb *StorageFileCreateBulk) OnConflict(opts ...sql.ConflictOption) *StorageFileUpsertBulk { + sfcb.conflict = opts + return &StorageFileUpsertBulk{ + create: sfcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.StorageFile.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (sfcb *StorageFileCreateBulk) OnConflictColumns(columns ...string) *StorageFileUpsertBulk { + sfcb.conflict = append(sfcb.conflict, sql.ConflictColumns(columns...)) + return &StorageFileUpsertBulk{ + create: sfcb, + } +} + +// StorageFileUpsertBulk is the builder for "upsert"-ing +// a bulk of StorageFile nodes. +type StorageFileUpsertBulk struct { + create *StorageFileCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.StorageFile.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(storagefile.FieldID) +// }), +// ). +// Exec(ctx) +func (u *StorageFileUpsertBulk) UpdateNewValues() *StorageFileUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(storagefile.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(storagefile.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.StorageFile.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *StorageFileUpsertBulk) Ignore() *StorageFileUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *StorageFileUpsertBulk) DoNothing() *StorageFileUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the StorageFileCreateBulk.OnConflict +// documentation for more info. +func (u *StorageFileUpsertBulk) Update(set func(*StorageFileUpsert)) *StorageFileUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&StorageFileUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *StorageFileUpsertBulk) SetUpdateTime(v time.Time) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. 
+func (u *StorageFileUpsertBulk) UpdateUpdateTime() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetBucketName sets the "bucket_name" field. +func (u *StorageFileUpsertBulk) SetBucketName(v string) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetBucketName(v) + }) +} + +// UpdateBucketName sets the "bucket_name" field to the value that was provided on create. +func (u *StorageFileUpsertBulk) UpdateBucketName() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateBucketName() + }) +} + +// SetObjectName sets the "object_name" field. +func (u *StorageFileUpsertBulk) SetObjectName(v string) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetObjectName(v) + }) +} + +// UpdateObjectName sets the "object_name" field to the value that was provided on create. +func (u *StorageFileUpsertBulk) UpdateObjectName() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateObjectName() + }) +} + +// ClearObjectName clears the value of the "object_name" field. +func (u *StorageFileUpsertBulk) ClearObjectName() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.ClearObjectName() + }) +} + +// SetFilePath sets the "file_path" field. +func (u *StorageFileUpsertBulk) SetFilePath(v string) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetFilePath(v) + }) +} + +// UpdateFilePath sets the "file_path" field to the value that was provided on create. +func (u *StorageFileUpsertBulk) UpdateFilePath() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFilePath() + }) +} + +// SetFileType sets the "file_type" field. 
+func (u *StorageFileUpsertBulk) SetFileType(v string) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetFileType(v) + }) +} + +// UpdateFileType sets the "file_type" field to the value that was provided on create. +func (u *StorageFileUpsertBulk) UpdateFileType() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFileType() + }) +} + +// SetFileURL sets the "file_url" field. +func (u *StorageFileUpsertBulk) SetFileURL(v string) *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.SetFileURL(v) + }) +} + +// UpdateFileURL sets the "file_url" field to the value that was provided on create. +func (u *StorageFileUpsertBulk) UpdateFileURL() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.UpdateFileURL() + }) +} + +// ClearFileURL clears the value of the "file_url" field. +func (u *StorageFileUpsertBulk) ClearFileURL() *StorageFileUpsertBulk { + return u.Update(func(s *StorageFileUpsert) { + s.ClearFileURL() + }) +} + +// Exec executes the query. +func (u *StorageFileUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the StorageFileCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for StorageFileCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *StorageFileUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/storagefile_delete.go b/ent/storagefile_delete.go new file mode 100644 index 0000000..fa5ea2c --- /dev/null +++ b/ent/storagefile_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 

package ent

import (
	"context"
	"registry-backend/ent/predicate"
	"registry-backend/ent/storagefile"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
)

// StorageFileDelete is the builder for deleting a StorageFile entity.
type StorageFileDelete struct {
	config
	hooks    []Hook
	mutation *StorageFileMutation
}

// Where appends a list of predicates to the StorageFileDelete builder.
func (sfd *StorageFileDelete) Where(ps ...predicate.StorageFile) *StorageFileDelete {
	sfd.mutation.Where(ps...)
	return sfd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (sfd *StorageFileDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, sfd.sqlExec, sfd.mutation, sfd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (sfd *StorageFileDelete) ExecX(ctx context.Context) int {
	n, err := sfd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the DELETE spec, applies the accumulated predicates, runs it
// against the driver, and rewraps constraint violations as *ConstraintError.
func (sfd *StorageFileDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(storagefile.Table, sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID))
	if ps := sfd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, sfd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	sfd.mutation.done = true
	return affected, err
}

// StorageFileDeleteOne is the builder for deleting a single StorageFile entity.
type StorageFileDeleteOne struct {
	sfd *StorageFileDelete
}

// Where appends a list of predicates to the StorageFileDelete builder.
func (sfdo *StorageFileDeleteOne) Where(ps ...predicate.StorageFile) *StorageFileDeleteOne {
	sfdo.sfd.mutation.Where(ps...)
	return sfdo
}

// Exec executes the deletion query.
func (sfdo *StorageFileDeleteOne) Exec(ctx context.Context) error {
	n, err := sfdo.sfd.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// Deleting exactly one entity is the contract; zero rows means it was not found.
		return &NotFoundError{storagefile.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (sfdo *StorageFileDeleteOne) ExecX(ctx context.Context) {
	if err := sfdo.Exec(ctx); err != nil {
		panic(err)
	}
}
diff --git a/ent/storagefile_query.go b/ent/storagefile_query.go
new file mode 100644
index 0000000..f2fe14e
--- /dev/null
+++ b/ent/storagefile_query.go
@@ -0,0 +1,564 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"fmt"
	"math"
	"registry-backend/ent/predicate"
	"registry-backend/ent/storagefile"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/google/uuid"
)

// StorageFileQuery is the builder for querying StorageFile entities.
type StorageFileQuery struct {
	config
	ctx        *QueryContext
	order      []storagefile.OrderOption
	inters     []Interceptor
	predicates []predicate.StorageFile
	modifiers  []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the StorageFileQuery builder.
func (sfq *StorageFileQuery) Where(ps ...predicate.StorageFile) *StorageFileQuery {
	sfq.predicates = append(sfq.predicates, ps...)
	return sfq
}

// Limit the number of records to be returned by this query.
func (sfq *StorageFileQuery) Limit(limit int) *StorageFileQuery {
	sfq.ctx.Limit = &limit
	return sfq
}

// Offset to start from.
func (sfq *StorageFileQuery) Offset(offset int) *StorageFileQuery {
	sfq.ctx.Offset = &offset
	return sfq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (sfq *StorageFileQuery) Unique(unique bool) *StorageFileQuery {
	sfq.ctx.Unique = &unique
	return sfq
}

// Order specifies how the records should be ordered.
func (sfq *StorageFileQuery) Order(o ...storagefile.OrderOption) *StorageFileQuery {
	sfq.order = append(sfq.order, o...)
	return sfq
}

// First returns the first StorageFile entity from the query.
// Returns a *NotFoundError when no StorageFile was found.
func (sfq *StorageFileQuery) First(ctx context.Context) (*StorageFile, error) {
	// LIMIT 1 so the database does the truncation, not the client.
	nodes, err := sfq.Limit(1).All(setContextOp(ctx, sfq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{storagefile.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
func (sfq *StorageFileQuery) FirstX(ctx context.Context) *StorageFile {
	node, err := sfq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first StorageFile ID from the query.
// Returns a *NotFoundError when no StorageFile ID was found.
func (sfq *StorageFileQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
	var ids []uuid.UUID
	if ids, err = sfq.Limit(1).IDs(setContextOp(ctx, sfq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{storagefile.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (sfq *StorageFileQuery) FirstIDX(ctx context.Context) uuid.UUID {
	id, err := sfq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single StorageFile entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one StorageFile entity is found.
// Returns a *NotFoundError when no StorageFile entities are found.
func (sfq *StorageFileQuery) Only(ctx context.Context) (*StorageFile, error) {
	// LIMIT 2 is enough to distinguish "exactly one" from "more than one".
	nodes, err := sfq.Limit(2).All(setContextOp(ctx, sfq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{storagefile.Label}
	default:
		return nil, &NotSingularError{storagefile.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (sfq *StorageFileQuery) OnlyX(ctx context.Context) *StorageFile {
	node, err := sfq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only StorageFile ID in the query.
// Returns a *NotSingularError when more than one StorageFile ID is found.
// Returns a *NotFoundError when no entities are found.
func (sfq *StorageFileQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
	var ids []uuid.UUID
	if ids, err = sfq.Limit(2).IDs(setContextOp(ctx, sfq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{storagefile.Label}
	default:
		err = &NotSingularError{storagefile.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (sfq *StorageFileQuery) OnlyIDX(ctx context.Context) uuid.UUID {
	id, err := sfq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// All executes the query and returns a list of StorageFiles.
func (sfq *StorageFileQuery) All(ctx context.Context) ([]*StorageFile, error) {
	ctx = setContextOp(ctx, sfq.ctx, "All")
	if err := sfq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Run the query through any registered interceptors before hitting SQL.
	qr := querierAll[[]*StorageFile, *StorageFileQuery]()
	return withInterceptors[[]*StorageFile](ctx, sfq, qr, sfq.inters)
}

// AllX is like All, but panics if an error occurs.
+func (sfq *StorageFileQuery) AllX(ctx context.Context) []*StorageFile { + nodes, err := sfq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of StorageFile IDs. +func (sfq *StorageFileQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if sfq.ctx.Unique == nil && sfq.path != nil { + sfq.Unique(true) + } + ctx = setContextOp(ctx, sfq.ctx, "IDs") + if err = sfq.Select(storagefile.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (sfq *StorageFileQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := sfq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (sfq *StorageFileQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, sfq.ctx, "Count") + if err := sfq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, sfq, querierCount[*StorageFileQuery](), sfq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (sfq *StorageFileQuery) CountX(ctx context.Context) int { + count, err := sfq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (sfq *StorageFileQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, sfq.ctx, "Exist") + switch _, err := sfq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (sfq *StorageFileQuery) ExistX(ctx context.Context) bool { + exist, err := sfq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the StorageFileQuery builder, including all associated steps. 
It can be +// used to prepare common query builders and use them differently after the clone is made. +func (sfq *StorageFileQuery) Clone() *StorageFileQuery { + if sfq == nil { + return nil + } + return &StorageFileQuery{ + config: sfq.config, + ctx: sfq.ctx.Clone(), + order: append([]storagefile.OrderOption{}, sfq.order...), + inters: append([]Interceptor{}, sfq.inters...), + predicates: append([]predicate.StorageFile{}, sfq.predicates...), + // clone intermediate query. + sql: sfq.sql.Clone(), + path: sfq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.StorageFile.Query(). +// GroupBy(storagefile.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (sfq *StorageFileQuery) GroupBy(field string, fields ...string) *StorageFileGroupBy { + sfq.ctx.Fields = append([]string{field}, fields...) + grbuild := &StorageFileGroupBy{build: sfq} + grbuild.flds = &sfq.ctx.Fields + grbuild.label = storagefile.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.StorageFile.Query(). +// Select(storagefile.FieldCreateTime). +// Scan(ctx, &v) +func (sfq *StorageFileQuery) Select(fields ...string) *StorageFileSelect { + sfq.ctx.Fields = append(sfq.ctx.Fields, fields...) + sbuild := &StorageFileSelect{StorageFileQuery: sfq} + sbuild.label = storagefile.Label + sbuild.flds, sbuild.scan = &sfq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a StorageFileSelect configured with the given aggregations. 
+func (sfq *StorageFileQuery) Aggregate(fns ...AggregateFunc) *StorageFileSelect { + return sfq.Select().Aggregate(fns...) +} + +func (sfq *StorageFileQuery) prepareQuery(ctx context.Context) error { + for _, inter := range sfq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, sfq); err != nil { + return err + } + } + } + for _, f := range sfq.ctx.Fields { + if !storagefile.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if sfq.path != nil { + prev, err := sfq.path(ctx) + if err != nil { + return err + } + sfq.sql = prev + } + return nil +} + +func (sfq *StorageFileQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*StorageFile, error) { + var ( + nodes = []*StorageFile{} + _spec = sfq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*StorageFile).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &StorageFile{config: sfq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(sfq.modifiers) > 0 { + _spec.Modifiers = sfq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, sfq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (sfq *StorageFileQuery) sqlCount(ctx context.Context) (int, error) { + _spec := sfq.querySpec() + if len(sfq.modifiers) > 0 { + _spec.Modifiers = sfq.modifiers + } + _spec.Node.Columns = sfq.ctx.Fields + if len(sfq.ctx.Fields) > 0 { + _spec.Unique = sfq.ctx.Unique != nil && *sfq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, sfq.driver, _spec) +} + +func (sfq *StorageFileQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(storagefile.Table, 
storagefile.Columns, sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID)) + _spec.From = sfq.sql + if unique := sfq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if sfq.path != nil { + _spec.Unique = true + } + if fields := sfq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, storagefile.FieldID) + for i := range fields { + if fields[i] != storagefile.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := sfq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := sfq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := sfq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := sfq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (sfq *StorageFileQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(sfq.driver.Dialect()) + t1 := builder.Table(storagefile.Table) + columns := sfq.ctx.Fields + if len(columns) == 0 { + columns = storagefile.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if sfq.sql != nil { + selector = sfq.sql + selector.Select(selector.Columns(columns...)...) + } + if sfq.ctx.Unique != nil && *sfq.ctx.Unique { + selector.Distinct() + } + for _, m := range sfq.modifiers { + m(selector) + } + for _, p := range sfq.predicates { + p(selector) + } + for _, p := range sfq.order { + p(selector) + } + if offset := sfq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := sfq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (sfq *StorageFileQuery) ForUpdate(opts ...sql.LockOption) *StorageFileQuery { + if sfq.driver.Dialect() == dialect.Postgres { + sfq.Unique(false) + } + sfq.modifiers = append(sfq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return sfq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (sfq *StorageFileQuery) ForShare(opts ...sql.LockOption) *StorageFileQuery { + if sfq.driver.Dialect() == dialect.Postgres { + sfq.Unique(false) + } + sfq.modifiers = append(sfq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return sfq +} + +// StorageFileGroupBy is the group-by builder for StorageFile entities. +type StorageFileGroupBy struct { + selector + build *StorageFileQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (sfgb *StorageFileGroupBy) Aggregate(fns ...AggregateFunc) *StorageFileGroupBy { + sfgb.fns = append(sfgb.fns, fns...) + return sfgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (sfgb *StorageFileGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sfgb.build.ctx, "GroupBy") + if err := sfgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*StorageFileQuery, *StorageFileGroupBy](ctx, sfgb.build, sfgb, sfgb.build.inters, v) +} + +func (sfgb *StorageFileGroupBy) sqlScan(ctx context.Context, root *StorageFileQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(sfgb.fns)) + for _, fn := range sfgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*sfgb.flds)+len(sfgb.fns)) + for _, f := range *sfgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*sfgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := sfgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// StorageFileSelect is the builder for selecting fields of StorageFile entities. +type StorageFileSelect struct { + *StorageFileQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (sfs *StorageFileSelect) Aggregate(fns ...AggregateFunc) *StorageFileSelect { + sfs.fns = append(sfs.fns, fns...) + return sfs +} + +// Scan applies the selector query and scans the result into the given value. 
+func (sfs *StorageFileSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sfs.ctx, "Select") + if err := sfs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*StorageFileQuery, *StorageFileSelect](ctx, sfs.StorageFileQuery, sfs, sfs.inters, v) +} + +func (sfs *StorageFileSelect) sqlScan(ctx context.Context, root *StorageFileQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(sfs.fns)) + for _, fn := range sfs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*sfs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := sfs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/storagefile_update.go b/ent/storagefile_update.go new file mode 100644 index 0000000..26bebda --- /dev/null +++ b/ent/storagefile_update.go @@ -0,0 +1,474 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/predicate" + "registry-backend/ent/storagefile" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// StorageFileUpdate is the builder for updating StorageFile entities. +type StorageFileUpdate struct { + config + hooks []Hook + mutation *StorageFileMutation +} + +// Where appends a list predicates to the StorageFileUpdate builder. +func (sfu *StorageFileUpdate) Where(ps ...predicate.StorageFile) *StorageFileUpdate { + sfu.mutation.Where(ps...) + return sfu +} + +// SetUpdateTime sets the "update_time" field. 
+func (sfu *StorageFileUpdate) SetUpdateTime(t time.Time) *StorageFileUpdate { + sfu.mutation.SetUpdateTime(t) + return sfu +} + +// SetBucketName sets the "bucket_name" field. +func (sfu *StorageFileUpdate) SetBucketName(s string) *StorageFileUpdate { + sfu.mutation.SetBucketName(s) + return sfu +} + +// SetNillableBucketName sets the "bucket_name" field if the given value is not nil. +func (sfu *StorageFileUpdate) SetNillableBucketName(s *string) *StorageFileUpdate { + if s != nil { + sfu.SetBucketName(*s) + } + return sfu +} + +// SetObjectName sets the "object_name" field. +func (sfu *StorageFileUpdate) SetObjectName(s string) *StorageFileUpdate { + sfu.mutation.SetObjectName(s) + return sfu +} + +// SetNillableObjectName sets the "object_name" field if the given value is not nil. +func (sfu *StorageFileUpdate) SetNillableObjectName(s *string) *StorageFileUpdate { + if s != nil { + sfu.SetObjectName(*s) + } + return sfu +} + +// ClearObjectName clears the value of the "object_name" field. +func (sfu *StorageFileUpdate) ClearObjectName() *StorageFileUpdate { + sfu.mutation.ClearObjectName() + return sfu +} + +// SetFilePath sets the "file_path" field. +func (sfu *StorageFileUpdate) SetFilePath(s string) *StorageFileUpdate { + sfu.mutation.SetFilePath(s) + return sfu +} + +// SetNillableFilePath sets the "file_path" field if the given value is not nil. +func (sfu *StorageFileUpdate) SetNillableFilePath(s *string) *StorageFileUpdate { + if s != nil { + sfu.SetFilePath(*s) + } + return sfu +} + +// SetFileType sets the "file_type" field. +func (sfu *StorageFileUpdate) SetFileType(s string) *StorageFileUpdate { + sfu.mutation.SetFileType(s) + return sfu +} + +// SetNillableFileType sets the "file_type" field if the given value is not nil. +func (sfu *StorageFileUpdate) SetNillableFileType(s *string) *StorageFileUpdate { + if s != nil { + sfu.SetFileType(*s) + } + return sfu +} + +// SetFileURL sets the "file_url" field. 
+func (sfu *StorageFileUpdate) SetFileURL(s string) *StorageFileUpdate { + sfu.mutation.SetFileURL(s) + return sfu +} + +// SetNillableFileURL sets the "file_url" field if the given value is not nil. +func (sfu *StorageFileUpdate) SetNillableFileURL(s *string) *StorageFileUpdate { + if s != nil { + sfu.SetFileURL(*s) + } + return sfu +} + +// ClearFileURL clears the value of the "file_url" field. +func (sfu *StorageFileUpdate) ClearFileURL() *StorageFileUpdate { + sfu.mutation.ClearFileURL() + return sfu +} + +// Mutation returns the StorageFileMutation object of the builder. +func (sfu *StorageFileUpdate) Mutation() *StorageFileMutation { + return sfu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (sfu *StorageFileUpdate) Save(ctx context.Context) (int, error) { + sfu.defaults() + return withHooks(ctx, sfu.sqlSave, sfu.mutation, sfu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (sfu *StorageFileUpdate) SaveX(ctx context.Context) int { + affected, err := sfu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (sfu *StorageFileUpdate) Exec(ctx context.Context) error { + _, err := sfu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sfu *StorageFileUpdate) ExecX(ctx context.Context) { + if err := sfu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (sfu *StorageFileUpdate) defaults() { + if _, ok := sfu.mutation.UpdateTime(); !ok { + v := storagefile.UpdateDefaultUpdateTime() + sfu.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (sfu *StorageFileUpdate) check() error { + if v, ok := sfu.mutation.BucketName(); ok { + if err := storagefile.BucketNameValidator(v); err != nil { + return &ValidationError{Name: "bucket_name", err: fmt.Errorf(`ent: validator failed for field "StorageFile.bucket_name": %w`, err)} + } + } + if v, ok := sfu.mutation.ObjectName(); ok { + if err := storagefile.ObjectNameValidator(v); err != nil { + return &ValidationError{Name: "object_name", err: fmt.Errorf(`ent: validator failed for field "StorageFile.object_name": %w`, err)} + } + } + if v, ok := sfu.mutation.FilePath(); ok { + if err := storagefile.FilePathValidator(v); err != nil { + return &ValidationError{Name: "file_path", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_path": %w`, err)} + } + } + if v, ok := sfu.mutation.FileType(); ok { + if err := storagefile.FileTypeValidator(v); err != nil { + return &ValidationError{Name: "file_type", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_type": %w`, err)} + } + } + return nil +} + +func (sfu *StorageFileUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := sfu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(storagefile.Table, storagefile.Columns, sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID)) + if ps := sfu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := sfu.mutation.UpdateTime(); ok { + _spec.SetField(storagefile.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := sfu.mutation.BucketName(); ok { + _spec.SetField(storagefile.FieldBucketName, field.TypeString, value) + } + if value, ok := sfu.mutation.ObjectName(); ok { + _spec.SetField(storagefile.FieldObjectName, field.TypeString, value) + } + if sfu.mutation.ObjectNameCleared() { + _spec.ClearField(storagefile.FieldObjectName, field.TypeString) + } + if value, ok := sfu.mutation.FilePath(); ok 
{ + _spec.SetField(storagefile.FieldFilePath, field.TypeString, value) + } + if value, ok := sfu.mutation.FileType(); ok { + _spec.SetField(storagefile.FieldFileType, field.TypeString, value) + } + if value, ok := sfu.mutation.FileURL(); ok { + _spec.SetField(storagefile.FieldFileURL, field.TypeString, value) + } + if sfu.mutation.FileURLCleared() { + _spec.ClearField(storagefile.FieldFileURL, field.TypeString) + } + if n, err = sqlgraph.UpdateNodes(ctx, sfu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{storagefile.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + sfu.mutation.done = true + return n, nil +} + +// StorageFileUpdateOne is the builder for updating a single StorageFile entity. +type StorageFileUpdateOne struct { + config + fields []string + hooks []Hook + mutation *StorageFileMutation +} + +// SetUpdateTime sets the "update_time" field. +func (sfuo *StorageFileUpdateOne) SetUpdateTime(t time.Time) *StorageFileUpdateOne { + sfuo.mutation.SetUpdateTime(t) + return sfuo +} + +// SetBucketName sets the "bucket_name" field. +func (sfuo *StorageFileUpdateOne) SetBucketName(s string) *StorageFileUpdateOne { + sfuo.mutation.SetBucketName(s) + return sfuo +} + +// SetNillableBucketName sets the "bucket_name" field if the given value is not nil. +func (sfuo *StorageFileUpdateOne) SetNillableBucketName(s *string) *StorageFileUpdateOne { + if s != nil { + sfuo.SetBucketName(*s) + } + return sfuo +} + +// SetObjectName sets the "object_name" field. +func (sfuo *StorageFileUpdateOne) SetObjectName(s string) *StorageFileUpdateOne { + sfuo.mutation.SetObjectName(s) + return sfuo +} + +// SetNillableObjectName sets the "object_name" field if the given value is not nil. 
+func (sfuo *StorageFileUpdateOne) SetNillableObjectName(s *string) *StorageFileUpdateOne { + if s != nil { + sfuo.SetObjectName(*s) + } + return sfuo +} + +// ClearObjectName clears the value of the "object_name" field. +func (sfuo *StorageFileUpdateOne) ClearObjectName() *StorageFileUpdateOne { + sfuo.mutation.ClearObjectName() + return sfuo +} + +// SetFilePath sets the "file_path" field. +func (sfuo *StorageFileUpdateOne) SetFilePath(s string) *StorageFileUpdateOne { + sfuo.mutation.SetFilePath(s) + return sfuo +} + +// SetNillableFilePath sets the "file_path" field if the given value is not nil. +func (sfuo *StorageFileUpdateOne) SetNillableFilePath(s *string) *StorageFileUpdateOne { + if s != nil { + sfuo.SetFilePath(*s) + } + return sfuo +} + +// SetFileType sets the "file_type" field. +func (sfuo *StorageFileUpdateOne) SetFileType(s string) *StorageFileUpdateOne { + sfuo.mutation.SetFileType(s) + return sfuo +} + +// SetNillableFileType sets the "file_type" field if the given value is not nil. +func (sfuo *StorageFileUpdateOne) SetNillableFileType(s *string) *StorageFileUpdateOne { + if s != nil { + sfuo.SetFileType(*s) + } + return sfuo +} + +// SetFileURL sets the "file_url" field. +func (sfuo *StorageFileUpdateOne) SetFileURL(s string) *StorageFileUpdateOne { + sfuo.mutation.SetFileURL(s) + return sfuo +} + +// SetNillableFileURL sets the "file_url" field if the given value is not nil. +func (sfuo *StorageFileUpdateOne) SetNillableFileURL(s *string) *StorageFileUpdateOne { + if s != nil { + sfuo.SetFileURL(*s) + } + return sfuo +} + +// ClearFileURL clears the value of the "file_url" field. +func (sfuo *StorageFileUpdateOne) ClearFileURL() *StorageFileUpdateOne { + sfuo.mutation.ClearFileURL() + return sfuo +} + +// Mutation returns the StorageFileMutation object of the builder. +func (sfuo *StorageFileUpdateOne) Mutation() *StorageFileMutation { + return sfuo.mutation +} + +// Where appends a list predicates to the StorageFileUpdate builder. 
+func (sfuo *StorageFileUpdateOne) Where(ps ...predicate.StorageFile) *StorageFileUpdateOne { + sfuo.mutation.Where(ps...) + return sfuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (sfuo *StorageFileUpdateOne) Select(field string, fields ...string) *StorageFileUpdateOne { + sfuo.fields = append([]string{field}, fields...) + return sfuo +} + +// Save executes the query and returns the updated StorageFile entity. +func (sfuo *StorageFileUpdateOne) Save(ctx context.Context) (*StorageFile, error) { + sfuo.defaults() + return withHooks(ctx, sfuo.sqlSave, sfuo.mutation, sfuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (sfuo *StorageFileUpdateOne) SaveX(ctx context.Context) *StorageFile { + node, err := sfuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (sfuo *StorageFileUpdateOne) Exec(ctx context.Context) error { + _, err := sfuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sfuo *StorageFileUpdateOne) ExecX(ctx context.Context) { + if err := sfuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (sfuo *StorageFileUpdateOne) defaults() { + if _, ok := sfuo.mutation.UpdateTime(); !ok { + v := storagefile.UpdateDefaultUpdateTime() + sfuo.mutation.SetUpdateTime(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (sfuo *StorageFileUpdateOne) check() error { + if v, ok := sfuo.mutation.BucketName(); ok { + if err := storagefile.BucketNameValidator(v); err != nil { + return &ValidationError{Name: "bucket_name", err: fmt.Errorf(`ent: validator failed for field "StorageFile.bucket_name": %w`, err)} + } + } + if v, ok := sfuo.mutation.ObjectName(); ok { + if err := storagefile.ObjectNameValidator(v); err != nil { + return &ValidationError{Name: "object_name", err: fmt.Errorf(`ent: validator failed for field "StorageFile.object_name": %w`, err)} + } + } + if v, ok := sfuo.mutation.FilePath(); ok { + if err := storagefile.FilePathValidator(v); err != nil { + return &ValidationError{Name: "file_path", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_path": %w`, err)} + } + } + if v, ok := sfuo.mutation.FileType(); ok { + if err := storagefile.FileTypeValidator(v); err != nil { + return &ValidationError{Name: "file_type", err: fmt.Errorf(`ent: validator failed for field "StorageFile.file_type": %w`, err)} + } + } + return nil +} + +func (sfuo *StorageFileUpdateOne) sqlSave(ctx context.Context) (_node *StorageFile, err error) { + if err := sfuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(storagefile.Table, storagefile.Columns, sqlgraph.NewFieldSpec(storagefile.FieldID, field.TypeUUID)) + id, ok := sfuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "StorageFile.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := sfuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, storagefile.FieldID) + for _, f := range fields { + if !storagefile.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != storagefile.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := sfuo.mutation.predicates; 
len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := sfuo.mutation.UpdateTime(); ok { + _spec.SetField(storagefile.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := sfuo.mutation.BucketName(); ok { + _spec.SetField(storagefile.FieldBucketName, field.TypeString, value) + } + if value, ok := sfuo.mutation.ObjectName(); ok { + _spec.SetField(storagefile.FieldObjectName, field.TypeString, value) + } + if sfuo.mutation.ObjectNameCleared() { + _spec.ClearField(storagefile.FieldObjectName, field.TypeString) + } + if value, ok := sfuo.mutation.FilePath(); ok { + _spec.SetField(storagefile.FieldFilePath, field.TypeString, value) + } + if value, ok := sfuo.mutation.FileType(); ok { + _spec.SetField(storagefile.FieldFileType, field.TypeString, value) + } + if value, ok := sfuo.mutation.FileURL(); ok { + _spec.SetField(storagefile.FieldFileURL, field.TypeString, value) + } + if sfuo.mutation.FileURLCleared() { + _spec.ClearField(storagefile.FieldFileURL, field.TypeString) + } + _node = &StorageFile{config: sfuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, sfuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{storagefile.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + sfuo.mutation.done = true + return _node, nil +} diff --git a/ent/tx.go b/ent/tx.go new file mode 100644 index 0000000..2fae588 --- /dev/null +++ b/ent/tx.go @@ -0,0 +1,234 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // CIWorkflowResult is the client for interacting with the CIWorkflowResult builders. 
+ CIWorkflowResult *CIWorkflowResultClient + // GitCommit is the client for interacting with the GitCommit builders. + GitCommit *GitCommitClient + // Node is the client for interacting with the Node builders. + Node *NodeClient + // NodeVersion is the client for interacting with the NodeVersion builders. + NodeVersion *NodeVersionClient + // PersonalAccessToken is the client for interacting with the PersonalAccessToken builders. + PersonalAccessToken *PersonalAccessTokenClient + // Publisher is the client for interacting with the Publisher builders. + Publisher *PublisherClient + // PublisherPermission is the client for interacting with the PublisherPermission builders. + PublisherPermission *PublisherPermissionClient + // StorageFile is the client for interacting with the StorageFile builders. + StorageFile *StorageFileClient + // User is the client for interacting with the User builders. + User *UserClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. 
+ // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. 
+func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.CIWorkflowResult = NewCIWorkflowResultClient(tx.config) + tx.GitCommit = NewGitCommitClient(tx.config) + tx.Node = NewNodeClient(tx.config) + tx.NodeVersion = NewNodeVersionClient(tx.config) + tx.PersonalAccessToken = NewPersonalAccessTokenClient(tx.config) + tx.Publisher = NewPublisherClient(tx.config) + tx.PublisherPermission = NewPublisherPermissionClient(tx.config) + tx.StorageFile = NewStorageFileClient(tx.config) + tx.User = NewUserClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: CIWorkflowResult.QueryXXX(), the query will be executed +// through the driver which created this transaction. 
+// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/ent/user.go b/ent/user.go new file mode 100644 index 0000000..23b524f --- /dev/null +++ b/ent/user.go @@ -0,0 +1,188 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "fmt" + "registry-backend/ent/user" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. + // The firebase UID of the user + ID string `json:"id,omitempty"` + // CreateTime holds the value of the "create_time" field. + CreateTime time.Time `json:"create_time,omitempty"` + // UpdateTime holds the value of the "update_time" field. + UpdateTime time.Time `json:"update_time,omitempty"` + // Email holds the value of the "email" field. + Email string `json:"email,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Whether the user is approved to use the platform + IsApproved bool `json:"is_approved,omitempty"` + // Whether the user is approved to use the platform + IsAdmin bool `json:"is_admin,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserQuery when eager-loading is set. + Edges UserEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserEdges holds the relations/edges for other nodes in the graph. +type UserEdges struct { + // PublisherPermissions holds the value of the publisher_permissions edge. + PublisherPermissions []*PublisherPermission `json:"publisher_permissions,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// PublisherPermissionsOrErr returns the PublisherPermissions value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) PublisherPermissionsOrErr() ([]*PublisherPermission, error) { + if e.loadedTypes[0] { + return e.PublisherPermissions, nil + } + return nil, &NotLoadedError{edge: "publisher_permissions"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
func (*User) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case user.FieldIsApproved, user.FieldIsAdmin:
			values[i] = new(sql.NullBool)
		case user.FieldID, user.FieldEmail, user.FieldName:
			values[i] = new(sql.NullString)
		case user.FieldCreateTime, user.FieldUpdateTime:
			values[i] = new(sql.NullTime)
		default:
			// Columns not declared on the schema (e.g. selected via modifiers)
			// are scanned generically and surfaced later through Value.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the User fields.
func (u *User) assignValues(columns []string, values []any) error {
	// Fewer scanned values than columns is an error; extra values are tolerated.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case user.FieldID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value.Valid {
				u.ID = value.String
			}
		case user.FieldCreateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field create_time", values[i])
			} else if value.Valid {
				u.CreateTime = value.Time
			}
		case user.FieldUpdateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field update_time", values[i])
			} else if value.Valid {
				u.UpdateTime = value.Time
			}
		case user.FieldEmail:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field email", values[i])
			} else if value.Valid {
				u.Email = value.String
			}
		case user.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				u.Name = value.String
			}
		case user.FieldIsApproved:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field is_approved", values[i])
			} else if value.Valid {
				u.IsApproved = value.Bool
			}
		case user.FieldIsAdmin:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field is_admin", values[i])
			} else if value.Valid {
				u.IsAdmin = value.Bool
			}
		default:
			// Unknown columns are retained so Value can return them by name.
			u.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the User.
// This includes values selected through modifiers, order, etc.
func (u *User) Value(name string) (ent.Value, error) {
	return u.selectValues.Get(name)
}

// QueryPublisherPermissions queries the "publisher_permissions" edge of the User entity.
func (u *User) QueryPublisherPermissions() *PublisherPermissionQuery {
	return NewUserClient(u.config).QueryPublisherPermissions(u)
}

// Update returns a builder for updating this User.
// Note that you need to call User.Unwrap() before calling this method if this User
// was returned from a transaction, and the transaction was committed or rolled back.
func (u *User) Update() *UserUpdateOne {
	return NewUserClient(u.config).UpdateOne(u)
}

// Unwrap unwraps the User entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (u *User) Unwrap() *User {
	_tx, ok := u.config.driver.(*txDriver)
	if !ok {
		// Calling Unwrap on an entity not loaded through a Tx is a programmer error.
		panic("ent: User is not a transactional entity")
	}
	u.config.driver = _tx.drv
	return u
}

// String implements the fmt.Stringer.
+func (u *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString("create_time=") + builder.WriteString(u.CreateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("update_time=") + builder.WriteString(u.UpdateTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("email=") + builder.WriteString(u.Email) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(u.Name) + builder.WriteString(", ") + builder.WriteString("is_approved=") + builder.WriteString(fmt.Sprintf("%v", u.IsApproved)) + builder.WriteString(", ") + builder.WriteString("is_admin=") + builder.WriteString(fmt.Sprintf("%v", u.IsAdmin)) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. +type Users []*User diff --git a/ent/user/user.go b/ent/user/user.go new file mode 100644 index 0000000..656c8fb --- /dev/null +++ b/ent/user/user.go @@ -0,0 +1,133 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreateTime holds the string denoting the create_time field in the database. + FieldCreateTime = "create_time" + // FieldUpdateTime holds the string denoting the update_time field in the database. + FieldUpdateTime = "update_time" + // FieldEmail holds the string denoting the email field in the database. + FieldEmail = "email" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldIsApproved holds the string denoting the is_approved field in the database. 
	FieldIsApproved = "is_approved"
	// FieldIsAdmin holds the string denoting the is_admin field in the database.
	FieldIsAdmin = "is_admin"
	// EdgePublisherPermissions holds the string denoting the publisher_permissions edge name in mutations.
	EdgePublisherPermissions = "publisher_permissions"
	// Table holds the table name of the user in the database.
	Table = "users"
	// PublisherPermissionsTable is the table that holds the publisher_permissions relation/edge.
	PublisherPermissionsTable = "publisher_permissions"
	// PublisherPermissionsInverseTable is the table name for the PublisherPermission entity.
	// It exists in this package in order to avoid circular dependency with the "publisherpermission" package.
	PublisherPermissionsInverseTable = "publisher_permissions"
	// PublisherPermissionsColumn is the table column denoting the publisher_permissions relation/edge.
	PublisherPermissionsColumn = "user_id"
)

// Columns holds all SQL columns for user fields.
var Columns = []string{
	FieldID,
	FieldCreateTime,
	FieldUpdateTime,
	FieldEmail,
	FieldName,
	FieldIsApproved,
	FieldIsAdmin,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

// Runtime defaults below are wired up by the generated runtime package.
var (
	// DefaultCreateTime holds the default value on creation for the "create_time" field.
	DefaultCreateTime func() time.Time
	// DefaultUpdateTime holds the default value on creation for the "update_time" field.
	DefaultUpdateTime func() time.Time
	// UpdateDefaultUpdateTime holds the default value on update for the "update_time" field.
	UpdateDefaultUpdateTime func() time.Time
	// DefaultIsApproved holds the default value on creation for the "is_approved" field.
	DefaultIsApproved bool
	// DefaultIsAdmin holds the default value on creation for the "is_admin" field.
	DefaultIsAdmin bool
)

// OrderOption defines the ordering options for the User queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreateTime orders the results by the create_time field.
func ByCreateTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreateTime, opts...).ToFunc()
}

// ByUpdateTime orders the results by the update_time field.
func ByUpdateTime(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdateTime, opts...).ToFunc()
}

// ByEmail orders the results by the email field.
func ByEmail(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEmail, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByIsApproved orders the results by the is_approved field.
func ByIsApproved(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIsApproved, opts...).ToFunc()
}

// ByIsAdmin orders the results by the is_admin field.
func ByIsAdmin(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIsAdmin, opts...).ToFunc()
}

// ByPublisherPermissionsCount orders the results by publisher_permissions count.
func ByPublisherPermissionsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newPublisherPermissionsStep(), opts...)
	}
}

// ByPublisherPermissions orders the results by publisher_permissions terms.
func ByPublisherPermissions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPublisherPermissionsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// newPublisherPermissionsStep builds the O2M graph step from users.id to
// publisher_permissions.user_id used by the ordering helpers above.
func newPublisherPermissionsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(PublisherPermissionsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, PublisherPermissionsTable, PublisherPermissionsColumn),
	)
}

// Code generated by ent, DO NOT EDIT.

package user

import (
	"registry-backend/ent/predicate"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

// ID filters vertices based on their ID field.
func ID(id string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id string) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...string) predicate.User {
	return predicate.User(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...string) predicate.User {
	return predicate.User(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id string) predicate.User {
	return predicate.User(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id string) predicate.User {
	return predicate.User(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id string) predicate.User {
	return predicate.User(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id string) predicate.User {
	return predicate.User(sql.FieldLTE(FieldID, id))
}

// IDEqualFold applies the EqualFold predicate on the ID field.
func IDEqualFold(id string) predicate.User {
	return predicate.User(sql.FieldEqualFold(FieldID, id))
}

// IDContainsFold applies the ContainsFold predicate on the ID field.
func IDContainsFold(id string) predicate.User {
	return predicate.User(sql.FieldContainsFold(FieldID, id))
}

// Shorthand per-field equality predicates.

// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ.
func CreateTime(v time.Time) predicate.User {
	return predicate.User(sql.FieldEQ(FieldCreateTime, v))
}

// UpdateTime applies equality check predicate on the "update_time" field. It's identical to UpdateTimeEQ.
func UpdateTime(v time.Time) predicate.User {
	return predicate.User(sql.FieldEQ(FieldUpdateTime, v))
}

// Email applies equality check predicate on the "email" field. It's identical to EmailEQ.
func Email(v string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldEmail, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldName, v))
}

// IsApproved applies equality check predicate on the "is_approved" field. It's identical to IsApprovedEQ.
func IsApproved(v bool) predicate.User {
	return predicate.User(sql.FieldEQ(FieldIsApproved, v))
}

// IsAdmin applies equality check predicate on the "is_admin" field. It's identical to IsAdminEQ.
func IsAdmin(v bool) predicate.User {
	return predicate.User(sql.FieldEQ(FieldIsAdmin, v))
}

// CreateTimeEQ applies the EQ predicate on the "create_time" field.
func CreateTimeEQ(v time.Time) predicate.User {
	return predicate.User(sql.FieldEQ(FieldCreateTime, v))
}

// CreateTimeNEQ applies the NEQ predicate on the "create_time" field.
func CreateTimeNEQ(v time.Time) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldCreateTime, v))
}

// CreateTimeIn applies the In predicate on the "create_time" field.
func CreateTimeIn(vs ...time.Time) predicate.User {
	return predicate.User(sql.FieldIn(FieldCreateTime, vs...))
}

// CreateTimeNotIn applies the NotIn predicate on the "create_time" field.
func CreateTimeNotIn(vs ...time.Time) predicate.User {
	return predicate.User(sql.FieldNotIn(FieldCreateTime, vs...))
}

// CreateTimeGT applies the GT predicate on the "create_time" field.
func CreateTimeGT(v time.Time) predicate.User {
	return predicate.User(sql.FieldGT(FieldCreateTime, v))
}

// CreateTimeGTE applies the GTE predicate on the "create_time" field.
func CreateTimeGTE(v time.Time) predicate.User {
	return predicate.User(sql.FieldGTE(FieldCreateTime, v))
}

// CreateTimeLT applies the LT predicate on the "create_time" field.
func CreateTimeLT(v time.Time) predicate.User {
	return predicate.User(sql.FieldLT(FieldCreateTime, v))
}

// CreateTimeLTE applies the LTE predicate on the "create_time" field.
func CreateTimeLTE(v time.Time) predicate.User {
	return predicate.User(sql.FieldLTE(FieldCreateTime, v))
}

// UpdateTimeEQ applies the EQ predicate on the "update_time" field.
func UpdateTimeEQ(v time.Time) predicate.User {
	return predicate.User(sql.FieldEQ(FieldUpdateTime, v))
}

// UpdateTimeNEQ applies the NEQ predicate on the "update_time" field.
func UpdateTimeNEQ(v time.Time) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldUpdateTime, v))
}

// UpdateTimeIn applies the In predicate on the "update_time" field.
func UpdateTimeIn(vs ...time.Time) predicate.User {
	return predicate.User(sql.FieldIn(FieldUpdateTime, vs...))
}

// UpdateTimeNotIn applies the NotIn predicate on the "update_time" field.
func UpdateTimeNotIn(vs ...time.Time) predicate.User {
	return predicate.User(sql.FieldNotIn(FieldUpdateTime, vs...))
}

// UpdateTimeGT applies the GT predicate on the "update_time" field.
func UpdateTimeGT(v time.Time) predicate.User {
	return predicate.User(sql.FieldGT(FieldUpdateTime, v))
}

// UpdateTimeGTE applies the GTE predicate on the "update_time" field.
func UpdateTimeGTE(v time.Time) predicate.User {
	return predicate.User(sql.FieldGTE(FieldUpdateTime, v))
}

// UpdateTimeLT applies the LT predicate on the "update_time" field.
func UpdateTimeLT(v time.Time) predicate.User {
	return predicate.User(sql.FieldLT(FieldUpdateTime, v))
}

// UpdateTimeLTE applies the LTE predicate on the "update_time" field.
func UpdateTimeLTE(v time.Time) predicate.User {
	return predicate.User(sql.FieldLTE(FieldUpdateTime, v))
}

// String fields additionally get Contains/Prefix/Suffix/Nil/Fold predicates.

// EmailEQ applies the EQ predicate on the "email" field.
func EmailEQ(v string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldEmail, v))
}

// EmailNEQ applies the NEQ predicate on the "email" field.
func EmailNEQ(v string) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldEmail, v))
}

// EmailIn applies the In predicate on the "email" field.
func EmailIn(vs ...string) predicate.User {
	return predicate.User(sql.FieldIn(FieldEmail, vs...))
}

// EmailNotIn applies the NotIn predicate on the "email" field.
func EmailNotIn(vs ...string) predicate.User {
	return predicate.User(sql.FieldNotIn(FieldEmail, vs...))
}

// EmailGT applies the GT predicate on the "email" field.
func EmailGT(v string) predicate.User {
	return predicate.User(sql.FieldGT(FieldEmail, v))
}

// EmailGTE applies the GTE predicate on the "email" field.
func EmailGTE(v string) predicate.User {
	return predicate.User(sql.FieldGTE(FieldEmail, v))
}

// EmailLT applies the LT predicate on the "email" field.
func EmailLT(v string) predicate.User {
	return predicate.User(sql.FieldLT(FieldEmail, v))
}

// EmailLTE applies the LTE predicate on the "email" field.
func EmailLTE(v string) predicate.User {
	return predicate.User(sql.FieldLTE(FieldEmail, v))
}

// EmailContains applies the Contains predicate on the "email" field.
func EmailContains(v string) predicate.User {
	return predicate.User(sql.FieldContains(FieldEmail, v))
}

// EmailHasPrefix applies the HasPrefix predicate on the "email" field.
func EmailHasPrefix(v string) predicate.User {
	return predicate.User(sql.FieldHasPrefix(FieldEmail, v))
}

// EmailHasSuffix applies the HasSuffix predicate on the "email" field.
func EmailHasSuffix(v string) predicate.User {
	return predicate.User(sql.FieldHasSuffix(FieldEmail, v))
}

// EmailIsNil applies the IsNil predicate on the "email" field.
func EmailIsNil() predicate.User {
	return predicate.User(sql.FieldIsNull(FieldEmail))
}

// EmailNotNil applies the NotNil predicate on the "email" field.
func EmailNotNil() predicate.User {
	return predicate.User(sql.FieldNotNull(FieldEmail))
}

// EmailEqualFold applies the EqualFold predicate on the "email" field.
func EmailEqualFold(v string) predicate.User {
	return predicate.User(sql.FieldEqualFold(FieldEmail, v))
}

// EmailContainsFold applies the ContainsFold predicate on the "email" field.
func EmailContainsFold(v string) predicate.User {
	return predicate.User(sql.FieldContainsFold(FieldEmail, v))
}

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.User {
	return predicate.User(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.User {
	return predicate.User(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.User {
	return predicate.User(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.User {
	return predicate.User(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.User {
	return predicate.User(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.User {
	return predicate.User(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.User {
	return predicate.User(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.User {
	return predicate.User(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.User {
	return predicate.User(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.User {
	return predicate.User(sql.FieldHasSuffix(FieldName, v))
}

// NameIsNil applies the IsNil predicate on the "name" field.
func NameIsNil() predicate.User {
	return predicate.User(sql.FieldIsNull(FieldName))
}

// NameNotNil applies the NotNil predicate on the "name" field.
func NameNotNil() predicate.User {
	return predicate.User(sql.FieldNotNull(FieldName))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.User {
	return predicate.User(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.User {
	return predicate.User(sql.FieldContainsFold(FieldName, v))
}

// Boolean fields only get EQ/NEQ predicates.

// IsApprovedEQ applies the EQ predicate on the "is_approved" field.
func IsApprovedEQ(v bool) predicate.User {
	return predicate.User(sql.FieldEQ(FieldIsApproved, v))
}

// IsApprovedNEQ applies the NEQ predicate on the "is_approved" field.
func IsApprovedNEQ(v bool) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldIsApproved, v))
}

// IsAdminEQ applies the EQ predicate on the "is_admin" field.
func IsAdminEQ(v bool) predicate.User {
	return predicate.User(sql.FieldEQ(FieldIsAdmin, v))
}

// IsAdminNEQ applies the NEQ predicate on the "is_admin" field.
func IsAdminNEQ(v bool) predicate.User {
	return predicate.User(sql.FieldNEQ(FieldIsAdmin, v))
}

// HasPublisherPermissions applies the HasEdge predicate on the "publisher_permissions" edge.
func HasPublisherPermissions() predicate.User {
	return predicate.User(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, PublisherPermissionsTable, PublisherPermissionsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasPublisherPermissionsWith applies the HasEdge predicate on the "publisher_permissions" edge with a given conditions (other predicates).
func HasPublisherPermissionsWith(preds ...predicate.PublisherPermission) predicate.User {
	return predicate.User(func(s *sql.Selector) {
		step := newPublisherPermissionsStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply all neighbor predicates inside the edge sub-query.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.User) predicate.User {
	return predicate.User(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.User) predicate.User {
	return predicate.User(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.User) predicate.User {
	return predicate.User(sql.NotPredicates(p))
}

// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"registry-backend/ent/publisherpermission"
	"registry-backend/ent/user"
	"time"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
)

// UserCreate is the builder for creating a User entity.
type UserCreate struct {
	config
	// mutation accumulates the field/edge changes applied on Save.
	mutation *UserMutation
	// hooks run around the create operation.
	hooks []Hook
	// conflict holds the ON CONFLICT options used for upserts.
	conflict []sql.ConflictOption
}

// SetCreateTime sets the "create_time" field.
func (uc *UserCreate) SetCreateTime(t time.Time) *UserCreate {
	uc.mutation.SetCreateTime(t)
	return uc
}

// SetNillableCreateTime sets the "create_time" field if the given value is not nil.
func (uc *UserCreate) SetNillableCreateTime(t *time.Time) *UserCreate {
	if t != nil {
		uc.SetCreateTime(*t)
	}
	return uc
}

// SetUpdateTime sets the "update_time" field.
func (uc *UserCreate) SetUpdateTime(t time.Time) *UserCreate {
	uc.mutation.SetUpdateTime(t)
	return uc
}

// SetNillableUpdateTime sets the "update_time" field if the given value is not nil.
func (uc *UserCreate) SetNillableUpdateTime(t *time.Time) *UserCreate {
	if t != nil {
		uc.SetUpdateTime(*t)
	}
	return uc
}

// SetEmail sets the "email" field.
func (uc *UserCreate) SetEmail(s string) *UserCreate {
	uc.mutation.SetEmail(s)
	return uc
}

// SetNillableEmail sets the "email" field if the given value is not nil.
func (uc *UserCreate) SetNillableEmail(s *string) *UserCreate {
	if s != nil {
		uc.SetEmail(*s)
	}
	return uc
}

// SetName sets the "name" field.
func (uc *UserCreate) SetName(s string) *UserCreate {
	uc.mutation.SetName(s)
	return uc
}

// SetNillableName sets the "name" field if the given value is not nil.
func (uc *UserCreate) SetNillableName(s *string) *UserCreate {
	if s != nil {
		uc.SetName(*s)
	}
	return uc
}

// SetIsApproved sets the "is_approved" field.
func (uc *UserCreate) SetIsApproved(b bool) *UserCreate {
	uc.mutation.SetIsApproved(b)
	return uc
}

// SetNillableIsApproved sets the "is_approved" field if the given value is not nil.
func (uc *UserCreate) SetNillableIsApproved(b *bool) *UserCreate {
	if b != nil {
		uc.SetIsApproved(*b)
	}
	return uc
}

// SetIsAdmin sets the "is_admin" field.
func (uc *UserCreate) SetIsAdmin(b bool) *UserCreate {
	uc.mutation.SetIsAdmin(b)
	return uc
}

// SetNillableIsAdmin sets the "is_admin" field if the given value is not nil.
func (uc *UserCreate) SetNillableIsAdmin(b *bool) *UserCreate {
	if b != nil {
		uc.SetIsAdmin(*b)
	}
	return uc
}

// SetID sets the "id" field.
func (uc *UserCreate) SetID(s string) *UserCreate {
	uc.mutation.SetID(s)
	return uc
}

// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs.
func (uc *UserCreate) AddPublisherPermissionIDs(ids ...int) *UserCreate {
	uc.mutation.AddPublisherPermissionIDs(ids...)
	return uc
}

// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity.
func (uc *UserCreate) AddPublisherPermissions(p ...*PublisherPermission) *UserCreate {
	ids := make([]int, len(p))
	for i := range p {
		ids[i] = p[i].ID
	}
	return uc.AddPublisherPermissionIDs(ids...)
}

// Mutation returns the UserMutation object of the builder.
func (uc *UserCreate) Mutation() *UserMutation {
	return uc.mutation
}

// Save creates the User in the database.
func (uc *UserCreate) Save(ctx context.Context) (*User, error) {
	// Apply schema defaults before running hooks and validation.
	uc.defaults()
	return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (uc *UserCreate) SaveX(ctx context.Context) *User {
	v, err := uc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (uc *UserCreate) Exec(ctx context.Context) error {
	_, err := uc.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (uc *UserCreate) ExecX(ctx context.Context) {
	if err := uc.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (uc *UserCreate) defaults() {
	if _, ok := uc.mutation.CreateTime(); !ok {
		v := user.DefaultCreateTime()
		uc.mutation.SetCreateTime(v)
	}
	if _, ok := uc.mutation.UpdateTime(); !ok {
		v := user.DefaultUpdateTime()
		uc.mutation.SetUpdateTime(v)
	}
	if _, ok := uc.mutation.IsApproved(); !ok {
		v := user.DefaultIsApproved
		uc.mutation.SetIsApproved(v)
	}
	if _, ok := uc.mutation.IsAdmin(); !ok {
		v := user.DefaultIsAdmin
		uc.mutation.SetIsAdmin(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (uc *UserCreate) check() error {
	if _, ok := uc.mutation.CreateTime(); !ok {
		return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "User.create_time"`)}
	}
	if _, ok := uc.mutation.UpdateTime(); !ok {
		return &ValidationError{Name: "update_time", err: errors.New(`ent: missing required field "User.update_time"`)}
	}
	if _, ok := uc.mutation.IsApproved(); !ok {
		return &ValidationError{Name: "is_approved", err: errors.New(`ent: missing required field "User.is_approved"`)}
	}
	if _, ok := uc.mutation.IsAdmin(); !ok {
		return &ValidationError{Name: "is_admin", err: errors.New(`ent: missing required field "User.is_admin"`)}
	}
	return nil
}

// sqlSave validates the builder, creates the row, and reports the stored entity.
func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) {
	if err := uc.check(); err != nil {
		return nil, err
	}
	_node, _spec := uc.createSpec()
	if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil {
		// Surface constraint violations as a typed ConstraintError.
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	if _spec.ID.Value != nil {
		if id, ok := _spec.ID.Value.(string); ok {
			_node.ID = id
		} else {
			return nil, fmt.Errorf("unexpected User.ID type: %T", _spec.ID.Value)
		}
	}
	uc.mutation.id = &_node.ID
	uc.mutation.done = true
	return _node, nil
}

// createSpec translates the mutation into a sqlgraph.CreateSpec plus the entity shell.
func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
	var (
		_node = &User{config: uc.config}
		_spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeString))
	)
	_spec.OnConflict = uc.conflict
	if id, ok := uc.mutation.ID(); ok {
		_node.ID = id
		_spec.ID.Value = id
	}
	if value, ok := uc.mutation.CreateTime(); ok {
		_spec.SetField(user.FieldCreateTime, field.TypeTime, value)
		_node.CreateTime = value
	}
	if value, ok := uc.mutation.UpdateTime(); ok {
		_spec.SetField(user.FieldUpdateTime, field.TypeTime, value)
		_node.UpdateTime = value
	}
	if value, ok := uc.mutation.Email(); ok {
		_spec.SetField(user.FieldEmail, field.TypeString, value)
		_node.Email = value
	}
	if value, ok := uc.mutation.Name(); ok {
		_spec.SetField(user.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := uc.mutation.IsApproved(); ok {
		_spec.SetField(user.FieldIsApproved, field.TypeBool, value)
		_node.IsApproved = value
	}
	if value, ok := uc.mutation.IsAdmin(); ok {
		_spec.SetField(user.FieldIsAdmin, field.TypeBool, value)
		_node.IsAdmin = value
	}
	if nodes := uc.mutation.PublisherPermissionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   user.PublisherPermissionsTable,
			Columns: []string{user.PublisherPermissionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.User.Create().
//		SetCreateTime(v).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.UserUpsert) {
//			SetCreateTime(v+v).
//		}).
//		Exec(ctx)
func (uc *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne {
	uc.conflict = opts
	return &UserUpsertOne{
		create: uc,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.User.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (uc *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne {
	uc.conflict = append(uc.conflict, sql.ConflictColumns(columns...))
	return &UserUpsertOne{
		create: uc,
	}
}

type (
	// UserUpsertOne is the builder for "upsert"-ing
	//  one User node.
	UserUpsertOne struct {
		create *UserCreate
	}

	// UserUpsert is the "OnConflict" setter.
	UserUpsert struct {
		*sql.UpdateSet
	}
)

// SetUpdateTime sets the "update_time" field.
func (u *UserUpsert) SetUpdateTime(v time.Time) *UserUpsert {
	u.Set(user.FieldUpdateTime, v)
	return u
}

// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
func (u *UserUpsert) UpdateUpdateTime() *UserUpsert {
	u.SetExcluded(user.FieldUpdateTime)
	return u
}

// SetEmail sets the "email" field.
func (u *UserUpsert) SetEmail(v string) *UserUpsert {
	u.Set(user.FieldEmail, v)
	return u
}

// UpdateEmail sets the "email" field to the value that was provided on create.
func (u *UserUpsert) UpdateEmail() *UserUpsert {
	u.SetExcluded(user.FieldEmail)
	return u
}

// ClearEmail clears the value of the "email" field.
func (u *UserUpsert) ClearEmail() *UserUpsert {
	u.SetNull(user.FieldEmail)
	return u
}

// SetName sets the "name" field.
func (u *UserUpsert) SetName(v string) *UserUpsert {
	u.Set(user.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *UserUpsert) UpdateName() *UserUpsert {
	u.SetExcluded(user.FieldName)
	return u
}

// ClearName clears the value of the "name" field.
func (u *UserUpsert) ClearName() *UserUpsert {
	u.SetNull(user.FieldName)
	return u
}

// SetIsApproved sets the "is_approved" field.
func (u *UserUpsert) SetIsApproved(v bool) *UserUpsert {
	u.Set(user.FieldIsApproved, v)
	return u
}

// UpdateIsApproved sets the "is_approved" field to the value that was provided on create.
func (u *UserUpsert) UpdateIsApproved() *UserUpsert {
	u.SetExcluded(user.FieldIsApproved)
	return u
}

// SetIsAdmin sets the "is_admin" field.
func (u *UserUpsert) SetIsAdmin(v bool) *UserUpsert {
	u.Set(user.FieldIsAdmin, v)
	return u
}

// UpdateIsAdmin sets the "is_admin" field to the value that was provided on create.
func (u *UserUpsert) UpdateIsAdmin() *UserUpsert {
	u.SetExcluded(user.FieldIsAdmin)
	return u
}

// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field.
// Using this option is equivalent to using:
//
//	client.User.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//			sql.ResolveWith(func(u *sql.UpdateSet) {
//				u.SetIgnore(user.FieldID)
//			}),
//		).
//		Exec(ctx)
func (u *UserUpsertOne) UpdateNewValues() *UserUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// Immutable fields (id, create_time) are never overwritten on conflict.
		if _, exists := u.create.mutation.ID(); exists {
			s.SetIgnore(user.FieldID)
		}
		if _, exists := u.create.mutation.CreateTime(); exists {
			s.SetIgnore(user.FieldCreateTime)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.User.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *UserUpsertOne) Ignore() *UserUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *UserUpsertOne) DoNothing() *UserUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the UserCreate.OnConflict
// documentation for more info.
func (u *UserUpsertOne) Update(set func(*UserUpsert)) *UserUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&UserUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdateTime sets the "update_time" field.
func (u *UserUpsertOne) SetUpdateTime(v time.Time) *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.SetUpdateTime(v)
	})
}

// UpdateUpdateTime sets the "update_time" field to the value that was provided on create.
func (u *UserUpsertOne) UpdateUpdateTime() *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.UpdateUpdateTime()
	})
}

// SetEmail sets the "email" field.
func (u *UserUpsertOne) SetEmail(v string) *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.SetEmail(v)
	})
}

// UpdateEmail sets the "email" field to the value that was provided on create.
func (u *UserUpsertOne) UpdateEmail() *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.UpdateEmail()
	})
}

// ClearEmail clears the value of the "email" field.
func (u *UserUpsertOne) ClearEmail() *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.ClearEmail()
	})
}

// SetName sets the "name" field.
func (u *UserUpsertOne) SetName(v string) *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *UserUpsertOne) UpdateName() *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.UpdateName()
	})
}

// ClearName clears the value of the "name" field.
func (u *UserUpsertOne) ClearName() *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.ClearName()
	})
}

// SetIsApproved sets the "is_approved" field.
func (u *UserUpsertOne) SetIsApproved(v bool) *UserUpsertOne {
	return u.Update(func(s *UserUpsert) {
		s.SetIsApproved(v)
	})
}

// UpdateIsApproved sets the "is_approved" field to the value that was provided on create.
+func (u *UserUpsertOne) UpdateIsApproved() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateIsApproved() + }) +} + +// SetIsAdmin sets the "is_admin" field. +func (u *UserUpsertOne) SetIsAdmin(v bool) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetIsAdmin(v) + }) +} + +// UpdateIsAdmin sets the "is_admin" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateIsAdmin() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateIsAdmin() + }) +} + +// Exec executes the query. +func (u *UserUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserUpsertOne) ID(ctx context.Context) (id string, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: UserUpsertOne.ID is not supported by MySQL driver. Use UserUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserUpsertOne) IDX(ctx context.Context) string { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserCreateBulk is the builder for creating many User entities in bulk. +type UserCreateBulk struct { + config + err error + builders []*UserCreate + conflict []sql.ConflictOption +} + +// Save creates the User entities in the database. 
+func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) + nodes := make([]*User, len(ucb.builders)) + mutators := make([]Mutator, len(ucb.builders)) + for i := range ucb.builders { + func(i int, root context.Context) { + builder := ucb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ucb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := ucb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ucb *UserCreateBulk) Exec(ctx context.Context) error { + _, err := ucb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ucb *UserCreateBulk) ExecX(ctx context.Context) { + if err := ucb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetCreateTime(v+v). +// }). +// Exec(ctx) +func (ucb *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { + ucb.conflict = opts + return &UserUpsertBulk{ + create: ucb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ucb *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { + ucb.conflict = append(ucb.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertBulk{ + create: ucb, + } +} + +// UserUpsertBulk is the builder for "upsert"-ing +// a bulk of User nodes. +type UserUpsertBulk struct { + create *UserCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(user.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *UserUpsertBulk) UpdateNewValues() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(user.FieldID) + } + if _, exists := b.mutation.CreateTime(); exists { + s.SetIgnore(user.FieldCreateTime) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertBulk) Ignore() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertBulk) DoNothing() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreateBulk.OnConflict +// documentation for more info. +func (u *UserUpsertBulk) Update(set func(*UserUpsert)) *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdateTime sets the "update_time" field. +func (u *UserUpsertBulk) SetUpdateTime(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUpdateTime(v) + }) +} + +// UpdateUpdateTime sets the "update_time" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUpdateTime() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdateTime() + }) +} + +// SetEmail sets the "email" field. 
+func (u *UserUpsertBulk) SetEmail(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetEmail(v) + }) +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateEmail() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateEmail() + }) +} + +// ClearEmail clears the value of the "email" field. +func (u *UserUpsertBulk) ClearEmail() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearEmail() + }) +} + +// SetName sets the "name" field. +func (u *UserUpsertBulk) SetName(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateName() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateName() + }) +} + +// ClearName clears the value of the "name" field. +func (u *UserUpsertBulk) ClearName() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearName() + }) +} + +// SetIsApproved sets the "is_approved" field. +func (u *UserUpsertBulk) SetIsApproved(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetIsApproved(v) + }) +} + +// UpdateIsApproved sets the "is_approved" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateIsApproved() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateIsApproved() + }) +} + +// SetIsAdmin sets the "is_admin" field. +func (u *UserUpsertBulk) SetIsAdmin(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetIsAdmin(v) + }) +} + +// UpdateIsAdmin sets the "is_admin" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateIsAdmin() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateIsAdmin() + }) +} + +// Exec executes the query. 
+func (u *UserUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/user_delete.go b/ent/user_delete.go new file mode 100644 index 0000000..11e11c8 --- /dev/null +++ b/ent/user_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "registry-backend/ent/predicate" + "registry-backend/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserDelete is the builder for deleting a User entity. +type UserDelete struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserDelete builder. +func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { + ud.mutation.Where(ps...) + return ud +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ud *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ud *UserDelete) ExecX(ctx context.Context) int { + n, err := ud.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeString)) + if ps := ud.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ud.mutation.done = true + return affected, err +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + ud *UserDelete +} + +// Where appends a list predicates to the UserDelete builder. +func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + udo.ud.mutation.Where(ps...) + return udo +} + +// Exec executes the deletion query. +func (udo *UserDeleteOne) Exec(ctx context.Context) error { + n, err := udo.ud.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{user.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (udo *UserDeleteOne) ExecX(ctx context.Context) { + if err := udo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/user_query.go b/ent/user_query.go new file mode 100644 index 0000000..3c42ebc --- /dev/null +++ b/ent/user_query.go @@ -0,0 +1,644 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "registry-backend/ent/predicate" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/user" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserQuery is the builder for querying User entities. 
+type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withPublisherPermissions *PublisherPermissionQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { + uq.predicates = append(uq.predicates, ps...) + return uq +} + +// Limit the number of records to be returned by this query. +func (uq *UserQuery) Limit(limit int) *UserQuery { + uq.ctx.Limit = &limit + return uq +} + +// Offset to start from. +func (uq *UserQuery) Offset(offset int) *UserQuery { + uq.ctx.Offset = &offset + return uq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (uq *UserQuery) Unique(unique bool) *UserQuery { + uq.ctx.Unique = &unique + return uq +} + +// Order specifies how the records should be ordered. +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { + uq.order = append(uq.order, o...) + return uq +} + +// QueryPublisherPermissions chains the current query on the "publisher_permissions" edge. 
+func (uq *UserQuery) QueryPublisherPermissions() *PublisherPermissionQuery { + query := (&PublisherPermissionClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(publisherpermission.Table, publisherpermission.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PublisherPermissionsTable, user.PublisherPermissionsColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (uq *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (uq *UserQuery) FirstX(ctx context.Context) *User { + node, err := uq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (uq *UserQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (uq *UserQuery) FirstIDX(ctx context.Context) string { + id, err := uq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (uq *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (uq *UserQuery) OnlyX(ctx context.Context) *User { + node, err := uq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. +func (uq *UserQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (uq *UserQuery) OnlyIDX(ctx context.Context) string { + id, err := uq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. 
+func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, uq.ctx, "All") + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, uq, qr, uq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (uq *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := uq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (uq *UserQuery) IDs(ctx context.Context) (ids []string, err error) { + if uq.ctx.Unique == nil && uq.path != nil { + uq.Unique(true) + } + ctx = setContextOp(ctx, uq.ctx, "IDs") + if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (uq *UserQuery) IDsX(ctx context.Context) []string { + ids, err := uq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (uq *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, uq.ctx, "Count") + if err := uq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (uq *UserQuery) CountX(ctx context.Context) int { + count, err := uq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, uq.ctx, "Exist") + switch _, err := uq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (uq *UserQuery) ExistX(ctx context.Context) bool { + exist, err := uq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (uq *UserQuery) Clone() *UserQuery { + if uq == nil { + return nil + } + return &UserQuery{ + config: uq.config, + ctx: uq.ctx.Clone(), + order: append([]user.OrderOption{}, uq.order...), + inters: append([]Interceptor{}, uq.inters...), + predicates: append([]predicate.User{}, uq.predicates...), + withPublisherPermissions: uq.withPublisherPermissions.Clone(), + // clone intermediate query. + sql: uq.sql.Clone(), + path: uq.path, + } +} + +// WithPublisherPermissions tells the query-builder to eager-load the nodes that are connected to +// the "publisher_permissions" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithPublisherPermissions(opts ...func(*PublisherPermissionQuery)) *UserQuery { + query := (&PublisherPermissionClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withPublisherPermissions = query + return uq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldCreateTime). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + uq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &UserGroupBy{build: uq} + grbuild.flds = &uq.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreateTime time.Time `json:"create_time,omitempty"` +// } +// +// client.User.Query(). +// Select(user.FieldCreateTime). +// Scan(ctx, &v) +func (uq *UserQuery) Select(fields ...string) *UserSelect { + uq.ctx.Fields = append(uq.ctx.Fields, fields...) + sbuild := &UserSelect{UserQuery: uq} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return uq.Select().Aggregate(fns...) +} + +func (uq *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range uq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, uq); err != nil { + return err + } + } + } + for _, f := range uq.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if uq.path != nil { + prev, err := uq.path(ctx) + if err != nil { + return err + } + uq.sql = prev + } + return nil +} + +func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + _spec = uq.querySpec() + loadedTypes = [1]bool{ + uq.withPublisherPermissions != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: uq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = 
loadedTypes + return node.assignValues(columns, values) + } + if len(uq.modifiers) > 0 { + _spec.Modifiers = uq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := uq.withPublisherPermissions; query != nil { + if err := uq.loadPublisherPermissions(ctx, query, nodes, + func(n *User) { n.Edges.PublisherPermissions = []*PublisherPermission{} }, + func(n *User, e *PublisherPermission) { + n.Edges.PublisherPermissions = append(n.Edges.PublisherPermissions, e) + }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (uq *UserQuery) loadPublisherPermissions(ctx context.Context, query *PublisherPermissionQuery, nodes []*User, init func(*User), assign func(*User, *PublisherPermission)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(publisherpermission.FieldUserID) + } + query.Where(predicate.PublisherPermission(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.PublisherPermissionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := uq.querySpec() + if len(uq.modifiers) > 0 { + _spec.Modifiers = uq.modifiers + } + _spec.Node.Columns = uq.ctx.Fields + if len(uq.ctx.Fields) > 0 { + _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, uq.driver, _spec) +} + +func (uq 
*UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeString)) + _spec.From = uq.sql + if unique := uq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if uq.path != nil { + _spec.Unique = true + } + if fields := uq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := uq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := uq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := uq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := uq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(uq.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := uq.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if uq.sql != nil { + selector = uq.sql + selector.Select(selector.Columns(columns...)...) + } + if uq.ctx.Unique != nil && *uq.ctx.Unique { + selector.Distinct() + } + for _, m := range uq.modifiers { + m(selector) + } + for _, p := range uq.predicates { + p(selector) + } + for _, p := range uq.order { + p(selector) + } + if offset := uq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := uq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (uq *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery { + if uq.driver.Dialect() == dialect.Postgres { + uq.Unique(false) + } + uq.modifiers = append(uq.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return uq +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (uq *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery { + if uq.driver.Dialect() == dialect.Postgres { + uq.Unique(false) + } + uq.modifiers = append(uq.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return uq +} + +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy") + if err := ugb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) +} + +func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ugb.fns)) + for _, fn := range ugb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) + for _, f := range *ugb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ugb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + us.fns = append(us.fns, fns...) + return us +} + +// Scan applies the selector query and scans the result into the given value. 
+func (us *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, us.ctx, "Select") + if err := us.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) +} + +func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(us.fns)) + for _, fn := range us.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*us.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := us.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/user_update.go b/ent/user_update.go new file mode 100644 index 0000000..28e0135 --- /dev/null +++ b/ent/user_update.go @@ -0,0 +1,547 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "registry-backend/ent/predicate" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/user" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserUpdate builder. +func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + uu.mutation.Where(ps...) + return uu +} + +// SetUpdateTime sets the "update_time" field. +func (uu *UserUpdate) SetUpdateTime(t time.Time) *UserUpdate { + uu.mutation.SetUpdateTime(t) + return uu +} + +// SetEmail sets the "email" field. 
+func (uu *UserUpdate) SetEmail(s string) *UserUpdate { + uu.mutation.SetEmail(s) + return uu +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate { + if s != nil { + uu.SetEmail(*s) + } + return uu +} + +// ClearEmail clears the value of the "email" field. +func (uu *UserUpdate) ClearEmail() *UserUpdate { + uu.mutation.ClearEmail() + return uu +} + +// SetName sets the "name" field. +func (uu *UserUpdate) SetName(s string) *UserUpdate { + uu.mutation.SetName(s) + return uu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (uu *UserUpdate) SetNillableName(s *string) *UserUpdate { + if s != nil { + uu.SetName(*s) + } + return uu +} + +// ClearName clears the value of the "name" field. +func (uu *UserUpdate) ClearName() *UserUpdate { + uu.mutation.ClearName() + return uu +} + +// SetIsApproved sets the "is_approved" field. +func (uu *UserUpdate) SetIsApproved(b bool) *UserUpdate { + uu.mutation.SetIsApproved(b) + return uu +} + +// SetNillableIsApproved sets the "is_approved" field if the given value is not nil. +func (uu *UserUpdate) SetNillableIsApproved(b *bool) *UserUpdate { + if b != nil { + uu.SetIsApproved(*b) + } + return uu +} + +// SetIsAdmin sets the "is_admin" field. +func (uu *UserUpdate) SetIsAdmin(b bool) *UserUpdate { + uu.mutation.SetIsAdmin(b) + return uu +} + +// SetNillableIsAdmin sets the "is_admin" field if the given value is not nil. +func (uu *UserUpdate) SetNillableIsAdmin(b *bool) *UserUpdate { + if b != nil { + uu.SetIsAdmin(*b) + } + return uu +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs. +func (uu *UserUpdate) AddPublisherPermissionIDs(ids ...int) *UserUpdate { + uu.mutation.AddPublisherPermissionIDs(ids...) + return uu +} + +// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity. 
+func (uu *UserUpdate) AddPublisherPermissions(p ...*PublisherPermission) *UserUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uu.AddPublisherPermissionIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uu *UserUpdate) Mutation() *UserMutation { + return uu.mutation +} + +// ClearPublisherPermissions clears all "publisher_permissions" edges to the PublisherPermission entity. +func (uu *UserUpdate) ClearPublisherPermissions() *UserUpdate { + uu.mutation.ClearPublisherPermissions() + return uu +} + +// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to PublisherPermission entities by IDs. +func (uu *UserUpdate) RemovePublisherPermissionIDs(ids ...int) *UserUpdate { + uu.mutation.RemovePublisherPermissionIDs(ids...) + return uu +} + +// RemovePublisherPermissions removes "publisher_permissions" edges to PublisherPermission entities. +func (uu *UserUpdate) RemovePublisherPermissions(p ...*PublisherPermission) *UserUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uu.RemovePublisherPermissionIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (uu *UserUpdate) Save(ctx context.Context) (int, error) { + uu.defaults() + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uu *UserUpdate) SaveX(ctx context.Context) int { + affected, err := uu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (uu *UserUpdate) Exec(ctx context.Context) error { + _, err := uu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uu *UserUpdate) ExecX(ctx context.Context) { + if err := uu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (uu *UserUpdate) defaults() { + if _, ok := uu.mutation.UpdateTime(); !ok { + v := user.UpdateDefaultUpdateTime() + uu.mutation.SetUpdateTime(v) + } +} + +func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeString)) + if ps := uu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uu.mutation.UpdateTime(); ok { + _spec.SetField(user.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := uu.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if uu.mutation.EmailCleared() { + _spec.ClearField(user.FieldEmail, field.TypeString) + } + if value, ok := uu.mutation.Name(); ok { + _spec.SetField(user.FieldName, field.TypeString, value) + } + if uu.mutation.NameCleared() { + _spec.ClearField(user.FieldName, field.TypeString) + } + if value, ok := uu.mutation.IsApproved(); ok { + _spec.SetField(user.FieldIsApproved, field.TypeBool, value) + } + if value, ok := uu.mutation.IsAdmin(); ok { + _spec.SetField(user.FieldIsAdmin, field.TypeBool, value) + } + if uu.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedPublisherPermissionsIDs(); len(nodes) > 0 && !uu.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.PublisherPermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + uu.mutation.done = true + return n, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetUpdateTime sets the "update_time" field. +func (uuo *UserUpdateOne) SetUpdateTime(t time.Time) *UserUpdateOne { + uuo.mutation.SetUpdateTime(t) + return uuo +} + +// SetEmail sets the "email" field. +func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne { + uuo.mutation.SetEmail(s) + return uuo +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne { + if s != nil { + uuo.SetEmail(*s) + } + return uuo +} + +// ClearEmail clears the value of the "email" field. +func (uuo *UserUpdateOne) ClearEmail() *UserUpdateOne { + uuo.mutation.ClearEmail() + return uuo +} + +// SetName sets the "name" field. 
+func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne { + uuo.mutation.SetName(s) + return uuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableName(s *string) *UserUpdateOne { + if s != nil { + uuo.SetName(*s) + } + return uuo +} + +// ClearName clears the value of the "name" field. +func (uuo *UserUpdateOne) ClearName() *UserUpdateOne { + uuo.mutation.ClearName() + return uuo +} + +// SetIsApproved sets the "is_approved" field. +func (uuo *UserUpdateOne) SetIsApproved(b bool) *UserUpdateOne { + uuo.mutation.SetIsApproved(b) + return uuo +} + +// SetNillableIsApproved sets the "is_approved" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableIsApproved(b *bool) *UserUpdateOne { + if b != nil { + uuo.SetIsApproved(*b) + } + return uuo +} + +// SetIsAdmin sets the "is_admin" field. +func (uuo *UserUpdateOne) SetIsAdmin(b bool) *UserUpdateOne { + uuo.mutation.SetIsAdmin(b) + return uuo +} + +// SetNillableIsAdmin sets the "is_admin" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableIsAdmin(b *bool) *UserUpdateOne { + if b != nil { + uuo.SetIsAdmin(*b) + } + return uuo +} + +// AddPublisherPermissionIDs adds the "publisher_permissions" edge to the PublisherPermission entity by IDs. +func (uuo *UserUpdateOne) AddPublisherPermissionIDs(ids ...int) *UserUpdateOne { + uuo.mutation.AddPublisherPermissionIDs(ids...) + return uuo +} + +// AddPublisherPermissions adds the "publisher_permissions" edges to the PublisherPermission entity. +func (uuo *UserUpdateOne) AddPublisherPermissions(p ...*PublisherPermission) *UserUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uuo.AddPublisherPermissionIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. 
+func (uuo *UserUpdateOne) Mutation() *UserMutation { + return uuo.mutation +} + +// ClearPublisherPermissions clears all "publisher_permissions" edges to the PublisherPermission entity. +func (uuo *UserUpdateOne) ClearPublisherPermissions() *UserUpdateOne { + uuo.mutation.ClearPublisherPermissions() + return uuo +} + +// RemovePublisherPermissionIDs removes the "publisher_permissions" edge to PublisherPermission entities by IDs. +func (uuo *UserUpdateOne) RemovePublisherPermissionIDs(ids ...int) *UserUpdateOne { + uuo.mutation.RemovePublisherPermissionIDs(ids...) + return uuo +} + +// RemovePublisherPermissions removes "publisher_permissions" edges to PublisherPermission entities. +func (uuo *UserUpdateOne) RemovePublisherPermissions(p ...*PublisherPermission) *UserUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uuo.RemovePublisherPermissionIDs(ids...) +} + +// Where appends a list predicates to the UserUpdate builder. +func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + uuo.mutation.Where(ps...) + return uuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + uuo.fields = append([]string{field}, fields...) + return uuo +} + +// Save executes the query and returns the updated User entity. +func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { + uuo.defaults() + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := uuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. 
+func (uuo *UserUpdateOne) Exec(ctx context.Context) error { + _, err := uuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uuo *UserUpdateOne) ExecX(ctx context.Context) { + if err := uuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uuo *UserUpdateOne) defaults() { + if _, ok := uuo.mutation.UpdateTime(); !ok { + v := user.UpdateDefaultUpdateTime() + uuo.mutation.SetUpdateTime(v) + } +} + +func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeString)) + id, ok := uuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := uuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := uuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uuo.mutation.UpdateTime(); ok { + _spec.SetField(user.FieldUpdateTime, field.TypeTime, value) + } + if value, ok := uuo.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if uuo.mutation.EmailCleared() { + _spec.ClearField(user.FieldEmail, field.TypeString) + } + if value, ok := uuo.mutation.Name(); ok { + _spec.SetField(user.FieldName, field.TypeString, value) + } + if uuo.mutation.NameCleared() { + _spec.ClearField(user.FieldName, field.TypeString) + } + if value, ok := uuo.mutation.IsApproved(); ok 
{ + _spec.SetField(user.FieldIsApproved, field.TypeBool, value) + } + if value, ok := uuo.mutation.IsAdmin(); ok { + _spec.SetField(user.FieldIsAdmin, field.TypeBool, value) + } + if uuo.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedPublisherPermissionsIDs(); len(nodes) > 0 && !uuo.mutation.PublisherPermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.PublisherPermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PublisherPermissionsTable, + Columns: []string{user.PublisherPermissionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(publisherpermission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &User{config: uuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: 
err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	uuo.mutation.done = true
+	return _node, nil
+}
diff --git a/gateways/slack/slack.go b/gateways/slack/slack.go
new file mode 100644
index 0000000..3bdcd16
--- /dev/null
+++ b/gateways/slack/slack.go
@@ -0,0 +1,72 @@
+package gateway
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+)
+
+// SECURITY(review): this webhook URL embeds a Slack credential and is
+// committed to source control. Rotate the webhook and load it from an
+// environment variable or secret manager instead of hard-coding it here.
+const registrySlackChannelWebhook = "https://hooks.slack.com/services/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"
+
+// slackHTTPClient is shared by all notifications; the timeout keeps a slow or
+// unreachable Slack endpoint from blocking callers indefinitely.
+var slackHTTPClient = &http.Client{Timeout: 10 * time.Second}
+
+// SlackService posts registry-related notifications to Slack.
+type SlackService interface {
+	SendRegistryMessageToSlack(msg string) error
+}
+
+// DripSlackService is the production SlackService backed by a Slack incoming webhook.
+type DripSlackService struct {
+}
+
+// NewSlackService returns a ready-to-use DripSlackService.
+func NewSlackService() *DripSlackService {
+	return &DripSlackService{}
+}
+
+// slackRequestBody is the JSON payload accepted by Slack incoming webhooks.
+type slackRequestBody struct {
+	Text string `json:"text"`
+}
+
+// SendRegistryMessageToSlack posts msg to the registry Slack channel.
+func (s *DripSlackService) SendRegistryMessageToSlack(msg string) error {
+	return sendSlackNotification(msg, registrySlackChannelWebhook)
+}
+
+// sendSlackNotification POSTs msg as JSON to slackWebhookURL and treats any
+// HTTP status >= 400 as an error.
+func sendSlackNotification(msg string, slackWebhookURL string) error {
+	body, err := json.Marshal(slackRequestBody{Text: msg})
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest(http.MethodPost, slackWebhookURL, bytes.NewBuffer(body))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := slackHTTPClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode >= 400 {
+		return fmt.Errorf("request to Slack returned error status: %d", resp.StatusCode)
+	}
+
+	// Drain the body so the keep-alive connection can be reused.
+	_, _ = io.Copy(io.Discard, resp.Body)
+	return nil
+}
diff --git a/gateways/storage/files.go b/gateways/storage/files.go
new file mode 100644
index 0000000..13d1258
--- /dev/null
+++ b/gateways/storage/files.go
@@ -0,0 +1,162 @@
+package storage
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"io"
+	"os"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"github.com/rs/zerolog/log"
+)
+
+const BucketName = "comfy-workflow-json"
+
+type StorageService interface {
+	UploadFile(ctx context.Context, bucket, object,
filePath string) (string, error)
+	StreamFileUpload(w io.Writer, objectName, blob string) (string, string, error)
+	GetFileUrl(ctx context.Context, bucketName, objectPath string) (string, error)
+	GenerateSignedURL(bucketName, objectName string) (string, error)
+}
+
+type GCPStorageService struct {
+	client *storage.Client
+}
+
+func NewGCPStorageService(ctx context.Context) (*GCPStorageService, error) {
+	StorageClient, err := storage.NewClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("NewStorageClient: %v", err)
+	}
+
+	return &GCPStorageService{
+		client: StorageClient,
+	}, nil
+}
+
+// UploadFile uploads the local file at filePath to bucket/object, makes it world-readable, and returns its public URL.
+func (s *GCPStorageService) UploadFile(ctx context.Context, bucket, object string, filePath string) (string, error) {
+	log.Ctx(ctx).Info().Msgf("Uploading %v to %v/%v.\n", filePath, bucket, object)
+	client, err := storage.NewClient(ctx) // NOTE(review): s.client is never used; each call dials a fresh client — consider reusing s.client.
+	if err != nil {
+		return "", fmt.Errorf("storage.NewClient: %w", err)
+	}
+	defer client.Close()
+
+	// Open local file.
+	f, err := os.Open(filePath)
+	if err != nil {
+		return "", fmt.Errorf("os.Open: %w", err)
+	}
+	defer f.Close()
+
+	ctx, cancel := context.WithTimeout(ctx, time.Second*50)
+	defer cancel()
+
+	o := client.Bucket(bucket).Object(object)
+
+	// Optional: set a generation-match precondition to avoid potential race
+	// conditions and data corruptions. The request to upload is aborted if the
+	// object's generation number does not match your precondition.
+	// For an object that does not yet exist, set the DoesNotExist precondition.
+	o = o.If(storage.Conditions{DoesNotExist: true})
+	// If the live object already exists in your bucket, set instead a
+	// generation-match precondition using the live object's generation number.
+	// attrs, err := o.Attrs(ctx)
+	// if err != nil {
+	// 	return fmt.Errorf("object.Attrs: %w", err)
+	// }
+	// o = o.If(storage.Conditions{GenerationMatch: attrs.Generation})
+
+	// Upload an object with storage.Writer.
+	wc := o.NewWriter(ctx)
+	if _, err = io.Copy(wc, f); err != nil {
+		return "", fmt.Errorf("io.Copy: %w", err)
+	}
+	if err := wc.Close(); err != nil {
+		return "", fmt.Errorf("Writer.Close: %w", err)
+	}
+	log.Ctx(ctx).Info().Msgf("Blob %v uploaded.\n", object)
+	// Make the file publicly accessible
+	if err := o.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+		return "", fmt.Errorf("ACL().Set: %w", err)
+	}
+
+	// Construct the public URL
+	publicURL := fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
+	log.Ctx(ctx).Info().Msgf("Blob is publicly accessible at %v.\n", publicURL)
+	return publicURL, nil
+}
+
+// StreamFileUpload writes blob to objectName in the fixed BucketName bucket and returns (bucket, object).
+func (s *GCPStorageService) StreamFileUpload(w io.Writer, objectName string, blob string) (string, string, error) { // NOTE(review): w is unused.
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return "", "", fmt.Errorf("storage.NewClient: %w", err)
+	}
+	defer client.Close()
+
+	b := []byte(blob)
+	buf := bytes.NewBuffer(b)
+
+	ctx, cancel := context.WithTimeout(ctx, time.Second*50)
+	defer cancel()
+
+	// Upload an object with storage.Writer.
+	wc := client.Bucket(BucketName).Object(objectName).NewWriter(ctx)
+	wc.ChunkSize = 0 // note retries are not supported for chunk size 0.
+
+	if _, err = io.Copy(wc, buf); err != nil {
+		return "", "", fmt.Errorf("io.Copy: %w", err)
+	}
+	// Data can continue to be added to the file until the writer is closed.
+	if err := wc.Close(); err != nil {
+		return "", "", fmt.Errorf("Writer.Close: %w", err)
+	}
+	log.Ctx(ctx).Info().Msgf("%v uploaded to %v.\n", objectName, BucketName)
+
+	return BucketName, objectName, nil
+}
+
+func (s *GCPStorageService) GetFileUrl(ctx context.Context, bucketName, objectPath string) (string, error) {
+	// Look up the object's attributes; its MediaLink is returned as the URL.
+	client, err := storage.NewClient(ctx) // NOTE(review): s.client is never used here either — consider reusing it.
+	if err != nil {
+		return "", fmt.Errorf("storage.NewClient: %w", err)
+	}
+	defer client.Close()
+
+	// Get Public URL
+	attrs, err := client.Bucket(bucketName).Object(objectPath).Attrs(ctx)
+	if err != nil {
+		return "", fmt.Errorf("object.Attrs: %w", err)
+	}
+	publicURL := attrs.MediaLink
+	log.Ctx(ctx).Info().Msgf("Public URL: %v", publicURL)
+	return publicURL, nil
+}
+
+func (s *GCPStorageService) GenerateSignedURL(bucketName, objectName string) (string, error) {
+	ctx := context.Background() // NOTE(review): consider accepting ctx as a parameter like the other methods.
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return "", err
+	}
+	defer client.Close()
+
+	expires := time.Now().Add(15 * time.Minute)
+	url, err := client.Bucket(bucketName).SignedURL(objectName, &storage.SignedURLOptions{
+		ContentType: "application/gzip",
+		Method:      "PUT",
+		Expires:     expires,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return url, nil
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..2578032
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,122 @@
+module registry-backend
+
+go 1.21.5
+
+require (
+	cloud.google.com/go/monitoring v1.18.0
+	cloud.google.com/go/storage v1.38.0
+	entgo.io/ent v0.13.1
+	firebase.google.com/go v3.13.0+incompatible
+	github.com/Masterminds/semver/v3 v3.2.1
+	github.com/deepmap/oapi-codegen/v2 v2.1.0
+	github.com/getkin/kin-openapi v0.123.0
+	github.com/google/uuid v1.6.0
+	github.com/labstack/echo/v4 v4.11.4
+	github.com/lib/pq v1.10.9
+	github.com/mixpanel/mixpanel-go v1.2.1
+	github.com/oapi-codegen/runtime v1.1.1
+	github.com/rs/zerolog v1.32.0
+	github.com/stretchr/testify v1.8.4
+	
github.com/testcontainers/testcontainers-go v0.28.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 + google.golang.org/api v0.165.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9 + google.golang.org/protobuf v1.32.0 +) + +require ( + ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect + cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go/compute v1.24.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/firestore v1.14.0 // indirect + cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v25.0.2+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.1 // indirect + github.com/hashicorp/hcl/v2 v2.19.1 // indirect + github.com/invopop/yaml v0.2.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/sergi/go-diff v1.2.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // 
indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/zclconf/go-cty v1.14.2 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.1 // indirect + go.opentelemetry.io/otel/metric v1.23.1 // indirect + go.opentelemetry.io/otel/trace v1.23.1 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.18.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/grpc v1.61.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..1862205 --- /dev/null +++ b/go.sum @@ -0,0 +1,444 @@ +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= 
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= +cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4= +cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= +cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= +cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE= +entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= +firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= +firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm 
v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell 
v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen/v2 v2.1.0 h1:I/NMVhJCtuvL9x+S2QzZKpSjGi33oDZwPRdemvOZWyQ= +github.com/deepmap/oapi-codegen/v2 v2.1.0/go.mod h1:R1wL226vc5VmCNJUvMyYr3hJMm5reyv25j952zAVXZ8= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY= +github.com/docker/docker v25.0.2+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8= +github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 
+github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian/v3 v3.3.2 
h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.5.3 
h1:Ces6/M3wbDXYpM8JyyPD57ivTtJACFZJd885pdIaV2s= +github.com/jackc/pgx/v5 v5.5.3/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zGq8= +github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon 
v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mixpanel/mixpanel-go v1.2.1 h1:iykbHKomTJjVoWU95Vt1sjZy4HLt8UOYacMEEEMFBok= 
+github.com/mixpanel/mixpanel-go v1.2.1/go.mod h1:mPGaNhBoZMJuLu8k7Y1KhU5n8Vw13rxQZZjHj+b9RLk= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/perimeterx/marshmallow v1.1.5 
h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= 
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8= +github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU= +github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 
h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4= +github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 
h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 
h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.18.0 
h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.165.0 h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc= +google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9 
h1:4++qSzdWBUy9/2x8L5KZgwZw+mjJZ2yDSCGMVM0YzRs= +google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:PVreiBMirk8ypES6aw9d4p6iiBNSIfZEBqr3UGoAi2E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/integration-tests/registry_integration_test.go b/integration-tests/registry_integration_test.go new file mode 100644 index 
0000000..87a7fe1 --- /dev/null +++ b/integration-tests/registry_integration_test.go @@ -0,0 +1,671 @@ +package integration + +import ( + "context" + "registry-backend/config" + "registry-backend/drip" + "registry-backend/ent" + "registry-backend/mock/gateways" + "registry-backend/server/implementation" + "strings" + "testing" + + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func setUpTest(client *ent.Client) (context.Context, *ent.User) { + ctx := context.Background() + // create a User and attach to context + testUser := createTestUser(ctx, client) + ctx = decorateUserInContext(ctx, testUser) + return ctx, testUser +} + +func TestRegistry(t *testing.T) { + clientCtx := context.Background() + client, postgresContainer := setupDB(t, clientCtx) + // Cleanup + defer func() { + if err := postgresContainer.Terminate(clientCtx); err != nil { + log.Ctx(clientCtx).Error().Msgf("failed to terminate container: %s", err) + } + }() + + // Initialize the Service + mockStorageService := new(gateways.MockStorageService) + mockSlackService := new(gateways.MockSlackService) + mockSlackService. + On("SendRegistryMessageToSlack", mock.Anything). + Return(nil) // Do nothing for all slack messsage calls. 
+ impl := implementation.NewStrictServerImplementation( + client, &config.Config{}, mockStorageService, mockSlackService) + + t.Run("Publisher", func(t *testing.T) { + ctx, testUser := setUpTest(client) + publisherId := "test-publisher" + description := "test-description" + source_code_repo := "test-source-code-repo" + website := "test-website" + support := "test-support" + logo := "test-logo" + name := "test-name" + + t.Run("Create Publisher", func(t *testing.T) { + createPublisherResponse, err := impl.CreatePublisher(ctx, drip.CreatePublisherRequestObject{ + Body: &drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: &source_code_repo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + }, + }) + require.NoError(t, err, "should return created publisher") + require.NotNil(t, createPublisherResponse, "should return created publisher") + assert.Equal(t, publisherId, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Id) + assert.Equal(t, description, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Description) + assert.Equal(t, source_code_repo, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).SourceCodeRepo) + assert.Equal(t, website, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Website) + assert.Equal(t, support, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Support) + assert.Equal(t, logo, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Logo) + }) + + t.Run("Validate Publisher", func(t *testing.T) { + res, err := impl.ValidatePublisher(ctx, drip.ValidatePublisherRequestObject{ + Params: drip.ValidatePublisherParams{Username: name}, + }) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ValidatePublisher200JSONResponse{}, res, "should return 200") + require.True(t, *res.(drip.ValidatePublisher200JSONResponse).IsAvailable, "should be available") + }) + + t.Run("Get Publisher", func(t *testing.T) { 
+ getPublisherResponse, err := impl.GetPublisher(ctx, drip.GetPublisherRequestObject{ + PublisherId: publisherId}) + require.NoError(t, err, "should return created publisher") + assert.Equal(t, publisherId, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Id) + assert.Equal(t, description, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Description) + assert.Equal(t, source_code_repo, *getPublisherResponse.(drip.GetPublisher200JSONResponse).SourceCodeRepo) + assert.Equal(t, website, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Website) + assert.Equal(t, support, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Support) + assert.Equal(t, logo, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Logo) + assert.Equal(t, name, *getPublisherResponse.(drip.GetPublisher200JSONResponse).Name) + + // Check the number of members returned + expectedMembersCount := 1 // Adjust to your expected count + assert.Equal(t, expectedMembersCount, + len(*getPublisherResponse.(drip.GetPublisher200JSONResponse).Members), + "should return the correct number of members") + + // Check specific properties of each member, adjust indices accordingly + for _, member := range *getPublisherResponse.(drip.GetPublisher200JSONResponse).Members { + expectedUserId := testUser.ID + expectedUserName := testUser.Name + expectedUserEmail := testUser.Email + + assert.Equal(t, expectedUserId, *member.User.Id, "User ID should match") + assert.Equal(t, expectedUserName, *member.User.Name, "User name should match") + assert.Equal(t, expectedUserEmail, *member.User.Email, "User email should match") + } + }) + + t.Run("List Publishers", func(t *testing.T) { + res, err := impl.ListPublishers(ctx, drip.ListPublishersRequestObject{}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListPublishers200JSONResponse{}, res, "should return 200 status code") + res200 := res.(drip.ListPublishers200JSONResponse) + require.Len(t, res200, 1, "should return 
all stored publlishers") + assert.Equal(t, drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: &source_code_repo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + + // generated thus ignored in comparison + Members: res200[0].Members, + CreatedAt: res200[0].CreatedAt, + }, res200[0], "should return correct publishers") + }) + + t.Run("Get Non-Exist Publisher", func(t *testing.T) { + res, err := impl.GetPublisher(ctx, drip.GetPublisherRequestObject{PublisherId: publisherId + "invalid"}) + require.NoError(t, err, "should not return error") + assert.IsType(t, drip.GetPublisher404JSONResponse{}, res) + }) + + t.Run("Update Publisher", func(t *testing.T) { + update_description := "update-test-description" + update_source_code_repo := "update-test-source-code-repo" + update_website := "update-test-website" + update_support := "update-test-support" + update_logo := "update-test-logo" + update_name := "update-test-name" + + updatePublisherResponse, err := impl.UpdatePublisher(ctx, drip.UpdatePublisherRequestObject{ + PublisherId: publisherId, + Body: &drip.Publisher{ + Description: &update_description, + SourceCodeRepo: &update_source_code_repo, + Website: &update_website, + Support: &update_support, + Logo: &update_logo, + Name: &update_name, + }, + }) + require.NoError(t, err, "should return created publisher") + assert.Equal(t, publisherId, *updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).Id) + assert.Equal(t, update_description, *updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).Description) + assert.Equal(t, update_source_code_repo, *updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).SourceCodeRepo) + assert.Equal(t, update_website, *updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).Website) + assert.Equal(t, update_support, *updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).Support) + assert.Equal(t, update_logo, 
*updatePublisherResponse.(drip.UpdatePublisher200JSONResponse).Logo) + + _, err = impl.ListPublishersForUser(ctx, drip.ListPublishersForUserRequestObject{}) + require.NoError(t, err, "should return created publisher") + }) + + t.Run("Reject New Publisher With The Same Name", func(t *testing.T) { + duplicateCreatePublisherResponse, err := impl.CreatePublisher(ctx, drip.CreatePublisherRequestObject{ + Body: &drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: &source_code_repo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + }, + }) + require.NoError(t, err, "should return error") + assert.IsType(t, drip.CreatePublisher400JSONResponse{}, duplicateCreatePublisherResponse) + }) + + t.Run("Delete Publisher", func(t *testing.T) { + res, err := impl.DeletePublisher(ctx, drip.DeletePublisherRequestObject{PublisherId: publisherId}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.DeletePublisher204Response{}, res, "should return 204") + }) + }) + + t.Run("Personal Access Token", func(t *testing.T) { + ctx, _ := setUpTest(client) + publisherId := "test-publisher-pat" + description := "test-description" + source_code_repo := "test-source-code-repo" + website := "test-website" + support := "test-support" + logo := "test-logo" + name := "test-name" + tokenName := "test-token-name" + tokenDescription := "test-token-description" + + t.Run("Create Publisher", func(t *testing.T) { + createPublisherResponse, err := impl.CreatePublisher(ctx, drip.CreatePublisherRequestObject{ + Body: &drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: &source_code_repo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + }, + }) + + require.NoError(t, err, "should return created publisher") + require.NotNil(t, createPublisherResponse, "should return created publisher") + }) + + t.Run("List Personal Access Token Before Create", func(t *testing.T) { + none, err := 
impl.ListPersonalAccessTokens(ctx, drip.ListPersonalAccessTokensRequestObject{ + PublisherId: publisherId, + }) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListPersonalAccessTokens200JSONResponse{}, none, "should return 200") + assert.Empty(t, none.(drip.ListPersonalAccessTokens200JSONResponse)) + }) + + t.Run("Create Personal Acccess Token", func(t *testing.T) { + createPersonalAccessTokenResponse, err := impl.CreatePersonalAccessToken( + ctx, drip.CreatePersonalAccessTokenRequestObject{ + PublisherId: publisherId, + Body: &drip.PersonalAccessToken{ + Name: &tokenName, + Description: &tokenDescription, + }, + }) + require.NoError(t, err, "should return created token") + require.NotNil(t, + *createPersonalAccessTokenResponse.(drip.CreatePersonalAccessToken201JSONResponse).Token, + "Token should have a value.") + }) + + t.Run("List Personal Access Token", func(t *testing.T) { + getPersonalAccessTokenResponse, err := impl.ListPersonalAccessTokens(ctx, drip.ListPersonalAccessTokensRequestObject{ + PublisherId: publisherId, + }) + require.NoError(t, err, "should return created token") + assert.Equal(t, tokenName, + *getPersonalAccessTokenResponse.(drip.ListPersonalAccessTokens200JSONResponse)[0].Name) + assert.Equal(t, tokenDescription, + *getPersonalAccessTokenResponse.(drip.ListPersonalAccessTokens200JSONResponse)[0].Description) + assert.True(t, + isTokenMasked(*getPersonalAccessTokenResponse.(drip.ListPersonalAccessTokens200JSONResponse)[0].Token)) + }) + }) + + t.Run("Node", func(t *testing.T) { + ctx, _ := setUpTest(client) + publisherId := "test-publisher-node" + description := "test-description" + sourceCodeRepo := "test-source-code-repo" + website := "test-website" + support := "test-support" + logo := "test-logo" + name := "test-name" + + createPublisherResponse, err := impl.CreatePublisher(ctx, drip.CreatePublisherRequestObject{ + Body: &drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: 
&sourceCodeRepo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + }, + }) + require.NoError(t, err, "should return created publisher") + require.NotNil(t, createPublisherResponse, "should return created publisher") + + nodeId := "test-node" + nodeDescription := "test-node-description" + nodeAuthor := "test-node-author" + nodeLicense := "test-node-license" + nodeName := "test-node-name" + nodeTags := []string{"test-node-tag"} + icon := "https://wwww.github.com/test-icon.svg" + githubUrl := "https://www.github.com/test-github-url" + + var real_node_id *string + t.Run("Create Node", func(t *testing.T) { + createNodeResponse, err := impl.CreateNode(ctx, drip.CreateNodeRequestObject{ + PublisherId: publisherId, + Body: &drip.Node{ + Id: &nodeId, + Name: &nodeName, + Description: &nodeDescription, + Author: &nodeAuthor, + License: &nodeLicense, + Tags: &nodeTags, + Icon: &icon, + Repository: &githubUrl, + }, + }) + require.NoError(t, err, "should return created node") + require.NotNil(t, createNodeResponse, "should return created node") + assert.Equal(t, nodeId, *createNodeResponse.(drip.CreateNode201JSONResponse).Id) + assert.Equal(t, nodeDescription, *createNodeResponse.(drip.CreateNode201JSONResponse).Description) + assert.Equal(t, nodeAuthor, *createNodeResponse.(drip.CreateNode201JSONResponse).Author) + assert.Equal(t, nodeLicense, *createNodeResponse.(drip.CreateNode201JSONResponse).License) + assert.Equal(t, nodeName, *createNodeResponse.(drip.CreateNode201JSONResponse).Name) + assert.Equal(t, nodeTags, *createNodeResponse.(drip.CreateNode201JSONResponse).Tags) + assert.Equal(t, icon, *createNodeResponse.(drip.CreateNode201JSONResponse).Icon) + assert.Equal(t, githubUrl, *createNodeResponse.(drip.CreateNode201JSONResponse).Repository) + real_node_id = createNodeResponse.(drip.CreateNode201JSONResponse).Id + + }) + + t.Run("Get Node", func(t *testing.T) { + res, err := impl.GetNode(ctx, drip.GetNodeRequestObject{NodeId: nodeId}) + 
require.NoError(t, err, "should not return error") + require.IsType(t, drip.GetNode200JSONResponse{}, res) + res200 := res.(drip.GetNode200JSONResponse) + assert.Equal(t, drip.GetNode200JSONResponse{ + Id: &nodeId, + Name: &nodeName, + Description: &nodeDescription, + Author: &nodeAuthor, + Tags: &nodeTags, + License: &nodeLicense, + Icon: &icon, + Repository: &githubUrl, + }, res200, "should return stored node data") + }) + + t.Run("Get Not Exist Node", func(t *testing.T) { + res, err := impl.GetNode(ctx, drip.GetNodeRequestObject{NodeId: nodeId + "fake"}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.GetNode404JSONResponse{}, res) + }) + + t.Run("Get Publisher Nodes", func(t *testing.T) { + res, err := impl.ListNodesForPublisher(ctx, drip.ListNodesForPublisherRequestObject{ + PublisherId: publisherId, + }) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListNodesForPublisher200JSONResponse{}, res) + res200 := res.(drip.ListNodesForPublisher200JSONResponse) + require.Len(t, res200, 1) + assert.Equal(t, drip.Node{ + Id: &nodeId, + Name: &nodeName, + Description: &nodeDescription, + Author: &nodeAuthor, + Tags: &nodeTags, + License: &nodeLicense, + Icon: &icon, + Repository: &githubUrl, + }, res200[0], "should return stored node data") + }) + + t.Run("Update Node", func(t *testing.T) { + updateNodeDescription := "update_test-node-description" + updateNodeAuthor := "update_test-node-author" + updateNodeLicense := "update_test-node-license" + updateNodeName := "update_test-node-name" + updateNodeTags := []string{"update-test-node-tag"} + updateIcon := "https://wwww.github.com/update-icon.svg" + updateGithubUrl := "https://www.github.com/update-github-url" + + updateNodeResponse, err := impl.UpdateNode(ctx, drip.UpdateNodeRequestObject{ + PublisherId: publisherId, + NodeId: *real_node_id, + Body: &drip.Node{ + Id: &nodeId, + Description: &updateNodeDescription, + Author: &updateNodeAuthor, + License: 
&updateNodeLicense, + Name: &updateNodeName, + Tags: &updateNodeTags, + Icon: &updateIcon, + Repository: &updateGithubUrl, + }, + }) + require.NoError(t, err, "should return created node") + assert.Equal(t, nodeId, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Id) + assert.Equal(t, updateNodeDescription, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Description) + assert.Equal(t, updateNodeAuthor, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Author) + assert.Equal(t, updateNodeLicense, *updateNodeResponse.(drip.UpdateNode200JSONResponse).License) + assert.Equal(t, updateNodeName, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Name) + assert.Equal(t, updateNodeTags, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Tags) + assert.Equal(t, updateIcon, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Icon) + assert.Equal(t, updateGithubUrl, *updateNodeResponse.(drip.UpdateNode200JSONResponse).Repository) + + resUpdated, err := impl.GetNode(ctx, drip.GetNodeRequestObject{NodeId: nodeId}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.GetNode200JSONResponse{}, resUpdated) + res200Updated := resUpdated.(drip.GetNode200JSONResponse) + assert.Equal(t, drip.GetNode200JSONResponse{ + Id: &nodeId, + Description: &updateNodeDescription, + Author: &updateNodeAuthor, + License: &updateNodeLicense, + Name: &updateNodeName, + Tags: &updateNodeTags, + Icon: &updateIcon, + Repository: &updateGithubUrl, + }, res200Updated, "should return updated node data") + }) + + t.Run("Update Not Exist Node", func(t *testing.T) { + res, err := impl.UpdateNode(ctx, drip.UpdateNodeRequestObject{PublisherId: publisherId, NodeId: nodeId + "fake"}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.UpdateNode404JSONResponse{}, res) + }) + + t.Run("Delete Node", func(t *testing.T) { + res, err := impl.DeleteNode(ctx, drip.DeleteNodeRequestObject{PublisherId: publisherId, NodeId: nodeId}) + require.NoError(t, 
err, "should not return error") + assert.IsType(t, drip.DeleteNode204Response{}, res) + }) + }) + + t.Run("Node Version", func(t *testing.T) { + ctx, _ := setUpTest(client) + publisherId := "test-publisher-node-version" + description := "test-description" + source_code_repo := "test-source-code-repo" + website := "test-website" + support := "test-support" + logo := "test-logo" + name := "test-name" + + createPublisherResponse, err := impl.CreatePublisher(ctx, drip.CreatePublisherRequestObject{ + Body: &drip.Publisher{ + Id: &publisherId, + Description: &description, + SourceCodeRepo: &source_code_repo, + Website: &website, + Support: &support, + Logo: &logo, + Name: &name, + }, + }) + require.NoError(t, err, "should return created publisher") + require.NotNil(t, createPublisherResponse, "should return created publisher") + assert.Equal(t, publisherId, *createPublisherResponse.(drip.CreatePublisher201JSONResponse).Id) + + tokenName := "test-token-name" + tokenDescription := "test-token-description" + createPersonalAccessTokenResponse, err := impl.CreatePersonalAccessToken(ctx, drip.CreatePersonalAccessTokenRequestObject{ + PublisherId: publisherId, + Body: &drip.PersonalAccessToken{ + Name: &tokenName, + Description: &tokenDescription, + }, + }) + require.NoError(t, err, "should return created token") + require.NotNil(t, *createPersonalAccessTokenResponse.(drip.CreatePersonalAccessToken201JSONResponse).Token, "Token should have a value.") + + nodeId := "test-node1" + nodeDescription := "test-node-description" + nodeAuthor := "test-node-author" + nodeLicense := "test-node-license" + nodeName := "test-node-name" + nodeTags := []string{"test-node-tag"} + nodeVersionLiteral := "1.0.0" + changelog := "test-changelog" + dependencies := []string{"test-dependency"} + downloadUrl := "https://storage.googleapis.com/comfy-registry/test-publisher-node-version/test-node1/1.0.0/node.tar.gz" + + createdPublisher := createPublisherResponse.(drip.CreatePublisher201JSONResponse) + 
var createdNodeVersion drip.NodeVersion + + t.Run("List Node Version Before Create", func(t *testing.T) { + resVersions, err := impl.ListNodeVersions(ctx, drip.ListNodeVersionsRequestObject{NodeId: nodeId}) + require.NoError(t, err, "should return error since node version doesn't exists") + require.IsType(t, drip.ListNodeVersions200JSONResponse{}, resVersions) + assert.Empty(t, resVersions.(drip.ListNodeVersions200JSONResponse), "should not return any node versions") + }) + + t.Run("Create Node Version with Fake Token", func(t *testing.T) { + response, err := impl.PublishNodeVersion(ctx, drip.PublishNodeVersionRequestObject{ + PublisherId: publisherId, + NodeId: nodeId, + Body: &drip.PublishNodeVersionJSONRequestBody{ + Node: drip.Node{ + Id: &nodeId, + Description: &nodeDescription, + Author: &nodeAuthor, + License: &nodeLicense, + Name: &nodeName, + Tags: &nodeTags, + Repository: &source_code_repo, + }, + NodeVersion: drip.NodeVersion{ + Version: &nodeVersionLiteral, + Changelog: &changelog, + Dependencies: &dependencies, + }, + PersonalAccessToken: "faketoken", + }, + }) + require.NoError(t, err) + assert.Equal(t, "Invalid personal access token", response.(drip.PublishNodeVersion400JSONResponse).Message, "should return error message") + }) + + t.Run("Create Node Version", func(t *testing.T) { + mockStorageService.On("GenerateSignedURL", mock.Anything, mock.Anything).Return("test-url", nil) + mockStorageService.On("GetFileUrl", mock.Anything, mock.Anything, mock.Anything).Return("test-url", nil) + createNodeVersionResp, err := impl.PublishNodeVersion(ctx, drip.PublishNodeVersionRequestObject{ + PublisherId: publisherId, + NodeId: nodeId, + Body: &drip.PublishNodeVersionJSONRequestBody{ + Node: drip.Node{ + Id: &nodeId, + Description: &nodeDescription, + Author: &nodeAuthor, + License: &nodeLicense, + Name: &nodeName, + Tags: &nodeTags, + Repository: &source_code_repo, + }, + NodeVersion: drip.NodeVersion{ + Version: &nodeVersionLiteral, + Changelog: &changelog, + 
Dependencies: &dependencies, + }, + PersonalAccessToken: *createPersonalAccessTokenResponse.(drip.CreatePersonalAccessToken201JSONResponse).Token, + }, + }) + require.NoError(t, err, "should return created node version") + assert.Equal(t, nodeVersionLiteral, *createNodeVersionResp.(drip.PublishNodeVersion201JSONResponse).NodeVersion.Version) + require.Equal(t, "test-url", *createNodeVersionResp.(drip.PublishNodeVersion201JSONResponse).SignedUrl, "should return signed url") + require.Equal(t, dependencies, *createNodeVersionResp.(drip.PublishNodeVersion201JSONResponse).NodeVersion.Dependencies, "should return pip dependencies") + require.Equal(t, changelog, *createNodeVersionResp.(drip.PublishNodeVersion201JSONResponse).NodeVersion.Changelog, "should return changelog") + createdNodeVersion = *createNodeVersionResp.(drip.PublishNodeVersion201JSONResponse).NodeVersion + }) + + t.Run("Get not exist Node Version ", func(t *testing.T) { + res, err := impl.GetNodeVersion(ctx, drip.GetNodeVersionRequestObject{NodeId: nodeId + "fake", VersionId: nodeVersionLiteral}) + require.NoError(t, err, "should not return error") + assert.IsType(t, drip.GetNodeVersion404JSONResponse{}, res) + }) + + t.Run("Create Node Version of Not Exist Node", func(t *testing.T) { + response, err := impl.PublishNodeVersion(ctx, drip.PublishNodeVersionRequestObject{ + PublisherId: publisherId, + NodeId: nodeId + "fake", + Body: &drip.PublishNodeVersionJSONRequestBody{}, + }) + require.NoError(t, err) + assert.Equal(t, "Invalid personal access token", response.(drip.PublishNodeVersion400JSONResponse).Message, "should return error message") + }) + + t.Run("List Node Versions", func(t *testing.T) { + resVersions, err := impl.ListNodeVersions(ctx, drip.ListNodeVersionsRequestObject{NodeId: nodeId}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListNodeVersions200JSONResponse{}, resVersions, "should return 200") + resVersions200 := 
resVersions.(drip.ListNodeVersions200JSONResponse) + require.Len(t, resVersions200, 1, "should return only one version") + assert.Equal(t, drip.NodeVersion{ + // generated attribute + Id: resVersions200[0].Id, + CreatedAt: resVersions200[0].CreatedAt, + + Deprecated: proto.Bool(false), + Version: &nodeVersionLiteral, + Changelog: &changelog, + Dependencies: &dependencies, + DownloadUrl: &downloadUrl, + }, resVersions200[0], "should be equal") + }) + + t.Run("Update Node Version", func(t *testing.T) { + updatedChangelog := "test-changelog-2" + resUNV, err := impl.UpdateNodeVersion(ctx, drip.UpdateNodeVersionRequestObject{ + PublisherId: publisherId, + NodeId: nodeId, + VersionId: *createdNodeVersion.Id, + Body: &drip.NodeVersionUpdateRequest{ + Changelog: &updatedChangelog, + Deprecated: proto.Bool(true), + }, + }) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.UpdateNodeVersion200JSONResponse{}, resUNV, "should return 200") + + res, err := impl.ListNodeVersions(ctx, drip.ListNodeVersionsRequestObject{NodeId: nodeId}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListNodeVersions200JSONResponse{}, res, "should return 200") + res200 := res.(drip.ListNodeVersions200JSONResponse) + require.Len(t, res200, 1, "should return only one version") + updatedNodeVersion := drip.NodeVersion{ + // generated attribute + Id: res200[0].Id, + CreatedAt: res200[0].CreatedAt, + + Deprecated: proto.Bool(true), + Version: &nodeVersionLiteral, + Changelog: &updatedChangelog, + Dependencies: &dependencies, + DownloadUrl: &downloadUrl, + } + assert.Equal(t, updatedNodeVersion, res200[0], "should be equal") + createdNodeVersion = res200[0] + }) + + t.Run("List Nodes", func(t *testing.T) { + resNodes, err := impl.ListAllNodes(ctx, drip.ListAllNodesRequestObject{}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.ListAllNodes200JSONResponse{}, resNodes, "should return 200 server response") + resNodes200 
:= resNodes.(drip.ListAllNodes200JSONResponse) + assert.Len(t, *resNodes200.Nodes, 1, "should only contain 1 node") + + expectedNode := drip.Node{ + Id: &nodeId, + Name: &nodeName, + Repository: &source_code_repo, + Description: &nodeDescription, + Author: &nodeAuthor, + License: &nodeLicense, + Tags: &nodeTags, + LatestVersion: &createdNodeVersion, + Icon: proto.String(""), + Publisher: (*drip.Publisher)(&createdPublisher), + } + expectedNode.LatestVersion.DownloadUrl = (*resNodes200.Nodes)[0].LatestVersion.DownloadUrl // generated + expectedNode.LatestVersion.Deprecated = (*resNodes200.Nodes)[0].LatestVersion.Deprecated // generated + expectedNode.Publisher.CreatedAt = (*resNodes200.Nodes)[0].Publisher.CreatedAt + assert.Equal(t, expectedNode, (*resNodes200.Nodes)[0]) + }) + + t.Run("Install Node", func(t *testing.T) { + resIns, err := impl.InstallNode(ctx, drip.InstallNodeRequestObject{NodeId: nodeId}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.InstallNode200JSONResponse{}, resIns, "should return 200") + + resIns, err = impl.InstallNode(ctx, drip.InstallNodeRequestObject{ + NodeId: nodeId, Params: drip.InstallNodeParams{Version: &nodeVersionLiteral}}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.InstallNode200JSONResponse{}, resIns, "should return 200") + }) + + t.Run("Install Node Version on not exist node or version", func(t *testing.T) { + resIns, err := impl.InstallNode(ctx, drip.InstallNodeRequestObject{NodeId: nodeId + "fake"}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.InstallNode404JSONResponse{}, resIns, "should return 404") + resIns, err = impl.InstallNode(ctx, drip.InstallNodeRequestObject{ + NodeId: nodeId, Params: drip.InstallNodeParams{Version: proto.String(nodeVersionLiteral + "fake")}}) + require.NoError(t, err, "should not return error") + require.IsType(t, drip.InstallNode404JSONResponse{}, resIns, "should return 404") + }) + }) +} + +func 
isTokenMasked(token string) bool { + tokenLength := len(token) + // Ensure that every character after the first 4 is an asterisk (matches maskToken's output). + middle := token[4:tokenLength] + return strings.Count(middle, "*") == len(middle) +} diff --git a/integration-tests/test_util.go b/integration-tests/test_util.go new file mode 100644 index 0000000..ecdccd5 --- /dev/null +++ b/integration-tests/test_util.go @@ -0,0 +1,111 @@ +package integration + +import ( + "context" + "fmt" + "net" + + "registry-backend/ent" + "registry-backend/ent/migrate" + auth "registry-backend/server/middleware" + "testing" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog/log" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" + + _ "github.com/lib/pq" +) + +func createTestUser(ctx context.Context, client *ent.Client) *ent.User { + return client.User.Create(). + SetID(uuid.New().String()). + SetIsApproved(true). + SetName("integration-test"). + SetEmail("integration-test@gmail.com"). + SaveX(ctx) +} + +func decorateUserInContext(ctx context.Context, user *ent.User) context.Context { + return context.WithValue(ctx, auth.UserContextKey, &auth.UserDetails{ + ID: user.ID, + Email: user.Email, + Name: user.Name, + }) +} + +func setupDB(t *testing.T, ctx context.Context) (*ent.Client, *postgres.PostgresContainer) { + // Define Postgres container request + postgresContainer, err := postgres.RunContainer(ctx, + testcontainers.WithImage("docker.io/postgres:15.2-alpine"), + postgres.WithDatabase("postgres"), + postgres.WithUsername("postgres"), + postgres.WithPassword("password"), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(60*time.Second)), + ) + if err != nil { + t.Fatalf("Failed to start container: %s", err) + } + println("Postgres container started") + + host, err := postgresContainer.Host(ctx) + if err != nil { + t.Fatalf("Failed to get the host: %s", err) + } + port, err := postgresContainer.MappedPort(ctx, "5432") + if err != nil { + t.Fatalf("Failed to get the mapped port: %s", err) + } + waitPortOpen(t, host, port.Port(), time.Minute) + databaseURL := fmt.Sprintf("postgres://postgres:password@%s:%s/postgres?sslmode=disable", host, port.Port()) + + if err != nil { + t.Fatalf("Failed to start container: %s", err) + } + + client, err := ent.Open("postgres", databaseURL) + if err != nil { + log.Ctx(ctx).Fatal().Err(err).Msg("failed opening connection to postgres") + } + + if err := client.Schema.Create(context.Background(), migrate.WithDropIndex(true), + migrate.WithDropColumn(true)); err != nil { + log.Ctx(ctx).Fatal().Err(err).Msg("failed creating schema resources.") + println("Failed to create schema") + + } + println("Schema created") + return client, postgresContainer +} + +func waitPortOpen(t *testing.T, host string, port string, timeout time.Duration) { + tc := time.After(timeout) + w, m := 500*time.Microsecond, 32*time.Second + for { + select { + case <-tc: + t.Fatalf("timeout waiting to connect to '%s:%s'", host, port) + default: + } + + conn, err := net.Dial("tcp", net.JoinHostPort(host, port)) + if err != nil { + t.Logf("error connecting to '%s:%s' : %s", host, port, err) + if w < m { + w *= 2 + } + <-time.After(w) + continue + } + + conn.Close() + return + } + +} diff --git a/logging/logging.go b/logging/logging.go new file mode 100644 index 0000000..981dd40 --- /dev/null +++ b/logging/logging.go @@ -0,0 +1,61 @@ +package drip_logging + +import ( + "github.com/rs/zerolog/log" + "os" + "sync" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/pkgerrors" +) + +func SetupLogger() zerolog.Logger { + var once sync.Once + 
var log zerolog.Logger + + once.Do(func() { + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + // Needed to conform with GCP Cloud Logging format. + zerolog.LevelFieldName = "severity" + zerolog.LevelFieldMarshalFunc = func(l zerolog.Level) string { + switch l { + case zerolog.DebugLevel: + return "DEBUG" + case zerolog.InfoLevel: + return "INFO" + case zerolog.WarnLevel: + return "WARNING" + case zerolog.ErrorLevel: + return "ERROR" + case zerolog.FatalLevel: + return "CRITICAL" + case zerolog.PanicLevel: + return "ALERT" + default: + return "DEFAULT" + } + } + log = zerolog.New(os.Stdout) + + if os.Getenv("DRIP_ENV") == "localdev" { + log = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + } + }) + + return log +} + +func SetGlobalLogLevel(logLevel string) { + // Default to info level + defaultLevel := zerolog.InfoLevel + if logLevel != "" { + level, err := zerolog.ParseLevel(logLevel) + if err == nil { + defaultLevel = level + } else { + log.Error().Err(err).Msg("Invalid log level") + } + } + + zerolog.SetGlobalLevel(defaultLevel) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..9dbd537 --- /dev/null +++ b/main.go @@ -0,0 +1,52 @@ +package main + +import ( + "context" + "fmt" + "os" + "registry-backend/config" + "registry-backend/ent" + "registry-backend/ent/migrate" + drip_logging "registry-backend/logging" + "registry-backend/server" + + "github.com/rs/zerolog/log" + + _ "github.com/lib/pq" +) + +func main() { + drip_logging.SetGlobalLogLevel(os.Getenv("LOG_LEVEL")) + + connection_string := os.Getenv("DB_CONNECTION_STRING") + + config := config.Config{ + ProjectID: os.Getenv("PROJECT_ID"), + DripEnv: os.Getenv("DRIP_ENV"), + } + + var dsn string + if os.Getenv("DRIP_ENV") == "localdev" { + dsn = fmt.Sprintf("%s sslmode=disable", connection_string) + } else { + dsn = connection_string + } + + client, err := ent.Open("postgres", dsn) + + if err != nil { + log.Fatal().Err(err).Msg("failed opening connection to postgres.") + } + defer 
client.Close() + // Run the auto migration tool for localdev. + if os.Getenv("DRIP_ENV") == "localdev" || os.Getenv("DRIP_ENV") == "staging" { + log.Info().Msg("Running migrations") + if err := client.Schema.Create(context.Background(), migrate.WithDropIndex(true), + migrate.WithDropColumn(true)); err != nil { + log.Fatal().Err(err).Msg("failed creating schema resources.") + } + } + + server := server.NewServer(client, &config) + server.Start() +} diff --git a/mapper/context.go b/mapper/context.go new file mode 100644 index 0000000..4c0d6c0 --- /dev/null +++ b/mapper/context.go @@ -0,0 +1,20 @@ +package mapper + +import ( + "context" + "errors" + auth "registry-backend/server/middleware" +) + +func GetUserIDFromContext(ctx context.Context) (string, error) { + user, ok := ctx.Value(auth.UserContextKey).(*auth.UserDetails) + if !ok || user == nil { + return "", errors.New("no user in context") + } + + if user.ID == "" { + return "", errors.New("no user id in context") + } + + return user.ID, nil +} diff --git a/mapper/node.go b/mapper/node.go new file mode 100644 index 0000000..118151d --- /dev/null +++ b/mapper/node.go @@ -0,0 +1,109 @@ +package mapper + +import ( + "fmt" + "regexp" + "registry-backend/drip" + "registry-backend/ent" + "strings" +) + +func ApiCreateNodeToDb(publisherId string, node *drip.Node, client *ent.Client) (*ent.NodeCreate, error) { + newNode := client.Node.Create() + newNode.SetPublisherID(publisherId) + if node.Description != nil { + newNode.SetDescription(*node.Description) + } + if node.Id != nil { + lowerCaseNodeID := strings.ToLower(*node.Id) + newNode.SetID(lowerCaseNodeID) + } + if node.Author != nil { + newNode.SetAuthor(*node.Author) + } + if node.License != nil { + newNode.SetLicense(*node.License) + } + if node.Name != nil { + newNode.SetName(*node.Name) + } + if node.Tags != nil { + newNode.SetTags(*node.Tags) + } + if node.Repository != nil { + newNode.SetRepositoryURL(*node.Repository) + } + if node.Icon != nil { + 
newNode.SetIconURL(*node.Icon) + } + + return newNode, nil +} + +func ApiUpdateNodeToUpdateFields(nodeID string, node *drip.Node, client *ent.Client) *ent.NodeUpdateOne { + update := client.Node.UpdateOneID(nodeID) + if node.Description != nil { + update.SetDescription(*node.Description) + } + if node.Author != nil { + update.SetAuthor(*node.Author) + } + if node.License != nil { + update.SetLicense(*node.License) + } + if node.Name != nil { + update.SetName(*node.Name) + } + if node.Tags != nil { + update.SetTags(*node.Tags) + } + if node.Repository != nil { + update.SetRepositoryURL(*node.Repository) + } + if node.Icon != nil { + update.SetIconURL(*node.Icon) + } + + return update +} + +func ValidateNode(node *drip.Node) error { + if node.Id != nil { + if len(*node.Id) > 100 { + return fmt.Errorf("node id is too long") + } + if !IsValidNodeID(*node.Id) { + return fmt.Errorf("invalid node id") + } + } + return nil +} + +func IsValidNodeID(nodeID string) bool { + if len(nodeID) == 0 || len(nodeID) > 50 { + return false + } + // Regular expression pattern for Node ID validation (lowercase letters only) + pattern := `^[a-z][a-z0-9-_]+(\.[a-z0-9-_]+)*$` + // Compile the regular expression pattern + regex := regexp.MustCompile(pattern) + // Check if the string matches the pattern + return regex.MatchString(nodeID) +} + +func DbNodeToApiNode(node *ent.Node) *drip.Node { + if node == nil { + return nil + } + + return &drip.Node{ + Author: &node.Author, + Description: &node.Description, + Id: &node.ID, + License: &node.License, + Name: &node.Name, + Tags: &node.Tags, + Repository: &node.RepositoryURL, + Icon: &node.IconURL, + } +} diff --git a/mapper/node_test.go b/mapper/node_test.go new file mode 100644 index 0000000..1bcb0fd --- /dev/null +++ b/mapper/node_test.go @@ -0,0 +1,152 @@ +package mapper_test + +import ( + "registry-backend/drip" + "registry-backend/mapper" + "testing" +) + +// TestIsValidNodeID tests the isValidNodeID function with various inputs. 
+func TestIsValidNodeID(t *testing.T) { + testCases := []struct { + name string + node *drip.Node + expectedError string // include this field to specify what error message you expect + }{ + { + name: "Valid Node ID", + node: &drip.Node{Id: stringPtr("validnodeid1")}, + expectedError: "", + }, + { + name: "Node ID Too Long", + node: &drip.Node{Id: stringPtr("a12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901")}, + expectedError: "node id is too long", + }, + { + name: "Invalid Node ID", + node: &drip.Node{Id: stringPtr("123")}, + expectedError: "invalid node id", + }, + + { + name: "Valid Node ID", + node: &drip.Node{Id: stringPtr("node1")}, + expectedError: "", + }, + { + name: "Valid with dash", + node: &drip.Node{Id: stringPtr("node-1")}, + expectedError: "", + }, + { + name: "Invalid with uppercase", + node: &drip.Node{Id: stringPtr("Node")}, + expectedError: "invalid node id", + }, + { + name: "Invalid with special characters", + node: &drip.Node{Id: stringPtr("node_@")}, + expectedError: "invalid node id", + }, + { + name: "Invalid start with number", + node: &drip.Node{Id: stringPtr("1node")}, + expectedError: "invalid node id", + }, + { + name: "Invalid start with dash", + node: &drip.Node{Id: stringPtr("-node")}, + expectedError: "invalid node id", + }, + { + name: "Empty input", + node: &drip.Node{Id: stringPtr("")}, + expectedError: "invalid node id", + }, + { + name: "Valid all lowercase letters", + node: &drip.Node{Id: stringPtr("abcdefghijklmnopqrstuvwxyz")}, + expectedError: "", + }, + { + name: "Valid all uppercase letters", + node: &drip.Node{Id: stringPtr("ABCD")}, + expectedError: "invalid node id", + }, + { + name: "Valid containing underscore", + node: &drip.Node{Id: stringPtr("comfy_ui")}, + expectedError: "", + }, + { + name: "Valid ID with hyphen", + node: &drip.Node{Id: stringPtr("valid-node-id")}, + expectedError: "", + }, + { + name: "Valid ID with underscore", + node: &drip.Node{Id: 
stringPtr("valid_node_id")}, + expectedError: "", + }, + { + name: "Valid ID with dot", + node: &drip.Node{Id: stringPtr("valid.node.id")}, + expectedError: "", + }, + { + name: "Invalid ID with number first", + node: &drip.Node{Id: stringPtr("1invalidnodeid")}, + expectedError: "invalid node id", + }, + { + name: "Invalid ID with consecutive dots", + node: &drip.Node{Id: stringPtr("invalid..nodeid")}, + expectedError: "invalid node id", + }, + { + name: "Invalid ID with special character first", + node: &drip.Node{Id: stringPtr("-invalidnodeid")}, + expectedError: "invalid node id", + }, + { + name: "Valid complex ID", + node: &drip.Node{Id: stringPtr("valid-node.id_1")}, + expectedError: "", + }, + { + name: "Invalid ID with special characters only", + node: &drip.Node{Id: stringPtr("$$$$")}, + expectedError: "invalid node id", + }, + { + name: "Invalid ID with leading dot", + node: &drip.Node{Id: stringPtr(".invalidnodeid")}, + expectedError: "invalid node id", + }, + { + name: "Invalid ID with ending dot", + node: &drip.Node{Id: stringPtr("invalidnodeid.")}, + expectedError: "invalid node id", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := mapper.ValidateNode(tc.node) + if err != nil { + if tc.expectedError == "" { + t.Errorf("expected no error, got %v", err) + } else if err.Error() != tc.expectedError { + t.Errorf("expected error message %q, got %q", tc.expectedError, err.Error()) + } + } else if tc.expectedError != "" { + t.Errorf("expected error %q, got none", tc.expectedError) + } + }) + } +} + +func stringPtr(s string) *string { + return &s +} diff --git a/mapper/node_version.go b/mapper/node_version.go new file mode 100644 index 0000000..6931453 --- /dev/null +++ b/mapper/node_version.go @@ -0,0 +1,80 @@ +package mapper + +import ( + "fmt" + "registry-backend/drip" + "registry-backend/ent" + + "github.com/Masterminds/semver/v3" + "github.com/google/uuid" +) + +func ApiUpdateNodeVersionToUpdateFields(versionId 
string, updateRequest *drip.NodeVersionUpdateRequest, client *ent.Client) *ent.NodeVersionUpdateOne { + update := client.NodeVersion.UpdateOneID(uuid.MustParse(versionId)) + if updateRequest.Changelog != nil { + update.SetChangelog(*updateRequest.Changelog) + } + if updateRequest.Deprecated != nil { + update.SetDeprecated(*updateRequest.Deprecated) + } + return update +} + +func ValidatePublishNodeVersionRequest(request drip.PublishNodeVersionRequestObject) error { + if request.NodeId != *request.Body.Node.Id { + return fmt.Errorf("node ID in URL and body must be the same") + } + + return nil +} + +func ApiCreateNodeVersionToDb(nodeId string, nodeVersion *drip.NodeVersion, client *ent.Client) *ent.NodeVersionCreate { + create := client.NodeVersion.Create() + if nodeId != "" { + create.SetNodeID(nodeId) + } + if nodeVersion.Version != nil { + create.SetVersion(*nodeVersion.Version) + } + if nodeVersion.Changelog != nil { + create.SetChangelog(*nodeVersion.Changelog) + } + if nodeVersion.Dependencies != nil { + create.SetPipDependencies(*nodeVersion.Dependencies) + } + + return create +} + +func DbNodeVersionToApiNodeVersion(dbNodeVersion *ent.NodeVersion) *drip.NodeVersion { + if dbNodeVersion == nil { + return nil + } + id := dbNodeVersion.ID.String() + + if dbNodeVersion.Edges.StorageFile == nil { + return &drip.NodeVersion{ + Id: &id, + Version: &dbNodeVersion.Version, + Changelog: &dbNodeVersion.Changelog, + Deprecated: &dbNodeVersion.Deprecated, + Dependencies: &dbNodeVersion.PipDependencies, + CreatedAt: &dbNodeVersion.CreateTime, + } + } + + return &drip.NodeVersion{ + Id: &id, + Version: &dbNodeVersion.Version, + Changelog: &dbNodeVersion.Changelog, + DownloadUrl: &dbNodeVersion.Edges.StorageFile.FileURL, + Deprecated: &dbNodeVersion.Deprecated, + Dependencies: &dbNodeVersion.PipDependencies, + CreatedAt: &dbNodeVersion.CreateTime, + } +} + +func CheckValidSemv(version string) bool { + _, err := semver.NewVersion(version) + return err == nil +} diff --git 
a/mapper/personal_access_token.go b/mapper/personal_access_token.go new file mode 100644 index 0000000..4859902 --- /dev/null +++ b/mapper/personal_access_token.go @@ -0,0 +1,26 @@ +package mapper + +import ( + "registry-backend/drip" + "registry-backend/ent" + "strings" +) + +func DbToApiPersonalAccessToken(dbToken *ent.PersonalAccessToken) *drip.PersonalAccessToken { + maskedToken := maskToken(dbToken.Token) + return &drip.PersonalAccessToken{ + Id: &dbToken.ID, + Name: &dbToken.Name, + CreatedAt: &dbToken.CreateTime, + Description: &dbToken.Description, + Token: &maskedToken, + } +} + +func maskToken(token string) string { + tokenLength := len(token) + if tokenLength <= 8 { + return strings.Repeat("*", tokenLength) + } + return token[:4] + strings.Repeat("*", 3) +} diff --git a/mapper/publisher.go b/mapper/publisher.go new file mode 100644 index 0000000..6a02a00 --- /dev/null +++ b/mapper/publisher.go @@ -0,0 +1,120 @@ +package mapper + +import ( + "fmt" + "regexp" + "registry-backend/drip" + "registry-backend/ent" +) + +func ApiCreatePublisherToDb(publisher *drip.Publisher, client *ent.Client) (*ent.PublisherCreate, error) { + newPublisher := client.Publisher.Create() + if publisher.Description != nil { + newPublisher.SetDescription(*publisher.Description) + } + if publisher.Id != nil { + newPublisher.SetID(*publisher.Id) + } + if publisher.Logo != nil { + newPublisher.SetLogoURL(*publisher.Logo) + } + if publisher.Name != nil { + newPublisher.SetName(*publisher.Name) + } + if publisher.SourceCodeRepo != nil { + newPublisher.SetSourceCodeRepo(*publisher.SourceCodeRepo) + } + if publisher.Support != nil { + newPublisher.SetSupportEmail(*publisher.Support) + } + if publisher.Website != nil { + newPublisher.SetWebsite(*publisher.Website) + } + + return newPublisher, nil +} + +func ApiUpdatePublisherToUpdateFields(publisherId string, publisher *drip.Publisher, client *ent.Client) *ent.PublisherUpdateOne { + update := client.Publisher.UpdateOneID(publisherId) + if 
publisher.Description != nil { + update.SetDescription(*publisher.Description) + } + if publisher.Logo != nil { + update.SetLogoURL(*publisher.Logo) + } + if publisher.Name != nil { + update.SetName(*publisher.Name) + } + if publisher.SourceCodeRepo != nil { + update.SetSourceCodeRepo(*publisher.SourceCodeRepo) + } + if publisher.Support != nil { + update.SetSupportEmail(*publisher.Support) + } + if publisher.Website != nil { + update.SetWebsite(*publisher.Website) + } + + return update +} + +func ValidatePublisher(publisher *drip.Publisher) error { + if publisher.Id != nil { + if !IsValidPublisherID(*publisher.Id) { + return fmt.Errorf("invalid publisher id") + } + } + return nil +} + +func IsValidPublisherID(publisherID string) bool { + // Regular expression pattern for Publisher ID validation (lowercase letters only) + pattern := "^[a-z][a-z0-9-]*$" + // Compile the regular expression pattern + regex := regexp.MustCompile(pattern) + // Check if the string matches the pattern + return regex.MatchString(publisherID) +} + +func DbPublisherToApiPublisher(publisher *ent.Publisher, public bool) *drip.Publisher { + members := make([]drip.PublisherMember, 0) + + if publisher.Edges.PublisherPermissions != nil { + for _, permission := range publisher.Edges.PublisherPermissions { + if permission.Edges.User != nil { + member := drip.PublisherMember{} + // If the data is not public, include sensitive information. 
+ if !public { + member.User = &drip.PublisherUser{ + Id: ToStringPointer(permission.Edges.User.ID), + Email: ToStringPointer(permission.Edges.User.Email), + Name: ToStringPointer(permission.Edges.User.Name), + } + member.Role = ToStringPointer(string(permission.Permission)) + } else { + member.User = &drip.PublisherUser{ + Name: ToStringPointer(permission.Edges.User.Name), + } + } + + members = append(members, member) + } + } + } + + return &drip.Publisher{ + Description: &publisher.Description, + Id: &publisher.ID, + Logo: &publisher.LogoURL, + Name: &publisher.Name, + SourceCodeRepo: &publisher.SourceCodeRepo, + Support: &publisher.SupportEmail, + Website: &publisher.Website, + CreatedAt: &publisher.CreateTime, + Members: &members, + } +} + +func ToStringPointer(s string) *string { + return &s +} diff --git a/mock/gateways/mock_slack_service.go b/mock/gateways/mock_slack_service.go new file mode 100644 index 0000000..3a52a3f --- /dev/null +++ b/mock/gateways/mock_slack_service.go @@ -0,0 +1,14 @@ +package gateways + +import ( + "github.com/stretchr/testify/mock" +) + +type MockSlackService struct { + mock.Mock +} + +func (m *MockSlackService) SendRegistryMessageToSlack(msg string) error { + args := m.Called(msg) + return args.Error(0) +} diff --git a/mock/gateways/mock_storage_service.go b/mock/gateways/mock_storage_service.go new file mode 100644 index 0000000..f05119c --- /dev/null +++ b/mock/gateways/mock_storage_service.go @@ -0,0 +1,33 @@ +package gateways + +import ( + "context" + "io" + + "github.com/stretchr/testify/mock" +) + +// MockStorageService is a mock of StorageService interface +type MockStorageService struct { + mock.Mock +} + +func (m *MockStorageService) UploadFile(ctx context.Context, bucket, object, filePath string) (string, error) { + args := m.Called(ctx, bucket, object, filePath) + return args.String(0), args.Error(1) +} + +func (m *MockStorageService) StreamFileUpload(w io.Writer, objectName, blob string) (string, string, error) { + 
args := m.Called(w, objectName, blob) + return args.String(0), args.String(1), args.Error(2) +} + +func (m *MockStorageService) GetFileUrl(ctx context.Context, bucketName, objectPath string) (string, error) { + args := m.Called(ctx, bucketName, objectPath) + return args.String(0), args.Error(1) +} + +func (m *MockStorageService) GenerateSignedURL(bucketName, objectName string) (string, error) { + args := m.Called(bucketName, objectName) + return args.String(0), args.Error(1) +} diff --git a/my_pg_hba.conf b/my_pg_hba.conf new file mode 100644 index 0000000..af53682 --- /dev/null +++ b/my_pg_hba.conf @@ -0,0 +1,2 @@ +# TYPE DATABASE USER ADDRESS METHOD +host all all 0.0.0.0/0 md5 diff --git a/openapi.yml b/openapi.yml new file mode 100644 index 0000000..90327c4 --- /dev/null +++ b/openapi.yml @@ -0,0 +1,1489 @@ +openapi: "3.0.2" +info: + title: Comfy API + version: "1.0" +servers: + - url: https://api.comfy.org +paths: + /users: + get: + summary: Get information about the calling user. + operationId: getUser + security: + - BearerAuth: [ ] + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/User" + + "404": + description: Not Found + "401": + description: Unauthorized + /upload-artifact: + post: + summary: Receive artifacts (output files) from the ComfyUI GitHub Action + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + repo: + type: string + description: Repository name + job_id: + type: string + description: Unique identifier for the job + run_id: + type: string + description: Unique identifier for the run + os: + type: string + description: Operating system used in the run + cuda_version: + type: string + description: Cuda version. + bucket_name: + type: string + description: The name of the bucket where the output files are stored + output_files_gcs_paths: + type: string + description: A comma separated string that contains GCS path(s) to output files. 
eg. gs://bucket-name/output, gs://bucket-name/output2 + comfy_logs_gcs_path: + type: string + description: The path to ComfyUI logs. eg. gs://bucket-name/logs + commit_hash: + type: string + commit_time: + type: string + description: The time of the commit in the format of "YYYY-MM-DDTHH:MM:SSZ" (2016-10-10T00:00:00Z) + commit_message: + type: string + description: The commit message + workflow_name: + type: string + description: The name of the workflow + branch_name: + type: string + start_time: + type: integer + format: int64 + description: The start time of the job as a Unix timestamp. + end_time: + type: integer + format: int64 + description: The end time of the job as a Unix timestamp. + required: + - repo + - job_id + - run_id + - os + - cuda_version + - commit_hash + - commit_time + - commit_message + - branch_name + - workflow_name + - start_time + - end_time + + responses: + '200': + description: Successfully received the artifact details + content: + application/json: + schema: + type: object + properties: + message: + type: string + '400': + description: Invalid request + '500': + description: Internal server error + /gitcommit: + get: + summary: Retrieve CI data for a given commit + description: Returns all runs, jobs, job results, and storage files associated with a given commit. + parameters: + - in: query + name: commitId + required: false + schema: + type: string + description: The ID of the commit to fetch data for. + - in: query + name: operatingSystem + required: false + schema: + type: string + description: The operating system to filter the CI data by. + - in: query + name: workflowName + required: false + schema: + type: string + description: The name of the workflow to filter the CI data by. + - in: query + name: branch + required: false + schema: + type: string + description: The branch of the gitcommit to filter the CI data by. 
+ - in: query + name: page + required: false + schema: + type: integer + default: 1 + description: The page number to retrieve. + - in: query + name: pageSize + required: false + schema: + type: integer + default: 10 + description: The number of items to include per page. + - in: query + name: repoName + required: false + schema: + type: string + default: comfyanonymous/ComfyUI + description: The repo to filter by. + responses: + '200': + description: An object containing runs, jobs, job results, and storage files + content: + application/json: + schema: + type: object + properties: + jobResults: + type: array + items: + $ref: '#/components/schemas/ActionJobResult' + totalNumberOfPages: + type: integer + '404': + description: Commit not found + '500': + description: Internal server error + /branch: + get: + summary: Retrieve all distinct branches for a given repo + description: Returns all branches for a given repo. + parameters: + - in: query + name: repo_name + required: true + schema: + type: string + default: comfyanonymous/ComfyUI + description: The repo to filter by. 
+ responses: + '200': + description: An array of branches + content: + application/json: + schema: + type: object + properties: + branches: + type: array + items: + type: string + '404': + description: Repo not found + '500': + description: Internal server error + + /users/publishers/: + get: + summary: Retrieve all publishers for a given user + operationId: listPublishersForUser + tags: + - Publishers + responses: + '200': + description: A list of publishers + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Publisher' + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/permissions: + get: + summary: Retrieve permissions the user has for a given publisher + operationId: getPermissionOnPublisher + tags: + - Publishers + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + responses: + '200': + description: A list of permissions + content: + application/json: + schema: + type: object + properties: + canEdit: + type: boolean + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /publishers/validate: + get: + summary: Validate if a publisher username is available + description: Checks if the publisher username is already taken. + operationId: validatePublisher + tags: + - Publishers + parameters: + - in: query + name: username + schema: + type: string + description: The publisher username to validate. 
+ required: true + responses: + '200': + description: Username validation result + content: + application/json: + schema: + type: object + properties: + isAvailable: + type: boolean + description: True if the username is available, false otherwise. + '400': + description: Invalid input, such as missing username in the query. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers: + post: + summary: Create a new publisher + operationId: createPublisher + security: + - BearerAuth: [ ] + tags: + - Publishers + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Publisher' + responses: + '201': + description: Publisher created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Publisher' + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + get: + summary: Retrieve all publishers + operationId: listPublishers + tags: + - Publishers + responses: + '200': + description: A list of publishers + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Publisher' + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}: + get: + summary: Retrieve a publisher by ID + 
operationId: getPublisher + tags: + - Publishers + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + responses: + '200': + description: Publisher retrieved successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Publisher' + '404': + description: Publisher not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + put: + summary: Update a publisher + operationId: updatePublisher + security: + - BearerAuth: [ ] + tags: + - Publishers + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Publisher' + responses: + '200': + description: Publisher updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Publisher' + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + '404': + description: Publisher not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + delete: + summary: Delete a publisher + operationId: deletePublisher + security: + - BearerAuth: [ ] + tags: + - Publishers + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + responses: + '204': + description: Publisher deleted successfully + '404': + description: Publisher not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: 
'#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/nodes: + post: + summary: Create a new custom node + operationId: createNode + tags: + - Nodes + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + responses: + '201': + description: Node created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + '400': + description: Bad request, invalid input data. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + summary: Retrieve all nodes + operationId: listNodesForPublisher + security: + - BearerAuth: [ ] + tags: + - Nodes + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + responses: + '200': + description: List of all nodes + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Node' + '400': + description: Bad request, invalid input data. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/nodes/{nodeId}: + put: + summary: Update a specific node + operationId: updateNode + tags: + - Nodes + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + responses: + '200': + description: Node updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Node not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + summary: Delete a specific node + operationId: deleteNode + tags: + - Nodes + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + responses: + '204': + description: Node deleted successfully + '404': + description: Node not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + 
application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + + /publishers/{publisherId}/nodes/{nodeId}/permissions: + get: + summary: Retrieve permissions the user has for a given publisher + operationId: getPermissionOnPublisherNodes + tags: + - Publishers + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + responses: + '200': + description: A list of permissions + content: + application/json: + schema: + type: object + properties: + canEdit: + type: boolean + '400': + description: Bad request, invalid input data + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/nodes/{nodeId}/versions: + post: + summary: Publish a new version of a node + operationId: publishNodeVersion + tags: + - Versions + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + personal_access_token: + type: string + node_version: + $ref: '#/components/schemas/NodeVersion' + node: + $ref: '#/components/schemas/Node' + required: + - node + - node_version + - personal_access_token + responses: + '201': + description: New version published successfully + content: + application/json: + schema: + type: object + properties: + signedUrl: + type: string + description: The signed URL to upload the node version token. + node_version: + $ref: '#/components/schemas/NodeVersion' + '400': + description: Bad request, invalid input data. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/nodes/{nodeId}/versions/{versionId}: + delete: + summary: Unpublish (delete) a specific version of a node + operationId: deleteNodeVersion + tags: + - Versions + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + - in: path + name: versionId + required: true + schema: + type: string + responses: + '204': + description: Version unpublished (deleted) successfully + '404': + description: Version not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + put: + summary: Update changelog and deprecation status of a node version + operationId: updateNodeVersion + description: Update only the changelog and deprecated status of a specific version of a node. + tags: + - Versions + security: + - BearerAuth: [ ] + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: nodeId + required: true + schema: + type: string + - in: path + name: versionId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NodeVersionUpdateRequest' + responses: + '200': + description: Version updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/NodeVersion' + '400': + description: Bad request, invalid input data. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Version not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/tokens: + post: + summary: Create a new personal access token + operationId: createPersonalAccessToken + security: + - BearerAuth: [ ] + tags: + - Token Management + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PersonalAccessToken' + responses: + '201': + description: Token created successfully + content: + application/json: + schema: + type: object + properties: + token: + type: string + description: The newly created personal access token. + '400': + description: Bad request, invalid input data. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + + get: + summary: Retrieve all personal access tokens for a publisher + operationId: listPersonalAccessTokens + security: + - BearerAuth: [ ] + tags: + - Token Management + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + responses: + '200': + description: List of all personal access tokens + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/PersonalAccessToken' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: No tokens found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /publishers/{publisherId}/tokens/{tokenId}: + delete: + summary: Delete a specific personal access token + operationId: deletePersonalAccessToken + security: + - BearerAuth: [ ] + tags: + - Token Management + parameters: + - in: path + name: publisherId + required: true + schema: + type: string + - in: path + name: tokenId + required: true + schema: + type: string + responses: + '204': + description: Token deleted successfully + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Token not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /nodes: + get: 
+ summary: Retrieves a list of nodes + description: Returns a paginated list of nodes across all publishers. + operationId: listAllNodes + tags: + - Nodes + parameters: + - in: query + name: page + description: Page number of the nodes list + required: false + schema: + type: integer + default: 1 + - in: query + name: limit + description: Number of nodes to return per page + required: false + schema: + type: integer + default: 10 + responses: + '200': + description: A paginated list of nodes + content: + application/json: + schema: + type: object + properties: + total: + type: integer + description: Total number of nodes available + nodes: + type: array + items: + $ref: '#/components/schemas/Node' + page: + type: integer + description: Current page number + limit: + type: integer + description: Maximum number of nodes per page + totalPages: + type: integer + description: Total number of pages available + '400': + description: Invalid input, object invalid + '404': + description: Not found + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /nodes/{nodeId}: + get: + summary: Retrieve a specific node by ID + description: Returns the details of a specific node. + operationId: getNode + tags: + - Nodes + parameters: + - in: path + name: nodeId + required: true + schema: + type: string + responses: + '200': + description: Node details + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Node not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /nodes/{nodeId}/install: + get: + summary: Returns a node version to be installed. 
+ description: Retrieves the node data for installation, either the latest or a specific version. + operationId: installNode + tags: + - Nodes + parameters: + - in: path + name: nodeId + required: true + description: The unique identifier of the node. + schema: + type: string + - in: query + name: version + required: false + description: Specific version of the node to retrieve. If omitted, the latest version is returned. + schema: + type: string + pattern: '^\d+\.\d+\.\d+$' + responses: + '200': + description: Node data returned successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/NodeVersion' + '400': + description: Invalid input, such as a bad version format. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Node not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /nodes/{nodeId}/versions: + get: + summary: List all versions of a node + operationId: listNodeVersions + tags: + - Versions + parameters: + - in: path + name: nodeId + required: true + schema: + type: string + responses: + '200': + description: List of all node versions + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NodeVersion' + '404': + description: Node not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /nodes/{nodeId}/versions/{versionId}: + get: + summary: Retrieve a specific version of a node + operationId: getNodeVersion + tags: + - Versions + parameters: + - in: path + name: nodeId + required: 
true + schema: + type: string + - in: path + name: versionId + description: The version of the node. (Not a UUID). + required: true + schema: + type: string + responses: + '200': + description: Detailed information about a specific node version + content: + application/json: + schema: + $ref: '#/components/schemas/NodeVersion' + '404': + description: Node version not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' +components: + schemas: + PersonalAccessToken: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the personal access token + name: + type: string + description: Required. The name of the token. Can be a simple description. + description: + type: string + description: Optional. A more detailed description of the token's intended use. + createdAt: + type: string + format: date-time + description: "[Output Only] The date and time the token was created." + token: + type: string + description: "[Output Only]. The personal access token. Only returned during creation." + GitCommit: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the GitCommit + commit_hash: + type: string + description: The hash of the commit + commit_name: + type: string + description: The name of the commit + branch_name: + type: string + description: The branch where the commit was made + author: + type: string + description: The author of the commit + timestamp: + type: string + format: date-time + description: The timestamp when the commit was made + User: + type: object + properties: + id: + type: string + description: The unique id for this user. + email: + type: string + description: The email address for this user. + name: + type: string + description: The name for this user. 
+ isApproved: + type: boolean + description: Indicates if the user is approved. + isAdmin: + type: boolean + description: Indicates if the user has admin privileges. + PublisherUser: + type: object + properties: + id: + type: string + description: The unique id for this user. + email: + type: string + description: The email address for this user. + name: + type: string + description: The name for this user. + ErrorResponse: + type: object + properties: + error: + type: string + message: + type: string + required: + - error + - message + ActionJobResult: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the job result + workflow_name: + type: string + description: Name of the workflow + operating_system: + type: string + description: Operating system used + gpu_type: + type: string + description: GPU type used + pytorch_version: + type: string + description: PyTorch version used + action_run_id: + type: string + description: Identifier of the run this result belongs to + commit_hash: + type: string + description: The hash of the commit + commit_id: + type: string + description: The ID of the commit + commit_time: + type: integer + format: int64 + description: The Unix timestamp when the commit was made + commit_message: + type: string + description: The message of the commit + git_repo: + type: string + description: The repository name + start_time: + type: integer + format: int64 + description: The start time of the job as a Unix timestamp. + end_time: + type: integer + format: int64 + description: The end time of the job as a Unix timestamp. 
+ storage_file: + $ref: "#/components/schemas/StorageFile" + StorageFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the storage file + file_path: + type: string + description: Path to the file in storage + public_url: + type: string + description: Public URL + Publisher: + type: object + properties: + name: + type: string + id: + type: string + description: The unique identifier for the publisher. It's akin to a username. Should be lowercase. + description: + type: string + website: + type: string + support: + type: string + source_code_repo: + type: string + logo: + type: string + description: URL to the publisher's logo. + createdAt: + type: string + format: date-time + description: The date and time the publisher was created. + members: + type: array + items: + $ref: "#/components/schemas/PublisherMember" + description: A list of members in the publisher. + PublisherMember: + type: object + properties: + id: + type: string + description: The unique identifier for the publisher member. + user: + $ref: "#/components/schemas/PublisherUser" + description: The user associated with this publisher member. + role: + type: string + description: The role of the user in the publisher. + Node: + type: object + properties: + id: + type: string + description: "The unique identifier of the node." + name: + type: string + description: The display name of the node. + description: + type: string + author: + type: string + license: + type: string + description: The path to the LICENSE file in the node's repository. + icon: + type: string + description: URL to the node's icon. + repository: + type: string + description: URL to the node's repository. + tags: + type: array + items: + type: string + latest_version: + $ref: "#/components/schemas/NodeVersion" + description: The latest version of the node. + rating: + type: number + description: The average rating of the node. 
+ downloads: + type: integer + description: The number of downloads of the node. + publisher: + $ref: "#/components/schemas/Publisher" + description: The publisher of the node. + NodeVersion: + type: object + properties: + id: + type: string + version: + type: string + description: The version identifier, following semantic versioning. Must be unique for the node. + createdAt: + type: string + format: date-time + description: The date and time the version was created. + changelog: + type: string + description: Summary of changes made in this version + dependencies: + type: array + items: + type: string + description: A list of pip dependencies required by the node. + downloadUrl: + type: string + description: "[Output Only] URL to download this version of the node" + deprecated: + type: boolean + description: Indicates if this version is deprecated. + Error: + type: object + properties: + message: + type: string + description: A clear and concise description of the error. + details: + type: array + items: + type: string + description: Optional detailed information about the error or hints for resolving it. + # ======= Request body Definitions ======================= + NodeVersionUpdateRequest: + type: object + properties: + changelog: + type: string + description: The changelog describing the version changes. + deprecated: + type: boolean + description: Whether the version is deprecated. 
+ securitySchemes: + BearerAuth: + type: http + scheme: bearer + bearerFormat: JWT diff --git a/run-service-prod.yaml b/run-service-prod.yaml new file mode 100644 index 0000000..878ea7c --- /dev/null +++ b/run-service-prod.yaml @@ -0,0 +1,29 @@ +# Prod Cloud Run + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: prod-comfy-backend +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/min-scale: "1" + autoscaling.knative.dev/minScale: '1' + autoscaling.knative.dev/maxScale: '20' + spec: + containers: + - image: registry-backend-image-substitute + env: + - name: DRIP_ENV + value: prod + - name: DB_CONNECTION_STRING + valueFrom: + secretKeyRef: + key: 1 + name: PROD_SUPABASE_CONNECTION_STRING + - name: PROJECT_ID + value: dreamboothy + # TODO(robinhuang): Switch to a list of strings + - name: CORS_ORIGIN + value: https://comfyregistry.org \ No newline at end of file diff --git a/run-service-staging.yaml b/run-service-staging.yaml new file mode 100644 index 0000000..a882731 --- /dev/null +++ b/run-service-staging.yaml @@ -0,0 +1,31 @@ +# Dev Cloud Run + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: staging-comfy-backend +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/min-scale: "1" + autoscaling.knative.dev/minScale: '1' + autoscaling.knative.dev/maxScale: '2' + run.googleapis.com/cpu-throttling: 'false' + run.googleapis.com/startup-cpu-boost: 'false' + spec: + containers: + - image: registry-backend-image-substitute + env: + - name: DRIP_ENV + value: staging + - name: DB_CONNECTION_STRING + valueFrom: + secretKeyRef: + key: 1 + name: STAGING_SUPABASE_CONNECTION_STRING + - name: PROJECT_ID + value: dreamboothy + # TODO(robinhuang): Switch to a list of strings + - name: CORS_ORIGIN + value: https://staging.comfyregistry.org \ No newline at end of file diff --git a/server/handlers/openapi_handler.go b/server/handlers/openapi_handler.go new file mode 100644 index 0000000..76c625a --- 
/dev/null +++ b/server/handlers/openapi_handler.go @@ -0,0 +1,18 @@ +package handler + +import ( + "net/http" + "registry-backend/drip" + + "github.com/labstack/echo/v4" + // other imports +) + +func SwaggerHandler(c echo.Context) error { + swagger, err := drip.GetSwagger() + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) + } + + return c.JSON(http.StatusOK, swagger) +} diff --git a/server/implementation/api.implementation.go b/server/implementation/api.implementation.go new file mode 100644 index 0000000..bbccb9f --- /dev/null +++ b/server/implementation/api.implementation.go @@ -0,0 +1,25 @@ +package implementation + +import ( + "registry-backend/config" + "registry-backend/ent" + gateway "registry-backend/gateways/slack" + "registry-backend/gateways/storage" + dripservices_comfyci "registry-backend/services/comfy_ci" + dripservices_registry "registry-backend/services/registry" +) + +type DripStrictServerImplementation struct { + Client *ent.Client + ComfyCIService *dripservices_comfyci.ComfyCIService + RegistryService *dripservices_registry.RegistryService +} + +func NewStrictServerImplementation(client *ent.Client, config *config.Config, storageService storage.StorageService, slackService gateway.SlackService) *DripStrictServerImplementation { + + return &DripStrictServerImplementation{ + Client: client, + ComfyCIService: dripservices_comfyci.NewComfyCIService(config), + RegistryService: dripservices_registry.NewRegistryService(storageService, slackService), + } +} diff --git a/server/implementation/cicd.go b/server/implementation/cicd.go new file mode 100644 index 0000000..7e52045 --- /dev/null +++ b/server/implementation/cicd.go @@ -0,0 +1,167 @@ +package implementation + +import ( + "context" + "registry-backend/drip" + "registry-backend/ent" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "strings" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/rs/zerolog/log" 
+) + +func (impl *DripStrictServerImplementation) GetGitcommit(ctx context.Context, request drip.GetGitcommitRequestObject) (drip.GetGitcommitResponseObject, error) { + var commitId uuid.UUID = uuid.Nil + if request.Params.CommitId != nil { + log.Ctx(ctx).Info().Msgf("getting commit data for %s", *request.Params.CommitId) + commitId = uuid.MustParse(*request.Params.CommitId) + } + + if request.Params.OperatingSystem != nil { + log.Ctx(ctx).Info().Msgf("getting commit data for %s", *request.Params.OperatingSystem) + } + + repoName := "comfyanonymous/ComfyUI" + if request.Params.RepoName != nil { + repoName = *request.Params.RepoName + } + repoName = strings.ToLower(repoName) + + var operatingSystem string + if request.Params.OperatingSystem != nil { + operatingSystem = *request.Params.OperatingSystem + } else { + operatingSystem = "" // Assign a default value if nil + } + var branchName string + if request.Params.Branch != nil { + branchName = *request.Params.Branch + } else { + branchName = "" // Assign a default value if nil + } + + var workflowName string + if request.Params.WorkflowName != nil { + workflowName = *request.Params.WorkflowName + } else { + workflowName = "" // Assign a default value if nil + } + log.Ctx(ctx).Info().Msgf("Querying database...") + + query := impl.Client.CIWorkflowResult.Query(). + WithGitcommit(). 
+ WithStorageFile() + + query.Where(ciworkflowresult.HasGitcommitWith(gitcommit.RepoNameEQ(repoName))) + query.Order(ciworkflowresult.ByGitcommitField(gitcommit.FieldCommitTimestamp, sql.OrderDesc())) + log.Ctx(ctx).Info().Msgf("Filtering git commit by repo name %s", repoName) + + // Conditionally add the commitId filter + if commitId != uuid.Nil { + log.Ctx(ctx).Info().Msgf("Filtering git commit by commit hash %s", commitId) + query.Where(ciworkflowresult.HasGitcommitWith(gitcommit.IDEQ(commitId))) + } + + if branchName != "" { + log.Ctx(ctx).Info().Msgf("Filtering git commit by branch %s", branchName) + query.Where(ciworkflowresult.HasGitcommitWith(gitcommit.BranchNameEQ(branchName))) + } + + // Continue building the query + if operatingSystem != "" { + log.Ctx(ctx).Info().Msgf("Filtering git commit by OS %s", operatingSystem) + query.Where(ciworkflowresult.OperatingSystemEQ(operatingSystem)) + } + if workflowName != "" { + log.Ctx(ctx).Info().Msgf("Filtering git commit by workflow name %s", workflowName) + query.Where(ciworkflowresult.WorkflowNameEQ(workflowName)) + } + + // Get total number of pages + count, err := query.Count(ctx) + log.Ctx(ctx).Info().Msgf("Got %d runs", count) + if err != nil { + return drip.GetGitcommit500Response{}, err + } + + // Pagination + page := 1 + pageSize := 10 + if request.Params.Page != nil { + page = *request.Params.Page + } + if request.Params.PageSize != nil { + pageSize = *request.Params.PageSize + } + query.Offset((page - 1) * pageSize).Limit(pageSize) + + numberOfPages := (count + pageSize - 1) / pageSize + + // Execute the query + runs, err := query.All(ctx) + if err != nil { + return drip.GetGitcommit500Response{}, err + } + + results := mapRunsToResponse(runs) + return drip.GetGitcommit200JSONResponse{ + JobResults: &results, + TotalNumberOfPages: &numberOfPages, + }, nil +} + +func mapRunsToResponse(results []*ent.CIWorkflowResult) []drip.ActionJobResult { + var jobResultsData []drip.ActionJobResult + + for _, result 
:= range results { + storageFileData := drip.StorageFile{ + PublicUrl: &result.Edges.StorageFile.FileURL, + } + commitId := result.Edges.Gitcommit.ID.String() + commitUnixTime := result.Edges.Gitcommit.CommitTimestamp.Unix() + jobResultData := drip.ActionJobResult{ + WorkflowName: &result.WorkflowName, + OperatingSystem: &result.OperatingSystem, + GpuType: &result.GpuType, + PytorchVersion: &result.PytorchVersion, + StorageFile: &storageFileData, + CommitHash: &result.Edges.Gitcommit.CommitHash, + CommitId: &commitId, + CommitTime: &commitUnixTime, + CommitMessage: &result.Edges.Gitcommit.CommitMessage, + GitRepo: &result.Edges.Gitcommit.RepoName, + ActionRunId: &result.RunID, + StartTime: &result.StartTime, + EndTime: &result.EndTime, + } + jobResultsData = append(jobResultsData, jobResultData) + } + return jobResultsData +} + +func (impl *DripStrictServerImplementation) GetBranch(ctx context.Context, request drip.GetBranchRequestObject) (drip.GetBranchResponseObject, error) { + repoNameFilter := strings.ToLower(request.Params.RepoName) + + branches, err := impl.Client.GitCommit. + Query(). + Where(gitcommit.RepoNameEQ(repoNameFilter)). + GroupBy(gitcommit.FieldBranchName). 
+ Strings(ctx) + if err != nil { + return drip.GetBranch500Response{}, err + } + + return drip.GetBranch200JSONResponse{Branches: &branches}, nil +} + +func (impl *DripStrictServerImplementation) PostUploadArtifact(ctx context.Context, request drip.PostUploadArtifactRequestObject) (drip.PostUploadArtifactResponseObject, error) { + err := impl.ComfyCIService.ProcessCIRequest(ctx, impl.Client, &request) + if err != nil { + log.Ctx(ctx).Error().Err(err).Msg("failed to process CI request") + return drip.PostUploadArtifact500Response{}, err + } + return drip.PostUploadArtifact200JSONResponse{}, nil +} diff --git a/server/implementation/registry.go b/server/implementation/registry.go new file mode 100644 index 0000000..7aa22ff --- /dev/null +++ b/server/implementation/registry.go @@ -0,0 +1,906 @@ +package implementation + +import ( + "context" + "registry-backend/drip" + "registry-backend/ent" + "registry-backend/ent/publisher" + "registry-backend/ent/schema" + "registry-backend/mapper" + drip_services "registry-backend/services/registry" + + "github.com/google/uuid" + "github.com/mixpanel/mixpanel-go" + "github.com/rs/zerolog/log" + "google.golang.org/protobuf/proto" +) + +func (impl *DripStrictServerImplementation) ListPublishersForUser( + ctx context.Context, request drip.ListPublishersForUserRequestObject) (drip.ListPublishersForUserResponseObject, error) { + log.Ctx(ctx).Debug().Msg("ListPublishersForUser called.") + + // Extract user ID from context + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.ListPublishersForUser400JSONResponse{Message: "Invalid user ID"}, err + } + + // Call the service to list publishers + log.Ctx(ctx).Info().Msgf("Fetching publishers for user %s", userId) + publishers, err := impl.RegistryService.ListPublishers(ctx, impl.Client, &drip_services.PublisherFilter{ + UserID: userId, + }) + if err != nil { + 
 log.Ctx(ctx).Error().Msgf("Failed to list publishers w/ err: %v", err)
+		return drip.ListPublishersForUser500JSONResponse{
+			Message: "Failed to fetch list of publishers", Error: err.Error()}, err
+	}
+
+	// Map the publishers to API format
+	apiPublishers := make([]drip.Publisher, 0, len(publishers))
+	log.Ctx(ctx).Info().Msgf(
+		"Successfully fetched publishers for user %s, count %d", userId, len(publishers))
+	for _, dbPublisher := range publishers {
+		apiPublishers = append(apiPublishers, *mapper.DbPublisherToApiPublisher(dbPublisher, true))
+	}
+
+	return drip.ListPublishersForUser200JSONResponse(apiPublishers), nil
+}
+
+func (s *DripStrictServerImplementation) ValidatePublisher(
+	ctx context.Context, request drip.ValidatePublisherRequestObject) (drip.ValidatePublisherResponseObject, error) {
+	// Log the incoming request for validation
+	log.Ctx(ctx).Info().Msgf("ValidatePublisher request with username: %s", request.Params.Username)
+
+	// Check if the username is empty
+	name := request.Params.Username
+	if name == "" {
+		log.Ctx(ctx).Error().Msg("Username parameter is missing")
+		return drip.ValidatePublisher400JSONResponse{Message: "Username parameter is required"}, nil
+	}
+
+	isValid := mapper.IsValidPublisherID(name)
+	if !isValid {
+		return drip.ValidatePublisher400JSONResponse{
+			Message: "Must start with a lowercase letter and can only contain lowercase letters, digits, and hyphens.",
+		}, nil
+	}
+
+	// Note: username = id field in publisher table, display = name field in publisher table
+	count, err := s.Client.Publisher.Query().Where(publisher.ID(name)).Count(ctx)
+	if err != nil {
+		log.Ctx(ctx).Error().Msgf("Failed to query username %s w/ err: %v", name, err)
+		return drip.ValidatePublisher500JSONResponse{Message: "Failed to query username", Error: err.Error()}, err
+	}
+
+	// Log the result of the count query
+	log.Ctx(ctx).Info().Msgf("Count for username %s: %d", name, count)
+	if count > 0 {
+		return 
drip.ValidatePublisher400JSONResponse{ + Message: "Publisher ID already exists.", + }, nil + } + + return drip.ValidatePublisher200JSONResponse{ + IsAvailable: proto.Bool(true), + }, nil +} + +func (s *DripStrictServerImplementation) CreatePublisher( + ctx context.Context, request drip.CreatePublisherRequestObject) (drip.CreatePublisherResponseObject, error) { + // Log the incoming request + log.Ctx(ctx).Info().Msgf("CreatePublisher request called") + + // Extract user ID from context + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.CreatePublisher400JSONResponse{Message: "Invalid user ID"}, err + } + + log.Ctx(ctx).Info().Msgf("Checking if user ID %s has reached the maximum number of publishers", userId) + userPublishers, err := s.RegistryService.ListPublishers( + ctx, s.Client, &drip_services.PublisherFilter{UserID: userId}) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to list publishers for user ID %s w/ err: %v", userId, err) + return drip.CreatePublisher500JSONResponse{Message: "Failed to list publishers", Error: err.Error()}, err + } + if len(userPublishers) >= 5 { + log.Ctx(ctx).Info().Msgf("User ID %s has reached the maximum number of publishers", userId) + return drip.CreatePublisher403JSONResponse{ + Message: "User has reached the maximum number of publishers.", + }, nil + } + + // Create a new publisher + log.Ctx(ctx).Info().Msgf("Creating publisher for user ID %s", userId) + publisher, err := s.RegistryService.CreatePublisher(ctx, s.Client, userId, request.Body) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to create publisher for user ID %s w/ err: %v", userId, err) + if ent.IsConstraintError(err) { + return drip.CreatePublisher400JSONResponse{Message: "Constraint error", Error: err.Error()}, nil + } + + return drip.CreatePublisher500JSONResponse{Message: "Internal server error", Error: err.Error()}, err + } + + // Log the 
successful creation + log.Ctx(ctx).Info().Msgf("Publisher created successfully for user ID: %s", userId) + return drip.CreatePublisher201JSONResponse(*mapper.DbPublisherToApiPublisher(publisher, true)), nil +} + +func (s *DripStrictServerImplementation) ListPublishers( + ctx context.Context, request drip.ListPublishersRequestObject) (drip.ListPublishersResponseObject, error) { + pubs, err := s.RegistryService.ListPublishers(ctx, s.Client, nil) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to retrieve list of publishers w/ err: %v", err) + return drip.ListPublishers500JSONResponse{Message: "Failed to get publisher", Error: err.Error()}, err + } + + res := drip.ListPublishers200JSONResponse{} + for _, pub := range pubs { + res = append(res, *mapper.DbPublisherToApiPublisher(pub, false)) + } + + log.Ctx(ctx).Info().Msgf("List of Publishers retrieved successfully") + return res, nil +} + +func (s *DripStrictServerImplementation) DeletePublisher( + ctx context.Context, request drip.DeletePublisherRequestObject) (drip.DeletePublisherResponseObject, error) { + err := s.RegistryService.DeletePublisher(ctx, s.Client, request.PublisherId) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to delete publisher with ID %s w/ err: %v", request.PublisherId, err) + return drip.DeletePublisher500JSONResponse{}, nil + } + + log.Ctx(ctx).Info().Msgf("Publisher with ID %s deleted successfully", request.PublisherId) + return drip.DeletePublisher204Response{}, nil +} + +func (s *DripStrictServerImplementation) GetPublisher( + ctx context.Context, request drip.GetPublisherRequestObject) (drip.GetPublisherResponseObject, error) { + publisherId := request.PublisherId + log.Ctx(ctx).Info().Msgf("GetPublisher request received for publisher ID: %s", publisherId) + + publisher, err := s.RegistryService.GetPublisher(ctx, s.Client, publisherId) + if ent.IsNotFound(err) { + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", publisherId) + return 
drip.GetPublisher404JSONResponse{Message: "Publisher not found"}, nil + } + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to retrieve publisher with ID %s w/ err: %v", publisherId, err) + return drip.GetPublisher500JSONResponse{Message: "Failed to get publisher", Error: err.Error()}, err + } + + log.Ctx(ctx).Info().Msgf("Publisher with ID %s retrieved successfully", publisherId) + return drip.GetPublisher200JSONResponse(*mapper.DbPublisherToApiPublisher(publisher, false)), nil +} + +func (s *DripStrictServerImplementation) UpdatePublisher( + ctx context.Context, request drip.UpdatePublisherRequestObject) (drip.UpdatePublisherResponseObject, error) { + log.Ctx(ctx).Info().Msgf("UpdatePublisher called with publisher ID: %s", request.PublisherId) + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.UpdatePublisher400JSONResponse{Message: "Invalid user ID"}, err + } + + log.Ctx(ctx).Info().Msgf("Checking if user ID %s has permission to update publisher ID %s", userId, request.PublisherId) + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.UpdatePublisher404JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf("Permission denied for user ID %s on "+ + "publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.UpdatePublisher401Response{}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf( + "Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err) + return drip.UpdatePublisher500JSONResponse{ + Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + 
log.Ctx(ctx).Info().Msgf("Updating publisher with ID %s", request.PublisherId) + updateOne := mapper.ApiUpdatePublisherToUpdateFields(request.PublisherId, request.Body, s.Client) + updatedPublisher, err := s.RegistryService.UpdatePublisher(ctx, s.Client, updateOne) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to update publisher with ID %s w/ err: %v", request.PublisherId, err) + return drip.UpdatePublisher500JSONResponse{Message: "Failed to update publisher", Error: err.Error()}, err + } + + log.Ctx(ctx).Info().Msgf("Publisher with ID %s updated successfully", request.PublisherId) + return drip.UpdatePublisher200JSONResponse(*mapper.DbPublisherToApiPublisher(updatedPublisher, true)), nil +} + +func (s *DripStrictServerImplementation) CreateNode( + ctx context.Context, request drip.CreateNodeRequestObject) (drip.CreateNodeResponseObject, error) { + log.Ctx(ctx).Info().Msgf("CreateNode called with publisher ID: %s", request.PublisherId) + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.CreateNode400JSONResponse{Message: "Invalid user ID"}, err + } + + log.Ctx(ctx).Info().Msgf( + "Checking if user ID %s has permission to create node for publisher ID %s", userId, request.PublisherId) + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.CreateNode400JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf( + "Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.CreateNode401Response{}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf( + "Failed to assert publisher 
permission %s w/ err: %v", request.PublisherId, err) + return drip.CreateNode500JSONResponse{ + Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + node, err := s.RegistryService.CreateNode(ctx, s.Client, request.PublisherId, request.Body) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to create node for publisher ID %s w/ err: %v", request.PublisherId, err) + return drip.CreateNode500JSONResponse{Message: "Failed to create node", Error: err.Error()}, err + } + + log.Ctx(ctx).Info().Msgf("Node created successfully for publisher ID: %s", request.PublisherId) + return drip.CreateNode201JSONResponse(*mapper.DbNodeToApiNode(node)), nil +} + +func (s *DripStrictServerImplementation) ListNodesForPublisher( + ctx context.Context, request drip.ListNodesForPublisherRequestObject) (drip.ListNodesForPublisherResponseObject, error) { + log.Ctx(ctx).Info().Msgf("ListNodesForPublisher request received for publisher ID: %s", request.PublisherId) + + nodeResults, err := s.RegistryService.ListNodes( + ctx, s.Client /*page=*/, 1 /*limit=*/, 10, &drip_services.NodeFilter{ + PublisherID: request.PublisherId, + }) + if err != nil { + log.Ctx(ctx).Error().Msgf( + "Failed to list nodes for publisher ID %s w/ err: %v", request.PublisherId, err) + return drip.ListNodesForPublisher500JSONResponse{Message: "Failed to list nodes", Error: err.Error()}, err + } + + if len(nodeResults.Nodes) == 0 { + log.Ctx(ctx).Info().Msgf("No nodes found for publisher ID: %s", request.PublisherId) + return drip.ListNodesForPublisher200JSONResponse([]drip.Node{}), nil + } + + apiNodes := make([]drip.Node, 0, len(nodeResults.Nodes)) + for _, dbNode := range nodeResults.Nodes { + apiNodes = append(apiNodes, *mapper.DbNodeToApiNode(dbNode)) + } + + log.Ctx(ctx).Info().Msgf( + "Found %d nodes for publisher ID: %s", len(apiNodes), request.PublisherId) + return drip.ListNodesForPublisher200JSONResponse(apiNodes), nil +} + +func (s *DripStrictServerImplementation) ListAllNodes( + 
 ctx context.Context, request drip.ListAllNodesRequestObject) (drip.ListAllNodesResponseObject, error) {
+	log.Ctx(ctx).Info().Msg("ListAllNodes request received")
+
+	// Set default values for pagination parameters
+	page := 1
+	if request.Params.Page != nil {
+		page = *request.Params.Page
+	}
+	limit := 10
+	if request.Params.Limit != nil {
+		limit = *request.Params.Limit
+	}
+
+	// List nodes from the registry service
+	nodeResults, err := s.RegistryService.ListNodes(ctx, s.Client, page, limit, &drip_services.NodeFilter{})
+	if err != nil {
+		log.Ctx(ctx).Error().Msgf("Failed to list nodes w/ err: %v", err)
+		return drip.ListAllNodes500JSONResponse{Message: "Failed to list nodes", Error: err.Error()}, err
+	}
+
+	if len(nodeResults.Nodes) == 0 {
+		log.Ctx(ctx).Info().Msg("No nodes found")
+		return drip.ListAllNodes200JSONResponse{
+			Nodes:      &[]drip.Node{},
+			Total:      &nodeResults.Total,
+			Page:       &nodeResults.Page,
+			Limit:      &nodeResults.Limit,
+			TotalPages: &nodeResults.TotalPages,
+		}, nil
+	}
+
+	apiNodes := make([]drip.Node, 0, len(nodeResults.Nodes))
+	for _, dbNode := range nodeResults.Nodes {
+		apiNode := mapper.DbNodeToApiNode(dbNode)
+		if len(dbNode.Edges.Versions) > 0 {
+			latestVersion := dbNode.Edges.Versions[0]
+			apiNode.LatestVersion = mapper.DbNodeVersionToApiNodeVersion(latestVersion)
+		}
+		apiNode.Publisher = mapper.DbPublisherToApiPublisher(dbNode.Edges.Publisher, false)
+		apiNodes = append(apiNodes, *apiNode)
+	}
+
+	log.Ctx(ctx).Info().Msgf("Found %d nodes", len(apiNodes))
+	return drip.ListAllNodes200JSONResponse{
+		Nodes:      &apiNodes,
+		Total:      &nodeResults.Total,
+		Page:       &nodeResults.Page,
+		Limit:      &nodeResults.Limit,
+		TotalPages: &nodeResults.TotalPages,
+	}, nil
+}
+
+func (s *DripStrictServerImplementation) DeleteNode(
+	ctx context.Context, request drip.DeleteNodeRequestObject) (drip.DeleteNodeResponseObject, error) {
+
+	log.Ctx(ctx).Info().Msgf("DeleteNode request received for node ID: %s", request.NodeId)
+
+	
userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.DeleteNode404JSONResponse{Message: "Invalid user ID"}, err + } + + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.DeleteNode404JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf( + "Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.DeleteNode403JSONResponse{}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err) + return drip.DeleteNode500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + err = s.RegistryService.AssertNodeBelongsToPublisher(ctx, s.Client, request.PublisherId, request.NodeId) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.DeleteNode404JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf( + "Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.DeleteNode403JSONResponse{}, err + + case err != nil: + return drip.DeleteNode500JSONResponse{Message: "Failed to assert publisher permission"}, err + } + + err = s.RegistryService.DeleteNode(ctx, s.Client, request.NodeId) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to delete node %s w/ err: %v", request.NodeId, err) + return drip.DeleteNode500JSONResponse{Message: "Internal server error"}, err + } + + 
log.Ctx(ctx).Info().Msgf("Node %s deleted successfully", request.NodeId) + return drip.DeleteNode204Response{}, nil +} + +func (s *DripStrictServerImplementation) GetNode( + ctx context.Context, request drip.GetNodeRequestObject) (drip.GetNodeResponseObject, error) { + log.Ctx(ctx).Info().Msgf("GetNode request received for node ID: %s", request.NodeId) + + node, err := s.RegistryService.GetNode(ctx, s.Client, request.NodeId) + if ent.IsNotFound(err) { + log.Ctx(ctx).Error().Msgf("Failed to get node %s w/ err: %v", request.NodeId, err) + return drip.GetNode404JSONResponse{Message: "Node not found"}, nil + } + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get node %s w/ err: %v", request.NodeId, err) + return drip.GetNode500JSONResponse{Message: "Failed to get node"}, err + } + + nodeVersion, err := s.RegistryService.GetLatestNodeVersion(ctx, s.Client, request.NodeId) + if err != nil { + log.Ctx(ctx).Error().Msgf( + "Failed to get latest node version for node %s w/ err: %v", request.NodeId, err) + return drip.GetNode500JSONResponse{Message: "Failed to get latest node version", Error: err.Error()}, err + } + + apiNode := mapper.DbNodeToApiNode(node) + apiNode.LatestVersion = mapper.DbNodeVersionToApiNodeVersion(nodeVersion) + + log.Ctx(ctx).Info().Msgf("Node %s retrieved successfully", request.NodeId) + return drip.GetNode200JSONResponse(*apiNode), nil +} + +func (s *DripStrictServerImplementation) UpdateNode( + ctx context.Context, request drip.UpdateNodeRequestObject) (drip.UpdateNodeResponseObject, error) { + + log.Ctx(ctx).Info().Msgf("UpdateNode request received for node ID: %s", request.NodeId) + + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.UpdateNode404JSONResponse{Message: "Invalid user ID"}, err + } + + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, 
[]schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.UpdateNode404JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf( + "Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.UpdateNode403JSONResponse{}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err) + return drip.UpdateNode500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + err = s.RegistryService.AssertNodeBelongsToPublisher(ctx, s.Client, request.PublisherId, request.NodeId) + if ent.IsNotFound(err) { + log.Ctx(ctx).Error().Msgf("Node %s not found w/ err: %v", request.NodeId, err) + return drip.UpdateNode404JSONResponse{Message: "Not Found"}, nil + } else if err != nil { + log.Ctx(ctx).Error().Msgf("Node %s does not belong to publisher "+ + "%s w/ err: %v", request.NodeId, request.PublisherId, err) + return drip.UpdateNode403JSONResponse{Message: "Forbidden"}, err + } + + updateOne := mapper.ApiUpdateNodeToUpdateFields(request.NodeId, request.Body, s.Client) + updatedNode, err := s.RegistryService.UpdateNode(ctx, s.Client, updateOne) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to update node %s w/ err: %v", request.NodeId, err) + return drip.UpdateNode500JSONResponse{Message: "Failed to update node", Error: err.Error()}, err + } + + log.Ctx(ctx).Info().Msgf("Node %s updated successfully", request.NodeId) + return drip.UpdateNode200JSONResponse(*mapper.DbNodeToApiNode(updatedNode)), nil +} + +func (s *DripStrictServerImplementation) ListNodeVersions( + ctx context.Context, request drip.ListNodeVersionsRequestObject) (drip.ListNodeVersionsResponseObject, error) { + + 
log.Ctx(ctx).Info().Msgf("ListNodeVersions request received for node ID: %s", request.NodeId) + + nodeVersions, err := s.RegistryService.ListNodeVersions(ctx, s.Client, request.NodeId) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to list node versions for node %s w/ err: %v", request.NodeId, err) + return drip.ListNodeVersions500JSONResponse{Message: "Failed to list node versions", Error: err.Error()}, err + } + + apiNodeVersions := make([]drip.NodeVersion, 0, len(nodeVersions)) + for _, dbNodeVersion := range nodeVersions { + apiNodeVersions = append(apiNodeVersions, *mapper.DbNodeVersionToApiNodeVersion(dbNodeVersion)) + } + + log.Ctx(ctx).Info().Msgf("Found %d versions for node %s", len(apiNodeVersions), request.NodeId) + return drip.ListNodeVersions200JSONResponse(apiNodeVersions), nil +} + +func (s *DripStrictServerImplementation) PublishNodeVersion( + ctx context.Context, request drip.PublishNodeVersionRequestObject) (drip.PublishNodeVersionResponseObject, error) { + log.Ctx(ctx).Info().Msgf("PublishNodeVersion request received for node ID: %s", request.NodeId) + + tokenValid, err := s.RegistryService.IsPersonalAccessTokenValidForPublisher( + ctx, s.Client, request.PublisherId, request.Body.PersonalAccessToken) + if err != nil { + log.Ctx(ctx).Error().Msgf("Token validation failed w/ err: %v", err) + return drip.PublishNodeVersion400JSONResponse{Message: "Failed to validate token", Error: err.Error()}, nil + } + if !tokenValid { + errMessage := "Invalid personal access token" + log.Ctx(ctx).Error().Msg(errMessage) + return drip.PublishNodeVersion400JSONResponse{Message: errMessage}, nil + } + + // Check if node exists, create if not + node, err := s.RegistryService.GetNode(ctx, s.Client, request.NodeId) + if err != nil && !ent.IsNotFound(err) { + log.Ctx(ctx).Error().Msgf("Failed to get node w/ err: %v", err) + // TODO(James): create a new error code for this. 
+ return drip.PublishNodeVersion500JSONResponse{}, err + } else if err != nil { + node, err = s.RegistryService.CreateNode(ctx, s.Client, request.PublisherId, &request.Body.Node) + if err != nil { + log.Ctx(ctx).Error().Msgf("Node creation failed w/ err: %v", err) + return drip.PublishNodeVersion500JSONResponse{Message: "Failed to create node", Error: err.Error()}, nil + } + + log.Ctx(ctx).Info().Msgf("Node %s created successfully", node.ID) + } else { + // TODO(james): distinguish between not found vs. nodes that belong to other publishers + // If node already exists, validate ownership + err = s.RegistryService.AssertNodeBelongsToPublisher(ctx, s.Client, request.PublisherId, node.ID) + if err != nil { + errMessage := "Node does not belong to Publisher." + log.Ctx(ctx).Error().Msgf("Node ownership validation failed w/ err: %v", err) + return drip.PublishNodeVersion403JSONResponse{Message: errMessage}, err + } + updateOne := mapper.ApiUpdateNodeToUpdateFields(node.ID, &request.Body.Node, s.Client) + _, err = s.RegistryService.UpdateNode(ctx, s.Client, updateOne) + if err != nil { + errMessage := "Failed to update node: " + err.Error() + log.Ctx(ctx).Error().Msgf("Node update failed w/ err: %v", err) + return drip.PublishNodeVersion400JSONResponse{Message: errMessage}, err + } + log.Ctx(ctx).Info().Msgf("Node %s updated successfully", node.ID) + } + + // Create node version + nodeVersionCreation, err := s.RegistryService.CreateNodeVersion(ctx, s.Client, request.PublisherId, node.ID, &request.Body.NodeVersion) + if err != nil { + errMessage := "Failed to create node version: " + err.Error() + log.Ctx(ctx).Error().Msgf("Node version creation failed w/ err: %v", err) + return drip.PublishNodeVersion400JSONResponse{Message: errMessage}, err + } + + apiNodeVersion := mapper.DbNodeVersionToApiNodeVersion(nodeVersionCreation.NodeVersion) + log.Ctx(ctx).Info().Msgf("Node version %s published successfully", nodeVersionCreation.NodeVersion.ID) + return 
drip.PublishNodeVersion201JSONResponse{ + NodeVersion: apiNodeVersion, + SignedUrl: &nodeVersionCreation.SignedUrl, + }, nil +} + +func (s *DripStrictServerImplementation) UpdateNodeVersion( + ctx context.Context, request drip.UpdateNodeVersionRequestObject) (drip.UpdateNodeVersionResponseObject, error) { + + log.Ctx(ctx).Info().Msgf("UpdateNodeVersion request received for node ID: "+ + "%s, version ID: %s", request.NodeId, request.VersionId) + + // Retrieve user ID from context + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.UpdateNodeVersion404JSONResponse{Message: "Invalid user ID"}, err + } + + // Assert publisher permissions + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.UpdateNodeVersion404JSONResponse{Message: "Publisher not found"}, nil + + case drip_services.IsPermissionError(err): + log.Ctx(ctx).Error().Msgf( + "Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err) + return drip.UpdateNodeVersion403JSONResponse{}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err) + return drip.UpdateNodeVersion500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + // Assert node belongs to publisher + err = s.RegistryService.AssertNodeBelongsToPublisher(ctx, s.Client, request.PublisherId, request.NodeId) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return drip.UpdateNodeVersion404JSONResponse{Message: "Publisher not found"}, nil + + case 
drip_services.IsPermissionError(err): + errMessage := "Node does not belong to Publisher." + log.Ctx(ctx).Error().Msgf("Node ownership validation failed w/ err: %v", err) + return drip.UpdateNodeVersion404JSONResponse{Message: errMessage}, err + + case err != nil: + log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err) + return drip.UpdateNodeVersion500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err + } + + // Update node version + updateOne := mapper.ApiUpdateNodeVersionToUpdateFields(request.VersionId, request.Body, s.Client) + version, err := s.RegistryService.UpdateNodeVersion(ctx, s.Client, updateOne) + if err != nil { + errMessage := "Failed to update node version" + log.Ctx(ctx).Error().Msgf("Node version update failed w/ err: %v", err) + return drip.UpdateNodeVersion500JSONResponse{Message: errMessage, Error: err.Error()}, err + } + + log.Ctx(ctx).Info().Msgf("Node version %s updated successfully", request.VersionId) + return drip.UpdateNodeVersion200JSONResponse{ + Changelog: &version.Changelog, + Deprecated: &version.Deprecated, + }, nil +} + +func (s *DripStrictServerImplementation) DeleteNodeVersion( + ctx context.Context, request drip.DeleteNodeVersionRequestObject) (drip.DeleteNodeVersionResponseObject, error) { + log.Ctx(ctx).Info().Msgf("DeleteNodeVersion request received for node ID: "+ + "%s, version ID: %s", request.NodeId, request.VersionId) + + // Directly return the message that node versions cannot be deleted + errMessage := "Cannot delete node versions. Please deprecate it instead." 
+ log.Ctx(ctx).Warn().Msg(errMessage) + return drip.DeleteNodeVersion404JSONResponse{ + Message: proto.String(errMessage), + }, nil +} + +func (s *DripStrictServerImplementation) GetNodeVersion( + ctx context.Context, request drip.GetNodeVersionRequestObject) (drip.GetNodeVersionResponseObject, error) { + log.Ctx(ctx).Info().Msgf("GetNodeVersion request received for "+ + "node ID: %s, version ID: %s", request.NodeId, request.VersionId) + + nodeVersion, err := s.RegistryService.GetNodeVersion(ctx, s.Client, request.NodeId, request.VersionId) + if ent.IsNotFound(err) { + log.Ctx(ctx).Error().Msgf("Error retrieving node version w/ err: %v", err) + return drip.GetNodeVersion404JSONResponse{}, nil + } + if err != nil { + errMessage := "Failed to get node version" + log.Ctx(ctx).Error().Msgf("Error retrieving node version w/ err: %v", err) + return drip.GetNodeVersion500JSONResponse{ + Message: errMessage, + Error: err.Error(), + }, err + } + + apiNodeVersion := mapper.DbNodeVersionToApiNodeVersion(nodeVersion) + log.Ctx(ctx).Info().Msgf("Node version %s retrieved successfully", request.VersionId) + return drip.GetNodeVersion200JSONResponse(*apiNodeVersion), nil +} + +func (s *DripStrictServerImplementation) ListPersonalAccessTokens( + ctx context.Context, request drip.ListPersonalAccessTokensRequestObject) (drip.ListPersonalAccessTokensResponseObject, error) { + log.Ctx(ctx).Info().Msgf("ListPersonalAccessTokens request received for publisher ID: %s", request.PublisherId) + + // Retrieve user ID from context + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.ListPersonalAccessTokens404JSONResponse{Message: "Invalid user ID"}, err + } + + // Assert publisher permissions + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + if err != nil { + 
errMessage := "User does not have the necessary permissions: " + err.Error() + log.Ctx(ctx).Error().Msgf("Permission assertion failed w/ err: %v", err) + return drip.ListPersonalAccessTokens403JSONResponse{Message: errMessage}, err + } + + // List personal access tokens + personalAccessTokens, err := s.RegistryService.ListPersonalAccessTokens(ctx, s.Client, request.PublisherId) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to list personal access tokens w/ err: %v", err) + errMessage := "Failed to list personal access tokens." + return drip.ListPersonalAccessTokens500JSONResponse{Message: errMessage, Error: err.Error()}, err + } + + apiTokens := make([]drip.PersonalAccessToken, 0, len(personalAccessTokens)) + for _, dbToken := range personalAccessTokens { + apiTokens = append(apiTokens, *mapper.DbToApiPersonalAccessToken(dbToken)) + } + + log.Ctx(ctx).Info().Msgf("Listed %d personal access tokens for "+ + "publisher ID: %s", len(apiTokens), request.PublisherId) + return drip.ListPersonalAccessTokens200JSONResponse(apiTokens), nil +} + +func (s *DripStrictServerImplementation) CreatePersonalAccessToken( + ctx context.Context, request drip.CreatePersonalAccessTokenRequestObject) (drip.CreatePersonalAccessTokenResponseObject, error) { + + log.Ctx(ctx).Info().Msgf("CreatePersonalAccessToken request received "+ + "for publisher ID: %s", request.PublisherId) + + // Retrieve user ID from context + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.CreatePersonalAccessToken400JSONResponse{Message: "Invalid user ID"}, err + } + + // Assert publisher permissions + err = s.RegistryService.AssertPublisherPermissions(ctx, s.Client, request.PublisherId, + userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + switch { + case ent.IsNotFound(err): + log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId) + return 
drip.CreatePersonalAccessToken400JSONResponse{Message: "Publisher not found"}, nil

	case drip_services.IsPermissionError(err):
		log.Ctx(ctx).Error().Msgf(
			"Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err)
		return drip.CreatePersonalAccessToken403JSONResponse{}, err

	case err != nil:
		log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err)
		return drip.CreatePersonalAccessToken500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err
	}

	// Create personal access token; description is optional in the request.
	description := ""
	if request.Body.Description != nil {
		description = *request.Body.Description
	}

	personalAccessToken, err := s.RegistryService.CreatePersonalAccessToken(
		ctx, s.Client, request.PublisherId, *request.Body.Name, description)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Failed to create personal access token w/ err: %v", err)
		errMessage := "Failed to create personal access token: " + err.Error()
		return drip.CreatePersonalAccessToken500JSONResponse{Message: errMessage}, err
	}

	log.Ctx(ctx).Info().Msgf("Personal access token created "+
		"successfully for publisher ID: %s", request.PublisherId)
	return drip.CreatePersonalAccessToken201JSONResponse{
		Token: &personalAccessToken.Token,
	}, nil
}

// DeletePersonalAccessToken removes a personal access token owned by the
// publisher. The caller must be a publisher owner, and the token must belong
// to that publisher.
func (s *DripStrictServerImplementation) DeletePersonalAccessToken(
	ctx context.Context, request drip.DeletePersonalAccessTokenRequestObject) (drip.DeletePersonalAccessTokenResponseObject, error) {

	log.Ctx(ctx).Info().Msgf("DeletePersonalAccessToken request received for token ID: %s", request.TokenId)

	// Retrieve user ID from context
	userId, err := mapper.GetUserIDFromContext(ctx)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err)
		return drip.DeletePersonalAccessToken404JSONResponse{Message: "Invalid user ID"}, err
	}

	// BUG FIX: request.TokenId comes straight from the URL and was previously
	// fed to uuid.MustParse twice below, which panics on a malformed ID and
	// crashes the handler. Parse it once up front and answer 404 instead.
	tokenUUID, err := uuid.Parse(request.TokenId)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Invalid token ID %s w/ err: %v", request.TokenId, err)
		return drip.DeletePersonalAccessToken404JSONResponse{Message: "Invalid token ID"}, err
	}

	// Assert publisher permissions
	err = s.RegistryService.AssertPublisherPermissions(
		ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner})
	switch {
	case ent.IsNotFound(err):
		log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId)
		return drip.DeletePersonalAccessToken404JSONResponse{Message: "Publisher not found"}, nil

	case drip_services.IsPermissionError(err):
		log.Ctx(ctx).Error().Msgf(
			"Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err)
		return drip.DeletePersonalAccessToken403JSONResponse{}, err

	case err != nil:
		log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err)
		return drip.DeletePersonalAccessToken500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err
	}

	// Assert access token belongs to publisher
	err = s.RegistryService.AssertAccessTokenBelongsToPublisher(ctx, s.Client, request.PublisherId, tokenUUID)
	switch {
	case ent.IsNotFound(err):
		log.Ctx(ctx).Info().Msgf("Publisher with ID %s not found", request.PublisherId)
		return drip.DeletePersonalAccessToken404JSONResponse{Message: "Publisher not found"}, nil

	case drip_services.IsPermissionError(err):
		log.Ctx(ctx).Error().Msgf(
			"Permission denied for user ID %s on publisher ID %s w/ err: %v", userId, request.PublisherId, err)
		return drip.DeletePersonalAccessToken403JSONResponse{}, err

	case err != nil:
		log.Ctx(ctx).Error().Msgf("Failed to assert publisher permission %s w/ err: %v", request.PublisherId, err)
		return drip.DeletePersonalAccessToken500JSONResponse{Message: "Failed to assert publisher permission", Error: err.Error()}, err
	}

	// Delete personal access token
	err = s.RegistryService.DeletePersonalAccessToken(ctx, s.Client, tokenUUID)
	if err != nil {
		errMessage := "Failed to delete personal access token: " + err.Error()
		log.Ctx(ctx).Error().Msgf("Token deletion failed w/ err: %v", err)
		return drip.DeletePersonalAccessToken500JSONResponse{Message: errMessage}, err
	}

	log.Ctx(ctx).Info().Msgf("Personal access token %s deleted successfully", request.TokenId)
	return drip.DeletePersonalAccessToken204Response{}, nil
}

// InstallNode resolves the node version to install (the latest when no
// explicit version is requested) and records an install event in Mixpanel.
func (s *DripStrictServerImplementation) InstallNode(
	ctx context.Context, request drip.InstallNodeRequestObject) (drip.InstallNodeResponseObject, error) {
	// TODO(robinhuang): Refactor to separate class
	// NOTE(review): this Mixpanel project token is hard-coded in source; move
	// it to configuration before rotating the credential.
	mp := mixpanel.NewApiClient("f919d1b9da9a57482453c72ef7b16d88")
	log.Ctx(ctx).Info().Msgf("InstallNode request received for node ID: %s", request.NodeId)

	// Get node
	_, err := s.RegistryService.GetNode(ctx, s.Client, request.NodeId)
	if ent.IsNotFound(err) {
		log.Ctx(ctx).Error().Msgf("Error retrieving node w/ err: %v", err)
		return drip.InstallNode404JSONResponse{Message: "Node not found"}, nil
	}
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Error retrieving node w/ err: %v", err)
		return drip.InstallNode500JSONResponse{Message: "Failed to get node"}, err
	}

	// Install node version
	if request.Params.Version == nil {
		// No explicit version requested: install the latest one.
		nodeVersion, err := s.RegistryService.GetLatestNodeVersion(ctx, s.Client, request.NodeId)
		if err == nil && nodeVersion == nil {
			log.Ctx(ctx).Error().Msgf("Latest node version not found")
			return drip.InstallNode404JSONResponse{Message: "Not found"}, nil
		}
		if err != nil {
			errMessage := "Failed to get latest node version: " + err.Error()
			log.Ctx(ctx).Error().Msgf("Error retrieving latest node version w/ err: %v", err)
			return drip.InstallNode500JSONResponse{Message: errMessage}, err
		}
		mp.Track(ctx, []*mixpanel.Event{
			mp.NewEvent("Install Node Latest", "", map[string]any{
				"Node ID": request.NodeId,
				"Version": nodeVersion.Version,
			}),
		})
		return drip.InstallNode200JSONResponse(
			*mapper.DbNodeVersionToApiNodeVersion(nodeVersion),
		), nil
	} else {
		nodeVersion, err := s.RegistryService.GetNodeVersion(ctx, s.Client, request.NodeId, *request.Params.Version)
		if ent.IsNotFound(err) {
			log.Ctx(ctx).Error().Msgf("Error retrieving node version w/ err: %v", err)
			return drip.InstallNode404JSONResponse{Message: "Not found"}, nil
		}
		if err != nil {
			errMessage := "Failed to get specified node version: " + err.Error()
			log.Ctx(ctx).Error().Msgf("Error retrieving node version w/ err: %v", err)
			return drip.InstallNode500JSONResponse{Message: errMessage}, err
		}
		mp.Track(ctx, []*mixpanel.Event{
			mp.NewEvent("Install Node", "", map[string]any{
				"Node ID": request.NodeId,
				"Version": nodeVersion.Version,
			}),
		})
		return drip.InstallNode200JSONResponse(
			*mapper.DbNodeVersionToApiNodeVersion(nodeVersion),
		), nil
	}
}

// GetPermissionOnPublisherNodes reports whether the caller may edit the given
// node: the caller must be an owner of the publisher and the node must belong
// to that publisher. Any failure yields CanEdit=false rather than an error.
func (s *DripStrictServerImplementation) GetPermissionOnPublisherNodes(
	ctx context.Context, request drip.GetPermissionOnPublisherNodesRequestObject) (drip.GetPermissionOnPublisherNodesResponseObject, error) {
	userId, err := mapper.GetUserIDFromContext(ctx)
	if err != nil {
		return drip.GetPermissionOnPublisherNodes200JSONResponse{CanEdit: proto.Bool(false)}, nil
	}

	err = s.RegistryService.AssertPublisherPermissions(
		ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner})
	if err != nil {
		return drip.GetPermissionOnPublisherNodes200JSONResponse{CanEdit: proto.Bool(false)}, nil
	}

	err = s.RegistryService.AssertNodeBelongsToPublisher(ctx, s.Client, request.PublisherId, request.NodeId)
	if err != nil {
		return drip.GetPermissionOnPublisherNodes200JSONResponse{CanEdit: proto.Bool(false)}, nil
	}

	return drip.GetPermissionOnPublisherNodes200JSONResponse{CanEdit: proto.Bool(true)}, nil
}

// GetPermissionOnPublisher reports whether the caller is an owner of the given
// publisher. Failures yield CanEdit=false.
func (s *DripStrictServerImplementation) GetPermissionOnPublisher(
	ctx context.Context, request drip.GetPermissionOnPublisherRequestObject) (drip.GetPermissionOnPublisherResponseObject, error) {
	userId, err :=
mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Msgf("Failed to get user ID from context w/ err: %v", err) + return drip.GetPermissionOnPublisher200JSONResponse{CanEdit: proto.Bool(false)}, err + } + + err = s.RegistryService.AssertPublisherPermissions( + ctx, s.Client, request.PublisherId, userId, []schema.PublisherPermissionType{schema.PublisherPermissionTypeOwner}) + if err != nil { + return drip.GetPermissionOnPublisher200JSONResponse{CanEdit: proto.Bool(false)}, nil + } + + return drip.GetPermissionOnPublisher200JSONResponse{CanEdit: proto.Bool(true)}, nil +} diff --git a/server/implementation/user.go b/server/implementation/user.go new file mode 100644 index 0000000..16e2ada --- /dev/null +++ b/server/implementation/user.go @@ -0,0 +1,32 @@ +package implementation + +import ( + "context" + "registry-backend/drip" + "registry-backend/ent/user" + "registry-backend/mapper" + + "github.com/rs/zerolog/log" +) + +func (impl *DripStrictServerImplementation) GetUser(ctx context.Context, request drip.GetUserRequestObject) (drip.GetUserResponseObject, error) { + userId, err := mapper.GetUserIDFromContext(ctx) + if err != nil { + log.Ctx(ctx).Error().Stack().Err(err).Msg("") + return drip.GetUser401Response{}, err + } + + user, err := impl.Client.User.Query().Where(user.IDEQ(userId)).Only(ctx) + + if (err != nil) || (user == nil) { + return drip.GetUser404Response{}, err + } + + return drip.GetUser200JSONResponse{ + Id: &user.ID, + Email: &user.Email, + Name: &user.Name, + IsApproved: &user.IsApproved, + IsAdmin: &user.IsAdmin, + }, nil +} diff --git a/server/middleware/error_logger.go b/server/middleware/error_logger.go new file mode 100644 index 0000000..6fa22d3 --- /dev/null +++ b/server/middleware/error_logger.go @@ -0,0 +1,24 @@ +package drip_middleware + +import ( + "github.com/rs/zerolog/log" + + "github.com/labstack/echo/v4" +) + +func ErrorLoggingMiddleware() echo.MiddlewareFunc { + return func(next echo.HandlerFunc) echo.HandlerFunc { 
+ return func(c echo.Context) error { + err := next(c) + + if err != nil { + log.Ctx(c.Request().Context()). + Error(). + Err(err). + Msgf("Error occurred Path: %s, Method: %s\n", c.Path(), c.Request().Method) + } + + return err + } + } +} diff --git a/server/middleware/firebase_auth.go b/server/middleware/firebase_auth.go new file mode 100644 index 0000000..8d75a25 --- /dev/null +++ b/server/middleware/firebase_auth.go @@ -0,0 +1,120 @@ +package drip_middleware + +import ( + "context" + "net/http" + "regexp" + "registry-backend/db" + "registry-backend/ent" + "strings" + + "github.com/rs/zerolog/log" + + firebase "firebase.google.com/go" + "firebase.google.com/go/auth" + "github.com/labstack/echo/v4" +) + +// TODO(robinhuang): Have this middleware only validate and extract the user details. Move all authorization logic to another middleware. +func FirebaseMiddleware(entClient *ent.Client) echo.MiddlewareFunc { + // Handlers in here should bypass this middleware. + var allowlist = map[*regexp.Regexp][]string{ + regexp.MustCompile(`^/openapi$`): {"GET"}, + regexp.MustCompile(`^/users/sessions$`): {"DELETE"}, + regexp.MustCompile(`^/vm$`): {"ANY"}, + regexp.MustCompile(`^/health$`): {"GET"}, + regexp.MustCompile(`^/upload-artifact$`): {"POST"}, + regexp.MustCompile(`^/gitcommit$`): {"POST", "GET"}, + regexp.MustCompile(`^/branch$`): {"GET"}, + regexp.MustCompile(`^/publishers/[^/]+/nodes/[^/]+/versions$`): {"POST"}, + regexp.MustCompile(`^/publishers/[^/]+/nodes$`): {"GET"}, + regexp.MustCompile(`^/publishers/[^/]+$`): {"GET"}, + regexp.MustCompile(`^/nodes$`): {"GET"}, + regexp.MustCompile(`^/nodes/[^/]+$`): {"GET"}, + regexp.MustCompile(`^/nodes/[^/]+/versions$`): {"GET"}, + regexp.MustCompile(`^/nodes/[^/]+/install$`): {"GET"}, + } + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(ctx echo.Context) error { + // Check if the request is in the allowlist. 
+ reqPath := ctx.Request().URL.Path + reqMethod := ctx.Request().Method + for basePathRegex, methods := range allowlist { + if basePathRegex.MatchString(reqPath) { + for _, method := range methods { + if method == "ANY" || reqMethod == method { + log.Ctx(ctx.Request().Context()).Debug(). + Msgf("Letting through %s request to %s", reqMethod, reqPath) + return next(ctx) + } + } + } + } + + // If header is present, extract the token and verify it. + header := ctx.Request().Header.Get("Authorization") + if header != "" { + + // Extract the JWT token from the header + splitToken := strings.Split(header, "Bearer ") + if len(splitToken) != 2 { + return echo.NewHTTPError(http.StatusUnauthorized, "token is not in Bearer format") + } + idToken := splitToken[1] + + // Initialize Firebase + app, err := firebase.NewApp(context.Background(), nil) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, "firebase initialization failed") + } + + client, err := app.Auth(context.Background()) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, "firebase auth client failed") + } + + // Verify ID token + token, err := client.VerifyIDToken(context.Background(), idToken) + if err != nil { + // print the error + log.Ctx(ctx.Request().Context()).Error().Err(err).Msg("error verifying ID token") + return echo.NewHTTPError(http.StatusUnauthorized, "invalid auth token") + } + + userDetails := extractUserDetails(token) + log.Ctx(ctx.Request().Context()).Debug().Msg("Authenticated user " + userDetails.Email) + authdCtx := context.WithValue(ctx.Request().Context(), UserContextKey, userDetails) + ctx.SetRequest(ctx.Request().WithContext(authdCtx)) + newUserError := db.UpsertUser(ctx.Request().Context(), entClient, token.UID, userDetails.Email, userDetails.Name) + if newUserError != nil { + log.Ctx(ctx.Request().Context()).Error().Err(newUserError).Msg("error User upserted successfully.") + } + return next(ctx) + } + + return 
echo.NewHTTPError(http.StatusUnauthorized, "missing auth token") + } + } +} + +type ContextKey string + +const UserContextKey ContextKey = "user" + +type UserDetails struct { + ID string + Email string + Name string +} + +func extractUserDetails(token *auth.Token) *UserDetails { + claims := token.Claims + email, _ := claims["email"].(string) + name, _ := claims["name"].(string) + + return &UserDetails{ + ID: token.UID, + Email: email, + Name: name, + } +} diff --git a/server/middleware/firebase_auth_test.go b/server/middleware/firebase_auth_test.go new file mode 100644 index 0000000..715b941 --- /dev/null +++ b/server/middleware/firebase_auth_test.go @@ -0,0 +1,69 @@ +package drip_middleware_test + +import ( + "net/http" + "net/http/httptest" + "registry-backend/ent" + drip_middleware "registry-backend/server/middleware" + "testing" + + "github.com/labstack/echo/v4" + "github.com/stretchr/testify/assert" +) + +func TestAllowlist(t *testing.T) { + e := echo.New() + req := httptest.NewRequest(http.MethodGet, "/", nil) + rec := httptest.NewRecorder() + c := e.NewContext(req, rec) + + // Mock ent.Client + mockEntClient := &ent.Client{} + + middleware := drip_middleware.FirebaseMiddleware(mockEntClient) + + tests := []struct { + name string + path string + method string + allowed bool + }{ + {"OpenAPI GET", "/openapi", "GET", true}, + {"Session DELETE", "/users/sessions", "DELETE", true}, + {"Health GET", "/health", "GET", true}, + {"VM ANY", "/vm", "POST", true}, + {"VM ANY GET", "/vm", "GET", true}, + {"Artifact POST", "/upload-artifact", "POST", true}, + {"Git Commit POST", "/gitcommit", "POST", true}, + {"Git Commit GET", "/gitcommit", "GET", true}, + {"Branch GET", "/branch", "GET", true}, + {"Node Version Path POST", "/publishers/pub123/nodes/node456/versions", "POST", true}, + {"Publisher POST", "/publishers", "POST", false}, + {"Unauthorized Path", "/nonexistent", "GET", false}, + {"Get All Nodes", "/nodes", "GET", true}, + {"Install Nodes", 
"/nodes/node-id/install", "GET", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(tt.method, tt.path, nil) + c.SetRequest(req) + handled := false + next := echo.HandlerFunc(func(c echo.Context) error { + handled = true + return nil + }) + err := middleware(next)(c) + if tt.allowed { + assert.True(t, handled, "Request should be allowed through") + assert.Nil(t, err) + } else { + assert.False(t, handled, "Request should not be allowed through") + assert.NotNil(t, err) + httpError, ok := err.(*echo.HTTPError) + assert.True(t, ok, "Error should be HTTPError") + assert.Equal(t, http.StatusUnauthorized, httpError.Code) + } + }) + } +} diff --git a/server/middleware/metric_middleware.go b/server/middleware/metric_middleware.go new file mode 100644 index 0000000..c7e63de --- /dev/null +++ b/server/middleware/metric_middleware.go @@ -0,0 +1,184 @@ +package drip_middleware + +import ( + "context" + "os" + "registry-backend/config" + "strconv" + "sync" + "sync/atomic" + "time" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + metricpb "google.golang.org/genproto/googleapis/api/metric" + + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/labstack/echo/v4" + "github.com/rs/zerolog/log" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const MetricTypePrefix = "custom.googleapis.com/comfy_api_frontend" + +var environment = os.Getenv("DRIP_ENV") + +// MetricsMiddleware creates a middleware to capture and send metrics for HTTP requests. +func MetricsMiddleware(client *monitoring.MetricClient, config *config.Config) echo.MiddlewareFunc { + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + startTime := time.Now() + err := next(c) + endTime := time.Now() + + // Generate metrics for the request duration, count, and errors. 
+ if config.DripEnv != "localdev" { + sendMetrics(c.Request().Context(), client, config, + createDurationMetric(c, startTime, endTime), + createRequestMetric(c), + createErrorMetric(c, err), + ) + } + return err + } + } +} + +// CounterMetric safely increments counters using concurrent maps and atomic operations. +type CounterMetric struct{ sync.Map } + +func (c *CounterMetric) increment(key any, i int64) int64 { + v, loaded := c.LoadOrStore(key, new(atomic.Int64)) + ai := v.(*atomic.Int64) + if !loaded { + ai.Add(i) // Initialize and increment atomically + } + return ai.Load() +} + +// EndpointMetricKey provides a unique identifier for metrics based on request properties. +type EndpointMetricKey struct { + endpoint string + method string + statusCode string +} + +func endpointMetricKeyFromEcho(c echo.Context) EndpointMetricKey { + return EndpointMetricKey{ + endpoint: c.Path(), + method: c.Request().Method, + statusCode: strconv.Itoa(c.Response().Status), + } +} + +func (e EndpointMetricKey) toLabels() map[string]string { + return map[string]string{ + "endpoint": e.endpoint, + "method": e.method, + "statusCode": e.statusCode, + "env": environment, + } +} + +// sendMetrics sends a batch of time series data to Cloud Monitoring. +func sendMetrics( + ctx context.Context, + client *monitoring.MetricClient, + config *config.Config, + series ...*monitoringpb.TimeSeries, +) { + req := &monitoringpb.CreateTimeSeriesRequest{ + Name: "projects/" + config.ProjectID, + TimeSeries: make([]*monitoringpb.TimeSeries, 0, len(series)), + } + + for _, s := range series { + if s != nil { + req.TimeSeries = append(req.TimeSeries, s) + } + } + + if err := client.CreateTimeSeries(ctx, req); err != nil { + log.Ctx(ctx).Error().Err(err).Msg("Failed to create time series") + } +} + +// createDurationMetric constructs a metric for the request processing duration. 
+func createDurationMetric(c echo.Context, startTime, endTime time.Time) *monitoringpb.TimeSeries { + key := endpointMetricKeyFromEcho(c) + return &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: MetricTypePrefix + "/request_duration", + Labels: key.toLabels(), + }, + Points: []*monitoringpb.Point{{ + Interval: &monitoringpb.TimeInterval{ + EndTime: timestamppb.New(endTime), + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: endTime.Sub(startTime).Seconds(), + }, + }, + }}, + } +} + +var reqCountMetric = CounterMetric{Map: sync.Map{}} + +// createRequestMetric constructs a cumulative metric for counting requests. +func createRequestMetric(c echo.Context) *monitoringpb.TimeSeries { + key := endpointMetricKeyFromEcho(c) + val := reqCountMetric.increment(key, 1) + return &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: MetricTypePrefix + "/request_count", + Labels: key.toLabels(), + }, + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: timestamppb.New(time.Now().Add(-time.Second)), + EndTime: timestamppb.New(time.Now()), + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: val, + }, + }, + }, + }, + } +} + +var reqErrCountMetric = CounterMetric{Map: sync.Map{}} + +// createErrorMetric constructs a cumulative metric for counting request errors. 
+func createErrorMetric(c echo.Context, err error) *monitoringpb.TimeSeries { + if c.Response().Status < 400 && err == nil { + return nil // No error occurred, no metric needed + } + + key := endpointMetricKeyFromEcho(c) + val := reqErrCountMetric.increment(key, 1) + return &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: MetricTypePrefix + "/request_errors", + Labels: key.toLabels(), + }, + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: timestamppb.New(time.Now().Add(-time.Second)), + EndTime: timestamppb.New(time.Now()), + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: val, + }, + }, + }, + }, + } +} diff --git a/server/middleware/service_account_auth.go b/server/middleware/service_account_auth.go new file mode 100644 index 0000000..cd35d94 --- /dev/null +++ b/server/middleware/service_account_auth.go @@ -0,0 +1,60 @@ +package drip_middleware + +import ( + "net/http" + "strings" + + "github.com/labstack/echo/v4" + "github.com/rs/zerolog/log" + "google.golang.org/api/idtoken" +) + +func ServiceAccountAuthMiddleware() echo.MiddlewareFunc { + // Handlers in here should be checked by this middleware. 
+ var checklist = map[string][]string{ + "/users/sessions": {"DELETE"}, + } + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(ctx echo.Context) error { + path := ctx.Request().URL.Path + method := ctx.Request().Method + + // Check if the request path and method are in the checklist + if methods, ok := checklist[path]; ok { + for _, allowMethod := range methods { + if method == allowMethod { + authHeader := ctx.Request().Header.Get("Authorization") + token := "" + if strings.HasPrefix(authHeader, "Bearer ") { + token = authHeader[7:] // Skip the "Bearer " part + } + + if token == "" { + return echo.NewHTTPError(http.StatusUnauthorized, "Missing token") + } + + log.Ctx(ctx.Request().Context()).Info().Msgf("Validating google id token %s for path %s and method %s", token, path, method) + + payload, err := idtoken.Validate(ctx.Request().Context(), token, "https://api.comfy.org") + + if err == nil { + if email, ok := payload.Claims["email"].(string); ok { + log.Ctx(ctx.Request().Context()).Info().Msgf("Service Account Email: %s", email) + // TODO(robinhuang): Make service account an environment variable. 
+ if email == "stop-vm-sa@dreamboothy.iam.gserviceaccount.com" { + return next(ctx) + } + } + } + + log.Ctx(ctx.Request().Context()).Error().Err(err).Msg("Invalid token") + return ctx.JSON(http.StatusUnauthorized, "Invalid token") + } + } + } + + // Proceed with the next middleware or handler + return next(ctx) + } + } +} diff --git a/server/middleware/tracing_middleware.go b/server/middleware/tracing_middleware.go new file mode 100644 index 0000000..d7c79cd --- /dev/null +++ b/server/middleware/tracing_middleware.go @@ -0,0 +1,46 @@ +package drip_middleware + +import ( + "context" + drip_logging "registry-backend/logging" + + "github.com/google/uuid" + "github.com/labstack/echo/v4" + "github.com/rs/zerolog" +) + +const CorrelationIDKey = "x-correlation-id" +const EndpointKey = "endpoint" + +func generateFallbackCorrelationID() string { + return "fallback-" + uuid.New().String() +} + +// TracingMiddleware is a middleware that adds a trace ID to the context +func TracingMiddleware(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + traceID := c.Request().Header.Get("X-Cloud-Trace-Context") + + if traceID == "" { + // Generate a fallback ID if no trace ID is present + traceID = generateFallbackCorrelationID() + } + + // Set trace and span IDs in the context + reqCtx := context.WithValue(c.Request().Context(), CorrelationIDKey, traceID) + + // Set the endpoint in the context + endpoint := c.Path() + reqCtx = context.WithValue(reqCtx, EndpointKey, endpoint) + + requestLogger := drip_logging.SetupLogger() + requestLogger.UpdateContext(func(c zerolog.Context) zerolog.Context { + return c. + Str(CorrelationIDKey, traceID). 
+ Str(EndpointKey, endpoint) + }) + + c.SetRequest(c.Request().WithContext(requestLogger.WithContext(reqCtx))) + return next(c) + } +} diff --git a/server/server.go b/server/server.go new file mode 100644 index 0000000..d804f74 --- /dev/null +++ b/server/server.go @@ -0,0 +1,112 @@ +package server + +import ( + "context" + "os" + "registry-backend/config" + generated "registry-backend/drip" + "registry-backend/ent" + gateway "registry-backend/gateways/slack" + "registry-backend/gateways/storage" + handler "registry-backend/server/handlers" + "registry-backend/server/implementation" + drip_middleware "registry-backend/server/middleware" + "strings" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + + "github.com/labstack/echo/v4/middleware" + "github.com/rs/zerolog/log" + + "github.com/labstack/echo/v4" +) + +type Server struct { + Client *ent.Client + Config *config.Config +} + +func NewServer(client *ent.Client, config *config.Config) *Server { + return &Server{ + Client: client, + Config: config, + } +} + +func (s *Server) Start() error { + e := echo.New() + e.HideBanner = true + e.Use(drip_middleware.TracingMiddleware) + //e.Use(middleware.Logger()) // Useful for debugging + e.Use(middleware.CORSWithConfig(middleware.CORSConfig{ + AllowMethods: []string{echo.GET, echo.PUT, echo.POST, echo.DELETE}, + AllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept, echo.HeaderAuthorization}, + AllowCredentials: true, + AllowOriginFunc: func(origin string) (bool, error) { + allowedOrigins := []string{ + ".comfyci.org", // Any subdomain of comfyci.org + os.Getenv("CORS_ORIGIN"), // Environment-specific allowed origin + ".comfyregistry.org", + } + + for _, allowed := range allowedOrigins { + if strings.HasSuffix(origin, allowed) || origin == allowed { + log.Debug().Msg("[CORSWithConfig] Allowing origin " + origin) + return true, nil + } + } + + log.Debug().Msg("[CORSWithConfig] Rejecting origin " + origin) + return false, nil + }, + })) + 
+ e.Use(middleware.RequestLoggerWithConfig(middleware.RequestLoggerConfig{ + LogURI: true, + LogStatus: true, + LogValuesFunc: func(c echo.Context, v middleware.RequestLoggerValues) error { + // Ignore when url is path /vm/{sessionId} + if strings.HasPrefix(c.Request().URL.Path, "/vm/") { + return nil + } + + log.Ctx(c.Request().Context()).Debug(). + Str("URI: ", v.URI). + Int("status", v.Status).Msg("") + return nil + }, + })) + + storageService, err := storage.NewGCPStorageService(context.Background()) + if err != nil { + return err + } + + slackService := gateway.NewSlackService() + + mon, err := monitoring.NewMetricClient(context.Background()) + if err != nil { + return err + } + + // Attach implementation of generated oapi strict server. + impl := implementation.NewStrictServerImplementation(s.Client, s.Config, storageService, slackService) + + var middlewares []generated.StrictMiddlewareFunc + wrapped := generated.NewStrictHandler(impl, middlewares) + generated.RegisterHandlers(e, wrapped) + + e.GET("/openapi", handler.SwaggerHandler) + e.GET("/health", func(c echo.Context) error { + return c.String(200, "OK") + }) + + // Global Middlewares + e.Use(drip_middleware.MetricsMiddleware(mon, s.Config)) + e.Use(drip_middleware.FirebaseMiddleware(s.Client)) + e.Use(drip_middleware.ServiceAccountAuthMiddleware()) + e.Use(drip_middleware.ErrorLoggingMiddleware()) + + e.Logger.Fatal(e.Start(":8080")) + return nil +} diff --git a/services/comfy_ci/comfy_ci_svc.go b/services/comfy_ci/comfy_ci_svc.go new file mode 100644 index 0000000..f1d6b69 --- /dev/null +++ b/services/comfy_ci/comfy_ci_svc.go @@ -0,0 +1,164 @@ +package dripservices + +import ( + "fmt" + "registry-backend/config" + "registry-backend/db" + "registry-backend/drip" + "registry-backend/ent" + "registry-backend/ent/ciworkflowresult" + "registry-backend/ent/gitcommit" + "strings" + "time" + + "context" + + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +// 
ComfyCIService provides methods to interact with CI-related data in the database. +type ComfyCIService struct { + Config *config.Config +} + +// NewComfyCIService creates a new instance of ComfyCIService. +func NewComfyCIService(config *config.Config) *ComfyCIService { + return &ComfyCIService{ + Config: config, + } +} + +// ProcessCIRequest handles the incoming request and creates/updates the necessary entities. +func (s *ComfyCIService) ProcessCIRequest(ctx context.Context, client *ent.Client, req *drip.PostUploadArtifactRequestObject) error { + // Check if git commit exists + // If it does, remove all CiWorkflowRuns associated with it. + existingCommit, err := client.GitCommit.Query().Where(gitcommit.CommitHashEQ(req.Body.CommitHash)).Where(gitcommit.RepoNameEQ(req.Body.Repo)).Only(ctx) + if ent.IsNotSingular(err) { + log.Ctx(ctx).Error().Err(err).Msgf("Failed to query git commit %s", req.Body.CommitHash) + } + if existingCommit != nil { + _, err := client.CIWorkflowResult.Delete().Where(ciworkflowresult.HasGitcommitWith(gitcommit.IDEQ(existingCommit.ID))).Exec(ctx) + if err != nil { + log.Ctx(ctx).Error().Err(err).Msgf("Failed to delete existing run results for git commit %s", req.Body.CommitHash) + return err + } + } + + return db.WithTx(ctx, client, func(tx *ent.Tx) error { + id, err := s.UpsertCommit(ctx, tx.Client(), req.Body.CommitHash, req.Body.BranchName, req.Body.Repo, req.Body.CommitTime, req.Body.CommitMessage) + if err != nil { + return err + } + gitcommit := tx.Client().GitCommit.GetX(ctx, id) + if req.Body.OutputFilesGcsPaths != nil && req.Body.BucketName != nil { + files, err := GetPublicUrlForOutputFiles(ctx, *req.Body.BucketName, *req.Body.OutputFilesGcsPaths) + if err != nil { + return err + } + + for _, file := range files { + // TODO(robinhuang): Get real filetype. 
+ file, err := s.UpsertStorageFile(ctx, tx.Client(), file.PublicURL, file.BucketName, file.FilePath, "image") + + if err != nil { + log.Ctx(ctx).Error().Err(err).Msg("Failed to upsert storage file") + continue + } + + cudaVersion := "" + if req.Body.CudaVersion != nil { + cudaVersion = *req.Body.CudaVersion + } + + _, err = s.UpsertRunResult(ctx, tx.Client(), file, gitcommit, req.Body.Os, cudaVersion, req.Body.WorkflowName, req.Body.RunId, req.Body.StartTime, req.Body.EndTime) + if err != nil { + return err + } + } + } + return nil + }) +} + +// UpsertCommit creates or updates a GitCommit entity. +func (s *ComfyCIService) UpsertCommit(ctx context.Context, client *ent.Client, hash, branchName, repoName string, commitIsoTime string, commitMessage string) (uuid.UUID, error) { + log.Ctx(ctx).Info().Msgf("Upserting commit %s", hash) + commitTime, err := time.Parse(time.RFC3339, commitIsoTime) + if err != nil { + return uuid.Nil, err + } + + id, err := client.GitCommit. + Create(). + SetCommitHash(hash). + SetBranchName(branchName). + SetRepoName(strings.ToLower(repoName)). // TODO(robinhuang): Write test for this. + SetCommitTimestamp(commitTime). + SetCommitMessage(commitMessage). + OnConflict( + // Careful, the order matters here. + sql.ConflictColumns(gitcommit.FieldRepoName, gitcommit.FieldCommitHash), + ). + UpdateNewValues(). + ID(ctx) + + if err != nil { + return uuid.Nil, fmt.Errorf("GitCommit.Create: %w", err) + } + return id, nil +} + +// UpsertRunResult creates or updates a ActionRunResult entity. +func (s *ComfyCIService) UpsertRunResult(ctx context.Context, client *ent.Client, file *ent.StorageFile, gitcommit *ent.GitCommit, os, gpuType, workflowName, runId string, startTime, endTime int64) (uuid.UUID, error) { + log.Ctx(ctx).Info().Msgf("Upserting workflow result for commit %s", gitcommit.CommitHash) + return client.CIWorkflowResult. + Create(). + SetGitcommit(gitcommit). + SetStorageFile(file). + SetOperatingSystem(os). + SetWorkflowName(workflowName). 
+ SetRunID(runId). + SetStartTime(startTime). + SetEndTime(endTime). + OnConflict( + sql.ConflictColumns(ciworkflowresult.FieldID), + ). + UpdateNewValues(). + ID(ctx) +} + +// UpsertStorageFile creates or updates a RunFile entity. +func (s *ComfyCIService) UpsertStorageFile(ctx context.Context, client *ent.Client, publicUrl, bucketName, filePath, fileType string) (*ent.StorageFile, error) { + log.Ctx(ctx).Info().Msgf("Upserting storage file for URL %s", publicUrl) + return client.StorageFile. + Create(). + SetFileURL(publicUrl). + SetFilePath(filePath). + SetBucketName(bucketName). + SetFileType(fileType). + Save(ctx) +} + +type ObjectInfo struct { + BucketName string + FilePath string + PublicURL string +} + +// GetPublicUrlForOutputFiles downloads the artifact, extracts it, and uploads each file to GCS +func GetPublicUrlForOutputFiles(ctx context.Context, bucketName, objects string) ([]ObjectInfo, error) { + objectArr := strings.Split(objects, ",") + var result []ObjectInfo + for _, object := range objectArr { + publicURL := fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucketName, object) + log.Ctx(ctx).Info().Msgf("Public URL: %v", publicURL) + result = append(result, ObjectInfo{ + BucketName: bucketName, + FilePath: object, + PublicURL: publicURL, + }) + } + return result, nil +} diff --git a/services/registry/registry_svc.go b/services/registry/registry_svc.go new file mode 100644 index 0000000..0eff8c9 --- /dev/null +++ b/services/registry/registry_svc.go @@ -0,0 +1,509 @@ +package drip_services + +import ( + "context" + "errors" + "fmt" + "registry-backend/db" + "registry-backend/drip" + "registry-backend/ent" + "registry-backend/ent/node" + "registry-backend/ent/nodeversion" + "registry-backend/ent/personalaccesstoken" + "registry-backend/ent/publisher" + "registry-backend/ent/publisherpermission" + "registry-backend/ent/schema" + gateway "registry-backend/gateways/slack" + "registry-backend/gateways/storage" + "registry-backend/mapper" + + 
"github.com/Masterminds/semver/v3" + "google.golang.org/protobuf/proto" + + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +type RegistryService struct { + storageService storage.StorageService + slackService gateway.SlackService +} + +func NewRegistryService(storageSvc storage.StorageService, slackSvc gateway.SlackService) *RegistryService { + return &RegistryService{ + storageService: storageSvc, + slackService: slackSvc, + } +} + +type PublisherFilter struct { + UserID string +} + +// NodeFilter holds optional parameters for filtering node results +type NodeFilter struct { + PublisherID string + // Add more filter fields here +} + +type NodeData struct { + ID string `json:"id"` + Name string `json:"name"` + PublisherID string `json:"publisherId"` + // Add other fields as necessary +} + +// ListNodesResult is the structure that holds the paginated result of nodes +type ListNodesResult struct { + Total int `json:"total"` + Nodes []*ent.Node `json:"nodes"` + Page int `json:"page"` + Limit int `json:"limit"` + TotalPages int `json:"totalPages"` +} + +// ListNodes retrieves a paginated list of nodes with optional filtering. +func (s *RegistryService) ListNodes(ctx context.Context, client *ent.Client, page, limit int, filter *NodeFilter) (*ListNodesResult, error) { + if page < 1 { + page = 1 + } + if limit < 1 { + limit = 10 + } + + query := client.Node.Query().WithPublisher().WithVersions( + func(q *ent.NodeVersionQuery) { + q.Order(ent.Desc(nodeversion.FieldCreateTime)) + }, + ) + if filter != nil { + if filter.PublisherID != "" { + query.Where(node.PublisherID(filter.PublisherID)) + } + } + offset := (page - 1) * limit + total, err := query.Count(ctx) + if err != nil { + return nil, fmt.Errorf("failed to count nodes: %w", err) + } + + nodes, err := query. + Offset(offset). + Limit(limit). 
+ All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %w", err) + } + + totalPages := total / limit + if total%limit != 0 { + totalPages += 1 + } + + return &ListNodesResult{ + Total: total, + Nodes: nodes, + Page: page, + Limit: limit, + TotalPages: totalPages, + }, nil +} + +// ListPublishers queries the Publisher table with an optional user ID filter via PublisherPermission +func (s *RegistryService) ListPublishers(ctx context.Context, client *ent.Client, filter *PublisherFilter) ([]*ent.Publisher, error) { + log.Ctx(ctx).Info().Msg("Listing publishers") + + query := client.Publisher.Query() + + if filter != nil && filter.UserID != "" { + query = query.Where( + // Ensure that the publisher has the associated permission with the specific user ID + publisher.HasPublisherPermissionsWith(publisherpermission.UserIDEQ(filter.UserID)), + ) + } + + publishers, err := query.All(ctx) + if err != nil { + log.Ctx(ctx).Error().Err(err).Msg("Failed to list publishers") + return nil, err + } + + log.Ctx(ctx).Info().Msgf("Found %d publishers", len(publishers)) + return publishers, nil +} + +func (s *RegistryService) CreatePublisher(ctx context.Context, client *ent.Client, userId string, publisher *drip.Publisher) (*ent.Publisher, error) { + publisherValid := mapper.ValidatePublisher(publisher) + if publisherValid != nil { + return nil, fmt.Errorf("invalid publisher: %w", publisherValid) + } + return db.WithTxResult(ctx, client, func(tx *ent.Tx) (*ent.Publisher, error) { + newPublisher, err := mapper.ApiCreatePublisherToDb(publisher, tx.Client()) + log.Ctx(ctx).Info().Msgf("creating publisher with fields: %v", newPublisher.Mutation().Fields()) + if err != nil { + return nil, fmt.Errorf("failed to map publisher: %w", err) + } + publisher, err := newPublisher.Save(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create publisher: %w", err) + } + + _, err = tx.PublisherPermission.Create().SetPublisherID(publisher.ID). + SetUserID(userId). 
+ SetPermission(schema.PublisherPermissionTypeOwner). + Save(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create publisher permissions: %w", err) + } + return publisher, nil + }) +} + +func (s *RegistryService) UpdatePublisher(ctx context.Context, client *ent.Client, update *ent.PublisherUpdateOne) (*ent.Publisher, error) { + log.Ctx(ctx).Info().Msgf("updating publisher fields: %v", update.Mutation().Fields()) + publisher, err := update.Save(ctx) + log.Ctx(ctx).Info().Msgf("success: updated publisher: %v", publisher) + if err != nil { + return nil, fmt.Errorf("failed to create publisher: %w", err) + } + + return publisher, nil +} + +func (s *RegistryService) GetPublisher(ctx context.Context, client *ent.Client, publisherID string) (*ent.Publisher, error) { + log.Ctx(ctx).Info().Msgf("getting publisher: %v", publisherID) + publisher, err := client.Publisher. + Query(). + Where(publisher.IDEQ(publisherID)). + WithPublisherPermissions(func(ppq *ent.PublisherPermissionQuery) { + ppq.WithUser() + }). + Only(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get publisher: %w", err) + } + return publisher, nil +} + +func (s *RegistryService) CreatePersonalAccessToken(ctx context.Context, client *ent.Client, publisherID, name, description string) (*ent.PersonalAccessToken, error) { + log.Ctx(ctx).Info().Msgf("creating personal access token for publisher: %v", publisherID) + token := uuid.New().String() + pat, err := client.PersonalAccessToken. + Create(). + SetPublisherID(publisherID). + SetName(name). + SetDescription(description). + SetToken(token). + Save(ctx) + + if err != nil { + return nil, fmt.Errorf("failed to create personal access token: %w", err) + } + return pat, nil +} + +func (s *RegistryService) ListPersonalAccessTokens(ctx context.Context, client *ent.Client, publisherID string) ([]*ent.PersonalAccessToken, error) { + pats, err := client.PersonalAccessToken.Query(). + Where(personalaccesstoken.PublisherIDEQ(publisherID)). 
+ All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list personal access tokens: %w", err) + } + return pats, nil +} + +func (s *RegistryService) DeletePersonalAccessToken(ctx context.Context, client *ent.Client, tokenID uuid.UUID) error { + log.Ctx(ctx).Info().Msgf("deleting personal access token: %v", tokenID) + err := client.PersonalAccessToken. + DeleteOneID(tokenID). + Exec(ctx) + if err != nil { + return fmt.Errorf("failed to delete personal access token: %w", err) + } + return nil +} + +func (s *RegistryService) CreateNode(ctx context.Context, client *ent.Client, publisherId string, node *drip.Node) (*ent.Node, error) { + validNode := mapper.ValidateNode(node) + if validNode != nil { + return nil, fmt.Errorf("invalid node: %w", validNode) + } + + createNode, err := mapper.ApiCreateNodeToDb(publisherId, node, client) + log.Ctx(ctx).Info().Msgf("creating node with fields: %v", createNode.Mutation().Fields()) + if err != nil { + return nil, fmt.Errorf("failed to map node: %w", err) + } + + createdNode, err := createNode.Save(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create node: %w", err) + } + + return createdNode, nil +} + +func (s *RegistryService) UpdateNode(ctx context.Context, client *ent.Client, update *ent.NodeUpdateOne) (*ent.Node, error) { + log.Ctx(ctx).Info().Msgf("updating node fields: %v", update.Mutation().Fields()) + node, err := update. 
+ Save(ctx) + + if err != nil { + return nil, fmt.Errorf("failed to update node: %w", err) + } + return node, nil +} + +func (s *RegistryService) GetNode(ctx context.Context, client *ent.Client, nodeID string) (*ent.Node, error) { + log.Ctx(ctx).Info().Msgf("getting node: %v", nodeID) + node, err := client.Node.Get(ctx, nodeID) + if err != nil { + return nil, fmt.Errorf("failed to get node: %w", err) + } + return node, nil +} + +func (s *RegistryService) CreateNodeVersion( + ctx context.Context, + client *ent.Client, + publisherID, nodeID string, + nodeVersion *drip.NodeVersion) (*NodeVersionCreation, error) { + log.Ctx(ctx).Info().Msgf("creating node version: %v for nodeId %v", nodeVersion, nodeID) + bucketName := "comfy-registry" + return db.WithTxResult(ctx, client, func(tx *ent.Tx) (*NodeVersionCreation, error) { + // If the node version is not provided, we will generate a new version + if nodeVersion.Version != nil { + defaultVersion, err := semver.NewVersion(*nodeVersion.Version) + if err != nil { + return nil, err + } + + nodeVersion.Version = proto.String(defaultVersion.String()) + } + + // Create a new storage file for the node version + objectPath := fmt.Sprintf("%s/%s/%s/%s", publisherID, nodeID, *nodeVersion.Version, "node.tar.gz") + storageFile := tx.StorageFile.Create(). + SetBucketName(bucketName). + SetFilePath(objectPath). + SetFileType("zip"). + // Sample URL: https://storage.googleapis.com/comfy-registry/james-test-publisher/comfyui-inspire-pack/1.0.0/node.tar.gz + SetFileURL(fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucketName, objectPath)). 
+ SaveX(ctx) + signedUrl, err := s.storageService.GenerateSignedURL(bucketName, objectPath) + if err != nil { + return nil, fmt.Errorf("failed to generate signed url: %w", err) + } + log.Ctx(ctx).Info().Msgf("generated signed url: %v", signedUrl) + + newNodeVersion := mapper.ApiCreateNodeVersionToDb(nodeID, nodeVersion, tx.Client()) + newNodeVersion.SetStorageFile(storageFile) + createdNodeVersion, err := newNodeVersion.Save(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create node version: %w", err) + } + + slackErr := s.slackService.SendRegistryMessageToSlack(fmt.Sprintf("Version %s of node %s was published successfully. Publisher: %s. https://comfyregistry.org/nodes/%s", createdNodeVersion.Version, createdNodeVersion.NodeID, publisherID, nodeID)) + if slackErr != nil { + log.Ctx(ctx).Error().Msgf("Failed to send message to Slack w/ err: %v", slackErr) + } + + return &NodeVersionCreation{ + NodeVersion: createdNodeVersion, + SignedUrl: signedUrl, + }, nil + }) +} + +type NodeVersionCreation struct { + NodeVersion *ent.NodeVersion + SignedUrl string +} + +func (s *RegistryService) ListNodeVersions(ctx context.Context, client *ent.Client, nodeID string) ([]*ent.NodeVersion, error) { + log.Ctx(ctx).Info().Msgf("listing node versions: %v", nodeID) + versions, err := client.NodeVersion.Query(). + Where(nodeversion.NodeIDEQ(nodeID)). + WithStorageFile(). + Order(ent.Desc(nodeversion.FieldCreateTime)). + All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list node versions: %w", err) + } + return versions, nil +} + +func (s *RegistryService) GetNodeVersion(ctx context.Context, client *ent.Client, nodeId, nodeVersion string) (*ent.NodeVersion, error) { + log.Ctx(ctx).Info().Msgf("getting node version: %v", nodeVersion) + return client.NodeVersion. + Query(). + Where(nodeversion.VersionEQ(nodeVersion)). + Where(nodeversion.NodeIDEQ(nodeId)). + WithStorageFile(). 
+ Only(ctx) +} + +func (s *RegistryService) UpdateNodeVersion(ctx context.Context, client *ent.Client, update *ent.NodeVersionUpdateOne) (*ent.NodeVersion, error) { + log.Ctx(ctx).Info().Msgf("updating node version fields: %v", update.Mutation().Fields()) + node, err := update.Save(ctx) + if err != nil { + return nil, fmt.Errorf("failed to update node version: %w", err) + } + return node, nil +} + +func (s *RegistryService) GetLatestNodeVersion(ctx context.Context, client *ent.Client, nodeId string) (*ent.NodeVersion, error) { + log.Ctx(ctx).Info().Msgf("getting latest version of node: %v", nodeId) + nodeVersion, err := client.NodeVersion. + Query(). + Where(nodeversion.NodeIDEQ(nodeId)). + Order(ent.Desc(nodeversion.FieldCreateTime)). + WithStorageFile(). + First(ctx) + + if err != nil { + if ent.IsNotFound(err) { + + return nil, nil + } + return nil, err + } + return nodeVersion, nil +} + +func (s *RegistryService) AssertPublisherPermissions(ctx context.Context, + client *ent.Client, + publisherID string, + userID string, + permissions []schema.PublisherPermissionType, +) (err error) { + w, err := client.Publisher.Get(ctx, publisherID) + if err != nil { + return fmt.Errorf("fail to query publisher by id: %s %w", publisherID, err) + } + wp, err := w.QueryPublisherPermissions(). + Where( + publisherpermission.PermissionIn(permissions...), + publisherpermission.UserIDEQ(userID), + ). 
+ Count(ctx) + if err != nil { + return fmt.Errorf("fail to query publisher permission :%w", err) + } + if wp < 1 { + return newErrorPermission("user '%s' doesn't have required permission on publisher '%s' ", userID, publisherID) + } + return +} + +func (s *RegistryService) IsPersonalAccessTokenValidForPublisher(ctx context.Context, + client *ent.Client, + publisherID string, + accessToken string, +) (bool, error) { + w, err := client.Publisher.Get(ctx, publisherID) + if err != nil { + log.Ctx(ctx).Error().Err(err).Msgf("fail to find publisher by id: %s", publisherID) + return false, fmt.Errorf("fail to find publisher by id: %s", publisherID) + } + exists, err := w.QueryPersonalAccessTokens(). + Where( + personalaccesstoken.And( + personalaccesstoken.PublisherIDEQ(publisherID), + personalaccesstoken.TokenEQ(accessToken), + ), + ). + Exist(ctx) + if err != nil { + return false, fmt.Errorf("fail to query publisher permission :%w", err) + } + return exists, nil +} + +func (s *RegistryService) AssertNodeBelongsToPublisher(ctx context.Context, client *ent.Client, publisherID string, nodeID string) error { + node, err := client.Node.Get(ctx, nodeID) + if err != nil { + return fmt.Errorf("failed to get node: %w", err) + } + if node.PublisherID != publisherID { + return newErrorPermission("node %s does not belong to publisher %s", nodeID, publisherID) + } + return nil +} + +func (s *RegistryService) AssertAccessTokenBelongsToPublisher(ctx context.Context, client *ent.Client, publisherID string, tokenId uuid.UUID) error { + pat, err := client.PersonalAccessToken.Query().Where( + personalaccesstoken.IDEQ(tokenId), + personalaccesstoken.PublisherIDEQ(publisherID), + ).Only(ctx) + if err != nil { + return fmt.Errorf("failed to get personal access token: %w", err) + } + if pat.PublisherID != publisherID { + return newErrorPermission("personal access token %s does not belong to publisher %s", tokenId, publisherID) + } + return nil +} + +func (s *RegistryService) 
DeletePublisher(ctx context.Context, client *ent.Client, publisherID string) error { + log.Ctx(ctx).Info().Msgf("deleting publisher: %v", publisherID) + return db.WithTx(ctx, client, func(tx *ent.Tx) error { + client = tx.Client() + + _, err := client.PublisherPermission. + Delete(). + Where(publisherpermission.PublisherIDEQ(publisherID)). + Exec(ctx) + if err != nil { + return fmt.Errorf("failed to delete publisher permissions: %w", err) + } + + _, err = client.PersonalAccessToken.Delete(). + Where(personalaccesstoken. + PublisherIDEQ(publisherID)). + Exec(ctx) + if err != nil { + return fmt.Errorf("failed to delete publisher access token: %w", err) + } + + _, err = client.Publisher. + Delete(). + Where(publisher.IDEQ(publisherID)). + Exec(ctx) + if err != nil { + return fmt.Errorf("failed to delete publisher: %w", err) + } + return nil + }) + +} + +func (s *RegistryService) DeleteNode(ctx context.Context, client *ent.Client, nodeID string) error { + log.Ctx(ctx).Info().Msgf("deleting node: %v", nodeID) + err := client.Node.DeleteOneID(nodeID).Exec(ctx) + if err != nil { + return fmt.Errorf("failed to delete node: %w", err) + } + return nil +} + +type errorPermission string + +// Error implements error. 
+func (p errorPermission) Error() string {
+	return string(p)
+}
+
+func newErrorPermission(tmpl string, args ...interface{}) errorPermission {
+	return errorPermission(fmt.Sprintf(tmpl, args...))
+}
+
+var _ error = errorPermission("")
+
+func IsPermissionError(err error) bool {
+	if err == nil {
+		return false
+	}
+	// Target a value, not a pointer: newErrorPermission returns errorPermission
+	// values, so errors.As with **errorPermission would never match.
+	var e errorPermission
+	return errors.As(err, &e)
+}
diff --git a/skaffold.yaml b/skaffold.yaml
new file mode 100644
index 0000000..bb8d74a
--- /dev/null
+++ b/skaffold.yaml
@@ -0,0 +1,15 @@
+apiVersion: skaffold/v4beta2
+kind: Config
+metadata:
+  name: deploy-comfy-backend
+profiles:
+- name: staging
+  manifests:
+    rawYaml:
+    - run-service-staging.yaml
+- name: prod
+  manifests:
+    rawYaml:
+    - run-service-prod.yaml
+deploy:
+  cloudrun: {}
\ No newline at end of file
diff --git a/supabase/.gitignore b/supabase/.gitignore
new file mode 100644
index 0000000..a3ad880
--- /dev/null
+++ b/supabase/.gitignore
@@ -0,0 +1,4 @@
+# Supabase
+.branches
+.temp
+.env
diff --git a/supabase/config.toml b/supabase/config.toml
new file mode 100644
index 0000000..9873a0b
--- /dev/null
+++ b/supabase/config.toml
@@ -0,0 +1,159 @@
+# A string used to distinguish different Supabase projects on the same host. Defaults to the
+# working directory name when running `supabase init`.
+project_id = "registry-backend"
+
+[api]
+enabled = true
+# Port to use for the API URL.
+port = 54321
+# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
+# endpoints. public and storage are always included.
+schemas = ["public", "storage", "graphql_public"]
+# Extra schemas to add to the search_path of every request. public is always included.
+extra_search_path = ["public", "extensions"]
+# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
+# for accidental or malicious requests.
+max_rows = 1000
+
+[db]
+# Port to use for the local database URL.
+port = 54322 +# Port used by db diff command to initialize the shadow database. +shadow_port = 54320 +# The database major version to use. This has to be the same as your remote database's. Run `SHOW +# server_version;` on the remote database to check. +major_version = 15 + +[db.pooler] +enabled = false +# Port to use for the local connection pooler. +port = 54329 +# Specifies when a server connection can be reused by other clients. +# Configure one of the supported pooler modes: `transaction`, `session`. +pool_mode = "transaction" +# How many server connections to allow per user/database pair. +default_pool_size = 20 +# Maximum number of client connections allowed. +max_client_conn = 100 + +[realtime] +enabled = true +# Bind realtime via either IPv4 or IPv6. (default: IPv6) +# ip_version = "IPv6" +# The maximum length in bytes of HTTP request headers. (default: 4096) +# max_header_length = 4096 + +[studio] +enabled = true +# Port to use for Supabase Studio. +port = 54323 +# External URL of the API server that frontend connects to. +api_url = "http://127.0.0.1" + +# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they +# are monitored, and you can view the emails that would have been sent from the web interface. +[inbucket] +enabled = true +# Port to use for the email testing server web interface. +port = 54324 +# Uncomment to expose additional ports for testing user applications that send emails. +# smtp_port = 54325 +# pop3_port = 54326 + +[storage] +enabled = true +# The maximum file size allowed (e.g. "5MB", "500KB"). +file_size_limit = "50MiB" + +[auth] +enabled = true +# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used +# in emails. +site_url = "http://127.0.0.1:3000" +# A list of *exact* URLs that auth providers are permitted to redirect to post authentication. +additional_redirect_urls = ["https://127.0.0.1:3000"] +# How long tokens are valid for, in seconds. 
Defaults to 3600 (1 hour), maximum 604,800 (1 week). +jwt_expiry = 3600 +# If disabled, the refresh token will never expire. +enable_refresh_token_rotation = true +# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds. +# Requires enable_refresh_token_rotation = true. +refresh_token_reuse_interval = 10 +# Allow/disallow new user signups to your project. +enable_signup = true +# Allow/disallow testing manual linking of accounts +enable_manual_linking = false + +[auth.email] +# Allow/disallow new user signups via email to your project. +enable_signup = true +# If enabled, a user will be required to confirm any email change on both the old, and new email +# addresses. If disabled, only the new email is required to confirm. +double_confirm_changes = true +# If enabled, users need to confirm their email address before signing in. +enable_confirmations = false + +# Uncomment to customize email template +# [auth.email.template.invite] +# subject = "You have been invited" +# content_path = "./supabase/templates/invite.html" + +[auth.sms] +# Allow/disallow new user signups via SMS to your project. +enable_signup = true +# If enabled, users need to confirm their phone number before signing in. +enable_confirmations = false +# Template for sending OTP to users +template = "Your code is {{ .Code }} ." + +# Use pre-defined map of phone number to OTP for testing. +[auth.sms.test_otp] +# 4152127777 = "123456" + +# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used. +[auth.hook.custom_access_token] +# enabled = true +# uri = "pg-functions:////" + + +# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`. +[auth.sms.twilio] +enabled = false +account_sid = "" +message_service_sid = "" +# DO NOT commit your Twilio auth token to git. 
Use environment variable substitution instead: +auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)" + +# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`, +# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`, +# `twitter`, `slack`, `spotify`, `workos`, `zoom`. +[auth.external.apple] +enabled = false +client_id = "" +# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead: +secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)" +# Overrides the default auth redirectUrl. +redirect_uri = "" +# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure, +# or any other third-party OIDC providers. +url = "" + +[analytics] +enabled = false +port = 54327 +vector_port = 54328 +# Configure one of the supported backends: `postgres`, `bigquery`. +backend = "postgres" + +# Experimental features may be deprecated any time +[experimental] +# Configures Postgres storage engine to use OrioleDB (S3) +orioledb_version = "" +# Configures S3 bucket URL, eg. .s3-.amazonaws.com +s3_host = "env(S3_HOST)" +# Configures S3 bucket region, eg. us-east-1 +s3_region = "env(S3_REGION)" +# Configures AWS_ACCESS_KEY_ID for S3 bucket +s3_access_key = "env(S3_ACCESS_KEY)" +# Configures AWS_SECRET_ACCESS_KEY for S3 bucket +s3_secret_key = "env(S3_SECRET_KEY)" diff --git a/supabase/seed.sql b/supabase/seed.sql new file mode 100644 index 0000000..e69de29 diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..67a500f --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen" +)