diff --git a/charts/aiven-operator-crds/Chart.yaml b/charts/aiven-operator-crds/Chart.yaml index 7dc1f31..c3a2683 100644 --- a/charts/aiven-operator-crds/Chart.yaml +++ b/charts/aiven-operator-crds/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: aiven-operator-crds description: A Helm chart to deploy the aiven operator custom resource definitions type: application -version: v0.24.0 -appVersion: v0.24.0 +version: v0.25.0 +appVersion: v0.25.0 maintainers: - name: byashimov url: https://www.aiven.io diff --git a/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml b/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml index af1ed8b..091b772 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml @@ -825,6 +825,11 @@ spec: Users with view-only permission can edit but not save dashboards type: boolean + wal: + description: + Setting to enable/disable Write-Ahead Logging. The + default value is false (disabled). + type: boolean type: object required: - plan diff --git a/charts/aiven-operator-crds/templates/aiven.io_kafkaconnects.yaml b/charts/aiven-operator-crds/templates/aiven.io_kafkaconnects.yaml index d416cf4..0414c14 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_kafkaconnects.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_kafkaconnects.yaml @@ -467,6 +467,13 @@ spec: - 1 - 2 type: integer + prefix_path_depth: + description: + Prefix path depth of the secrets Engine. + Default is 1. If the secrets engine path has more + than one segment it has to be increased to the number + of segments. 
+ type: integer token: description: Token used to authenticate with vault and diff --git a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml index 48ecc53..81af8ee 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml @@ -246,6 +246,13 @@ spec: to the Aiven DNS name maxLength: 255 type: string + follower_fetching: + description: Enable follower fetching + properties: + enabled: + description: Whether to enable the follower fetching functionality + type: boolean + type: object ip_filter: description: Allow incoming connections from CIDR address block, @@ -272,16 +279,16 @@ spec: description: Kafka broker configuration values properties: auto_create_topics_enable: - description: Enable auto creation of topics + description: "Enable auto-creation of topics. (Default: true)" type: boolean compression_type: description: - Specify the final compression type for a given + "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression - codec set by the producer. + codec set by the producer. (Default: producer)" enum: - gzip - snappy @@ -294,76 +301,83 @@ spec: description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer - than this.
(Default: 600000 ms (10 minutes))" maximum: 3600000 minimum: 1000 type: integer default_replication_factor: - description: Replication factor for autocreated topics + description: + "Replication factor for auto-created topics (Default: + 3)" maximum: 10 minimum: 1 type: integer group_initial_rebalance_delay_ms: description: - The amount of time, in milliseconds, the group + "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + (Default: 3000 ms (3 seconds))" maximum: 300000 minimum: 0 type: integer group_max_session_timeout_ms: description: - The maximum allowed session timeout for registered + "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 1800000 ms (30 minutes))" maximum: 1800000 minimum: 0 type: integer group_min_session_timeout_ms: description: - The minimum allowed session timeout for registered + "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 6000 ms (6 seconds))" maximum: 60000 minimum: 0 type: integer log_cleaner_delete_retention_ms: - description: How long are delete records retained? + description: + "How long are delete records retained? (Default: + 86400000 (1 day))" maximum: 315569260000 minimum: 0 type: integer log_cleaner_max_compaction_lag_ms: description: - The maximum amount of time message will remain - uncompacted.
Only applicable for logs that are being compacted + "The maximum amount of time message will remain + uncompacted. Only applicable for logs that are being compacted. + (Default: 9223372036854775807 ms (Long.MAX_VALUE))" minimum: 30000 type: integer log_cleaner_min_cleanable_ratio: description: - Controls log compactor frequency. Larger value + "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very - high value for this option. + high value for this option. (Default: 0.5)" maximum: 0.9 minimum: 0.2 type: number log_cleaner_min_compaction_lag_ms: description: - The minimum time a message will remain uncompacted + "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + (Default: 0 ms)" minimum: 0 type: integer log_cleanup_policy: description: - The default cleanup policy for segments beyond - the retention window + "The default cleanup policy for segments beyond + the retention window (Default: delete)" enum: - delete - compact @@ -371,238 +385,257 @@ spec: type: string log_flush_interval_messages: description: - The number of messages accumulated on a log partition - before messages are flushed to disk + "The number of messages accumulated on a log + partition before messages are flushed to disk (Default: + 9223372036854775807 (Long.MAX_VALUE))" minimum: 1 type: integer log_flush_interval_ms: description: - The maximum time in ms that a message in any - topic is kept in memory before flushed to disk. If not set, - the value in log.flush.scheduler.interval.ms is used + "The maximum time in ms that a message in any + topic is kept in memory (page-cache) before flushed to disk. 
+ If not set, the value in log.flush.scheduler.interval.ms + is used (Default: null)" minimum: 0 type: integer log_index_interval_bytes: description: - The interval with which Kafka adds an entry to - the offset index + "The interval with which Kafka adds an entry + to the offset index (Default: 4096 bytes (4 kibibytes))" maximum: 104857600 minimum: 0 type: integer log_index_size_max_bytes: - description: The maximum size in bytes of the offset index + description: + "The maximum size in bytes of the offset index + (Default: 10485760 (10 mebibytes))" maximum: 104857600 minimum: 1048576 type: integer log_local_retention_bytes: description: - The maximum size of local log segments that can - grow for a partition before it gets eligible for deletion. + "The maximum size of local log segments that + can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal - to log.retention.bytes value. + to log.retention.bytes value. (Default: -2)" minimum: -2 type: integer log_local_retention_ms: description: - The number of milliseconds to keep the local + "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms - value. + value. (Default: -2)" minimum: -2 type: integer log_message_downconversion_enable: description: - This configuration controls whether down-conversion + "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
+ (Default: true)" type: boolean log_message_timestamp_difference_max_ms: description: - The maximum difference allowed between the timestamp + "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified - in the message + in the message (Default: 9223372036854775807 (Long.MAX_VALUE))" minimum: 0 type: integer log_message_timestamp_type: description: - Define whether the timestamp in the message is - message create time or log append time. + "Define whether the timestamp in the message + is message create time or log append time. (Default: CreateTime)" enum: - CreateTime - LogAppendTime type: string log_preallocate: - description: Should pre allocate file when create new segment? + description: + "Should pre allocate file when create new segment? + (Default: false)" type: boolean log_retention_bytes: - description: The maximum size of the log before deleting messages + description: + "The maximum size of the log before deleting + messages (Default: -1)" minimum: -1 type: integer log_retention_hours: description: - The number of hours to keep a log file before - deleting it + "The number of hours to keep a log file before + deleting it (Default: 168 hours (1 week))" maximum: 2147483647 minimum: -1 type: integer log_retention_ms: description: - The number of milliseconds to keep a log file + "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time - limit is applied. + limit is applied. (Default: null, log.retention.hours applies)" minimum: -1 type: integer log_roll_jitter_ms: description: - The maximum jitter to subtract from logRollTimeMillis + "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours - is used + is used (Default: null)" minimum: 0 type: integer log_roll_ms: description: - The maximum time before a new log segment is - rolled out (in milliseconds). + "The maximum time before a new log segment is + rolled out (in milliseconds). (Default: null, log.roll.hours + applies (Default: 168, 7 days))" minimum: 1 type: integer log_segment_bytes: - description: The maximum size of a single log file + description: + "The maximum size of a single log file (Default: + 1073741824 bytes (1 gibibyte))" maximum: 1073741824 minimum: 10485760 type: integer log_segment_delete_delay_ms: description: - The amount of time to wait before deleting a - file from the filesystem + "The amount of time to wait before deleting a + file from the filesystem (Default: 60000 ms (1 minute))" maximum: 3600000 minimum: 0 type: integer max_connections_per_ip: description: - The maximum number of connections allowed from - each ip address (defaults to 2147483647). + "The maximum number of connections allowed from + each ip address (Default: 2147483647)." maximum: 2147483647 minimum: 256 type: integer max_incremental_fetch_session_cache_slots: description: - The maximum number of incremental fetch sessions - that the broker will maintain. + "The maximum number of incremental fetch sessions + that the broker will maintain. (Default: 1000)" maximum: 10000 minimum: 1000 type: integer message_max_bytes: description: - The maximum size of message that the server can - receive. + "The maximum size of message that the server + can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes))" maximum: 100001200 minimum: 0 type: integer min_insync_replicas: description: - When a producer sets acks to 'all' (or '-1'), + "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered - successful. + successful. 
(Default: 1)" maximum: 7 minimum: 1 type: integer num_partitions: - description: Number of partitions for autocreated topics + description: + "Number of partitions for auto-created topics + (Default: 1)" maximum: 1000 minimum: 1 type: integer offsets_retention_minutes: - description: Log retention window in minutes for offsets topic + description: + "Log retention window in minutes for offsets + topic (Default: 10080 minutes (7 days))" maximum: 2147483647 minimum: 1 type: integer producer_purgatory_purge_interval_requests: description: - The purge interval (in number of requests) of - the producer request purgatory(defaults to 1000). + "The purge interval (in number of requests) of + the producer request purgatory (Default: 1000)." maximum: 10000 minimum: 10 type: integer replica_fetch_max_bytes: description: - The number of bytes of messages to attempt to - fetch for each partition (defaults to 1048576). This is - not an absolute maximum, if the first record batch in the - first non-empty partition of the fetch is larger than this - value, the record batch will still be returned to ensure - that progress can be made. + "The number of bytes of messages to attempt to + fetch for each partition. This is not an absolute maximum, + if the first record batch in the first non-empty partition + of the fetch is larger than this value, the record batch + will still be returned to ensure that progress can be made. + (Default: 1048576 bytes (1 mebibyte))" maximum: 104857600 minimum: 1048576 type: integer replica_fetch_response_max_bytes: description: - Maximum bytes expected for the entire fetch response - (defaults to 10485760). Records are fetched in batches, - and if the first record batch in the first non-empty partition - of the fetch is larger than this value, the record batch - will still be returned to ensure that progress can be made. - As such, this is not an absolute maximum. + "Maximum bytes expected for the entire fetch + response.
Records are fetched in batches, and if the first + record batch in the first non-empty partition of the fetch + is larger than this value, the record batch will still be + returned to ensure that progress can be made. As such, this + is not an absolute maximum. (Default: 10485760 bytes (10 + mebibytes))" maximum: 1048576000 minimum: 10485760 type: integer sasl_oauthbearer_expected_audience: description: - The (optional) comma-delimited setting for the + "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one - of the expected audiences. + of the expected audiences. (Default: null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_expected_issuer: description: - Optional setting for the broker to use to verify - that the JWT was created by the expected issuer. + "Optional setting for the broker to use to verify + that the JWT was created by the expected issuer. (Default: + null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_jwks_endpoint_url: description: - OIDC JWKS endpoint URL. By setting this the SASL - SSL OAuth2/OIDC authentication is enabled. See also other - options for SASL OAuth2/OIDC. + "OIDC JWKS endpoint URL. By setting this the + SASL SSL OAuth2/OIDC authentication is enabled. See also + other options for SASL OAuth2/OIDC. (Default: null)" maxLength: 2048 type: string sasl_oauthbearer_sub_claim_name: description: - Name of the scope from which to extract the subject - claim from the JWT. Defaults to sub. + "Name of the scope from which to extract the + subject claim from the JWT. (Default: sub)" maxLength: 128 - pattern: ^[^\r\n]*$ + pattern: ^[^\r\n]*\S[^\r\n]*$ type: string socket_request_max_bytes: description: - The maximum number of bytes in a socket request - (defaults to 104857600). + "The maximum number of bytes in a socket request + (Default: 104857600 bytes)."
maximum: 209715200 minimum: 10485760 type: integer transaction_partition_verification_enable: description: - Enable verification that checks that the partition + "Enable verification that checks that the partition has been added to the transaction before writing transactional - records to the partition + records to the partition. (Default: true)" type: boolean transaction_remove_expired_transaction_cleanup_interval_ms: description: - The interval at which to remove transactions + "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms - passing (defaults to 3600000 (1 hour)). + passing (Default: 3600000 ms (1 hour))." maximum: 3600000 minimum: 600000 type: integer transaction_state_log_segment_bytes: description: - The transaction topic segment bytes should be + "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log - compaction and cache loads (defaults to 104857600 (100 mebibytes)). + compaction and cache loads (Default: 104857600 bytes (100 + mebibytes))." maximum: 2147483647 minimum: 1048576 type: integer @@ -834,6 +867,13 @@ spec: - 1 - 2 type: integer + prefix_path_depth: + description: + Prefix path depth of the secrets Engine. + Default is 1. If the secrets engine path has more + than one segment it has to be increased to the number + of segments. 
+ type: integer token: description: Token used to authenticate with vault and @@ -944,6 +984,19 @@ spec: minimum: 10 type: integer type: object + kafka_sasl_mechanisms: + description: Kafka SASL mechanisms + properties: + plain: + description: Enable PLAIN mechanism + type: boolean + scram_sha_256: + description: Enable SCRAM-SHA-256 mechanism + type: boolean + scram_sha_512: + description: Enable SCRAM-SHA-512 mechanism + type: boolean + type: object kafka_version: description: Kafka major version enum: diff --git a/charts/aiven-operator-crds/templates/aiven.io_kafkaschemas.yaml b/charts/aiven-operator-crds/templates/aiven.io_kafkaschemas.yaml index d227a40..5e34946 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_kafkaschemas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_kafkaschemas.yaml @@ -92,6 +92,13 @@ spec: Kafka Schema configuration should be a valid Avro Schema JSON format type: string + schemaType: + description: Schema type + enum: + - AVRO + - JSON + - PROTOBUF + type: string serviceName: description: Specifies the name of the service that this resource diff --git a/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml b/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml index d786c9a..b7eefee 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml @@ -318,6 +318,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml b/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml index 6be0fa4..3b23172 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml +++ 
b/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml @@ -230,6 +230,61 @@ spec: type: string maxItems: 1 type: array + azure_migration: + properties: + account: + description: Azure account name + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + container: + description: Azure container name + pattern: ^[^\r\n]*$ + type: string + endpoint_suffix: + description: Defines the DNS suffix for Azure Storage endpoints. + pattern: ^[^\r\n]*$ + type: string + key: + description: + Azure account secret key. One of key or sas_token + should be specified + pattern: ^[^\r\n]*$ + type: string + sas_token: + description: + A shared access signatures (SAS) token. One of + key or sas_token should be specified + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - account + - base_path + - container + - snapshot_name + type: object custom_domain: description: Serve the web frontend using a custom CNAME pointing @@ -244,6 +299,45 @@ spec: to potential data loss in case of losing a service node, this setting can no longer be activated." type: boolean + gcs_migration: + properties: + base_path: + description: + The path to the repository data within its container. 
+ The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: The path to the repository data within its container + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + credentials: + description: Google Cloud Storage credentials file content + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - base_path + - bucket + - credentials + - snapshot_name + type: object index_patterns: description: Index patterns items: @@ -279,6 +373,39 @@ spec: type: object maxItems: 512 type: array + index_rollup: + description: Index rollup settings + properties: + rollup_dashboards_enabled: + description: + Whether rollups are enabled in OpenSearch Dashboards. + Defaults to true. + type: boolean + rollup_enabled: + description: + Whether the rollup plugin is enabled. Defaults + to true. + type: boolean + rollup_search_backoff_count: + description: + How many retries the plugin should attempt for + failed rollup jobs. Defaults to 5. + minimum: 1 + type: integer + rollup_search_backoff_millis: + description: + The backoff time between retries for failed rollup + jobs. Defaults to 1000ms. + minimum: 1 + type: integer + rollup_search_search_all_jobs: + description: + Whether OpenSearch should return all jobs that + match all specified search terms. If disabled, OpenSearch + returns just one, as opposed to all, of the jobs that matches + the search terms. Defaults to false. 
+ type: boolean + type: object index_template: description: Template settings for all new indexes properties: @@ -361,6 +488,7 @@ spec: The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. maxLength: 2048 + pattern: ^[^\r\n]*$ type: string enabled: description: @@ -744,6 +872,7 @@ spec: number of inline script compilations within a period of time. Default is use-context maxLength: 1024 + pattern: ^[^\r\n]*$ type: string search_max_buckets: description: @@ -951,6 +1080,67 @@ spec: maxLength: 128 pattern: ^[a-zA-Z0-9-_:.]+$ type: string + s3_migration: + properties: + access_key: + description: AWS Access key + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: S3 bucket name + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + endpoint: + description: + The S3 service endpoint to connect to. 
If you + are using an S3-compatible service then you should set this + to the service’s endpoint + pattern: ^[^\r\n]*$ + type: string + region: + description: S3 region + pattern: ^[^\r\n]*$ + type: string + secret_key: + description: AWS secret key + pattern: ^[^\r\n]*$ + type: string + server_side_encryption: + description: + When set to true files are encrypted on server + side + type: boolean + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - access_key + - base_path + - bucket + - region + - secret_key + - snapshot_name + type: object saml: description: OpenSearch SAML configuration properties: @@ -976,6 +1166,7 @@ spec: with the IdP. maxLength: 2048 minLength: 1 + pattern: ^[^\r\n]*$ type: string idp_pemtrustedcas_content: description: diff --git a/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml b/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml index 3310fd8..dbec011 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml @@ -314,6 +314,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported @@ -909,6 +916,17 @@ spec: type: string maxItems: 32 type: array + max_prepared_statements: + description: + PgBouncer tracks protocol-level named prepared + statements related commands sent by the client in transaction + and statement pooling modes when max_prepared_statements + is set to a non-zero value. Setting it to 0 disables prepared + statements. max_prepared_statements defaults to 100, and + its maximum is 3000. 
+ maximum: 3000 + minimum: 0 + type: integer min_pool_size: description: Add more server connections to pool if below diff --git a/charts/aiven-operator-crds/templates/aiven.io_redis.yaml b/charts/aiven-operator-crds/templates/aiven.io_redis.yaml index 7692367..dbf2f82 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_redis.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_redis.yaml @@ -230,6 +230,22 @@ spec: type: string maxItems: 1 type: array + backup_hour: + description: + The hour of day (in UTC) when backup for the service + is started. New backup is only started if previous backup has + already completed. + maximum: 23 + minimum: 0 + type: integer + backup_minute: + description: + The minute of an hour when backup for the service + is started. New backup is only started if previous backup has + already completed. + maximum: 59 + minimum: 0 + type: integer ip_filter: description: Allow incoming connections from CIDR address block, @@ -272,6 +288,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml b/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml index 3ebab27..825f727 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml @@ -181,6 +181,14 @@ spec: maximum: 1000000000 minimum: 0 type: integer + poll_max_timeout_ms: + description: + Timeout in milliseconds for a single poll from + Kafka. Takes the value of the stream_flush_interval_ms + server setting by default (500ms). 
+ maximum: 30000 + minimum: 0 + type: integer skip_broken_messages: description: Skip at least this number of broken messages @@ -188,6 +196,11 @@ spec: maximum: 1000000000 minimum: 0 type: integer + thread_per_consumer: + description: + Provide an independent thread for each consumer. + All consumers run in the same thread by default. + type: boolean topics: description: Kafka topics items: diff --git a/charts/aiven-operator/Chart.yaml b/charts/aiven-operator/Chart.yaml index 148d565..f8dc920 100644 --- a/charts/aiven-operator/Chart.yaml +++ b/charts/aiven-operator/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: aiven-operator description: A Helm chart to deploy the aiven operator type: application -version: v0.24.0 -appVersion: v0.24.0 +version: v0.25.0 +appVersion: v0.25.0 maintainers: - name: byashimov url: https://www.aiven.io