From cd4303f1d8c4030d2f79d463d6c209e32e58ff2b Mon Sep 17 00:00:00 2001
From: Josh Wong <23216828+josh-wong@users.noreply.github.com>
Date: Mon, 30 Sep 2024 11:31:36 +0900
Subject: [PATCH 01/10] Add file to `versioned_docs/version-3.13` folder
---
.../add-scalardb-to-your-build.mdx | 40 +
versioned_docs/version-3.13/api-guide.mdx | 1608 ++++++++++++
.../version-3.13/backup-restore.mdx | 177 ++
.../version-3.13/configurations.mdx | 294 +++
versioned_docs/version-3.13/data-modeling.mdx | 131 +
.../version-3.13/database-configurations.mdx | 119 +
.../version-3.13/deploy-overview.mdx | 18 +
versioned_docs/version-3.13/design.mdx | 13 +
.../version-3.13/develop-overview.mdx | 28 +
...-started-with-scalardb-by-using-kotlin.mdx | 416 +++
.../getting-started-with-scalardb.mdx | 536 ++++
.../conf/scalar-loki-stack-custom-values.yaml | 80 +
.../conf/scalar-prometheus-custom-values.yaml | 167 ++
.../configure-custom-values-envoy.mdx | 391 +++
.../configure-custom-values-file.mdx | 20 +
...tom-values-scalar-admin-for-kubernetes.mdx | 135 +
...configure-custom-values-scalar-manager.mdx | 126 +
...m-values-scalardb-analytics-postgresql.mdx | 188 ++
...nfigure-custom-values-scalardb-cluster.mdx | 375 +++
...nfigure-custom-values-scalardb-graphql.mdx | 223 ++
.../configure-custom-values-scalardb.mdx | 201 ++
...nfigure-custom-values-scalardl-auditor.mdx | 362 +++
...onfigure-custom-values-scalardl-ledger.mdx | 335 +++
...e-custom-values-scalardl-schema-loader.mdx | 87 +
.../helm-charts/getting-started-logging.mdx | 96 +
.../getting-started-monitoring.mdx | 266 ++
.../getting-started-scalar-helm-charts.mdx | 81 +
.../getting-started-scalar-manager.mdx | 181 ++
...-started-scalardb-analytics-postgresql.mdx | 510 ++++
...rted-scalardb-cluster-tls-cert-manager.mdx | 596 +++++
.../getting-started-scalardb-cluster-tls.mdx | 633 +++++
.../helm-charts/getting-started-scalardb.mdx | 382 +++
...rted-scalardl-auditor-tls-cert-manager.mdx | 948 +++++++
.../getting-started-scalardl-auditor-tls.mdx | 1030 ++++++++
.../getting-started-scalardl-auditor.mdx | 906 +++++++
.../getting-started-scalardl-ledger.mdx | 611 +++++
...-to-deploy-scalar-admin-for-kubernetes.mdx | 33 +
.../how-to-deploy-scalar-manager.mdx | 62 +
.../how-to-deploy-scalar-products.mdx | 71 +
...o-deploy-scalardb-analytics-postgresql.mdx | 40 +
.../how-to-deploy-scalardb-cluster.mdx | 73 +
.../how-to-deploy-scalardb-graphql.mdx | 45 +
.../helm-charts/how-to-deploy-scalardb.mdx | 36 +
.../how-to-deploy-scalardl-auditor.mdx | 41 +
.../how-to-deploy-scalardl-ledger.mdx | 43 +
.../mount-files-or-volumes-on-scalar-pods.mdx | 142 ++
.../use-secret-for-credentials.mdx | 245 ++
.../version-3.13/images/data_model.png | Bin 0 -> 256199 bytes
.../images/getting-started-ERD.png | Bin 0 -> 10063 bytes
.../version-3.13/images/scalardb.png | Bin 0 -> 113307 bytes
.../images/scalardb_data_model.png | Bin 0 -> 259602 bytes
.../version-3.13/images/software_stack.png | Bin 0 -> 48864 bytes
.../two_phase_commit_load_balancing.png | Bin 0 -> 40021 bytes
.../two_phase_commit_sequence_diagram.png | Bin 0 -> 39137 bytes
versioned_docs/version-3.13/index.mdx | 40 +
.../manage-backup-and-restore.mdx | 22 +
.../monitor-by-using-scalar-manager.mdx | 21 +
.../multi-storage-transactions.mdx | 67 +
versioned_docs/version-3.13/overview.mdx | 70 +
.../version-3.13/quick-start-overview.mdx | 36 +
.../version-3.13/releases/release-notes.mdx | 115 +
.../releases/release-support-policy.mdx | 104 +
versioned_docs/version-3.13/requirements.mdx | 220 ++
versioned_docs/version-3.13/roadmap.mdx | 101 +
...nal-storage-operations-through-library.mdx | 271 ++
...tions-through-primitive-crud-interface.mdx | 861 +++++++
...sactions-through-scalardb-core-library.mdx | 218 ++
.../AccessScalarProducts.mdx | 194 ++
.../scalar-kubernetes/AwsMarketplaceGuide.mdx | 427 ++++
.../AzureMarketplaceGuide.mdx | 234 ++
.../scalar-kubernetes/BackupNoSQL.mdx | 148 ++
.../scalar-kubernetes/BackupRDB.mdx | 20 +
.../scalar-kubernetes/BackupRestoreGuide.mdx | 47 +
.../CreateAKSClusterForScalarDB.mdx | 103 +
.../CreateAKSClusterForScalarDL.mdx | 106 +
.../CreateAKSClusterForScalarDLAuditor.mdx | 125 +
.../CreateAKSClusterForScalarProducts.mdx | 19 +
.../scalar-kubernetes/CreateBastionServer.mdx | 46 +
.../CreateEKSClusterForScalarDB.mdx | 85 +
.../CreateEKSClusterForScalarDBCluster.mdx | 85 +
.../CreateEKSClusterForScalarDL.mdx | 83 +
.../CreateEKSClusterForScalarDLAuditor.mdx | 102 +
.../CreateEKSClusterForScalarProducts.mdx | 20 +
.../HowToCreateKeyAndCertificateFiles.mdx | 146 ++
.../HowToGetContainerImages.mdx | 24 +
.../scalar-kubernetes/HowToScaleScalarDB.mdx | 45 +
.../scalar-kubernetes/HowToScaleScalarDL.mdx | 52 +
.../HowToUpgradeScalarDB.mdx | 99 +
.../HowToUpgradeScalarDL.mdx | 111 +
.../HowToUseContainerImages.mdx | 136 +
.../K8sLogCollectionGuide.mdx | 182 ++
.../scalar-kubernetes/K8sMonitorGuide.mdx | 155 ++
...ualDeploymentGuideScalarDBClusterOnEKS.mdx | 65 +
...nualDeploymentGuideScalarDBServerOnAKS.mdx | 62 +
...nualDeploymentGuideScalarDBServerOnEKS.mdx | 62 +
...ualDeploymentGuideScalarDLAuditorOnAKS.mdx | 98 +
...ualDeploymentGuideScalarDLAuditorOnEKS.mdx | 98 +
.../ManualDeploymentGuideScalarDLOnAKS.mdx | 50 +
.../ManualDeploymentGuideScalarDLOnEKS.mdx | 50 +
.../NetworkPeeringForScalarDLAuditor.mdx | 54 +
.../ProductionChecklistForScalarDBCluster.mdx | 152 ++
.../ProductionChecklistForScalarDLAuditor.mdx | 169 ++
.../ProductionChecklistForScalarDLLedger.mdx | 155 ++
.../ProductionChecklistForScalarProducts.mdx | 13 +
.../scalar-kubernetes/RegularCheck.mdx | 94 +
.../scalar-kubernetes/RestoreDatabase.mdx | 159 ++
.../scalar-kubernetes/SetupDatabase.mdx | 12 +
.../scalar-kubernetes/SetupDatabaseForAWS.mdx | 181 ++
.../SetupDatabaseForAzure.mdx | 205 ++
.../scalar-kubernetes/alerts/Envoy.mdx | 152 ++
.../scalar-kubernetes/alerts/Ledger.mdx | 149 ++
.../scalar-kubernetes/alerts/README.mdx | 12 +
..._Cluster_Direct_Kubernetes_Mode.drawio.png | Bin 0 -> 559913 bytes
..._ScalarDB_Cluster_Indirect_Mode.drawio.png | Bin 0 -> 575817 bytes
..._ScalarDB_Server_App_In_Cluster.drawio.png | Bin 0 -> 554468 bytes
...ScalarDB_Server_App_Out_Cluster.drawio.png | Bin 0 -> 574621 bytes
..._ScalarDL_Auditor_Multi_Account.drawio.png | Bin 0 -> 1332596 bytes
...calarDL_Auditor_Multi_Namespace.drawio.png | Bin 0 -> 1181669 bytes
...AKS_ScalarDL_Auditor_Multi_VNet.drawio.png | Bin 0 -> 1322420 bytes
.../images/png/AKS_ScalarDL_Ledger.drawio.png | Bin 0 -> 599746 bytes
..._Cluster_Direct_Kubernetes_Mode.drawio.png | Bin 0 -> 543821 bytes
..._ScalarDB_Cluster_Indirect_Mode.drawio.png | Bin 0 -> 566602 bytes
..._ScalarDB_Server_App_In_Cluster.drawio.png | Bin 0 -> 539665 bytes
...ScalarDB_Server_App_Out_Cluster.drawio.png | Bin 0 -> 566083 bytes
..._ScalarDL_Auditor_Multi_Account.drawio.png | Bin 0 -> 1292574 bytes
...calarDL_Auditor_Multi_Namespace.drawio.png | Bin 0 -> 1149218 bytes
.../EKS_ScalarDL_Auditor_Multi_VPC.drawio.png | Bin 0 -> 1270602 bytes
.../images/png/EKS_ScalarDL_Ledger.drawio.png | Bin 0 -> 583499 bytes
.../version-3.13/scalar-licensing/README.mdx | 106 +
.../backup-and-restore-check-pauses.png | Bin 0 -> 130024 bytes
.../backup-and-restore-create-pauses.png | Bin 0 -> 147876 bytes
.../images/dashboard-cluster.png | Bin 0 -> 150985 bytes
.../images/dashboard-pod-list.png | Bin 0 -> 187668 bytes
.../scalar-manager/images/logs.png | Bin 0 -> 322954 bytes
.../scalar-manager/images/metrics.png | Bin 0 -> 130974 bytes
.../version-3.13/scalar-manager/overview.mdx | 43 +
.../getting-started.mdx | 97 +
.../images/imported-schema.png | Bin 0 -> 52511 bytes
.../images/multi-storage-overview.png | Bin 0 -> 51475 bytes
.../installation.mdx | 60 +
.../scalardb-fdw.mdx | 179 ++
.../schema-importer.mdx | 65 +
.../scalardb-analytics-spark/README.mdx | 24 +
.../configuration.mdx | 126 +
.../getting-started.mdx | 180 ++
.../version-compatibility.mdx | 17 +
.../scalardb-benchmarks/README.mdx | 213 ++
.../common-reference.mdx | 160 ++
.../exception-handling.mdx | 173 ++
.../getting-started-with-admin-api.mdx | 127 +
.../getting-started-with-aspnet-and-di.mdx | 77 +
.../getting-started-with-auth.mdx | 66 +
...rted-with-distributed-sql-transactions.mdx | 188 ++
...-started-with-distributed-transactions.mdx | 223 ++
.../getting-started-with-linq.mdx | 372 +++
...with-scalardb-tables-as-csharp-classes.mdx | 196 ++
...ted-with-two-phase-commit-transactions.mdx | 141 ++
.../overview.mdx | 21 +
.../scalardb-cluster/compatibility.mdx | 31 +
...ide-for-scalardb-cluster-with-java-api.mdx | 308 +++
...-started-with-scalardb-cluster-graphql.mdx | 339 +++
...started-with-scalardb-cluster-sql-jdbc.mdx | 233 ++
...-scalardb-cluster-sql-spring-data-jdbc.mdx | 269 ++
.../getting-started-with-scalardb-cluster.mdx | 406 +++
...ted-with-using-go-for-scalardb-cluster.mdx | 443 ++++
...with-using-python-for-scalardb-cluster.mdx | 486 ++++
.../images/direct-kubernetes-client-mode.png | Bin 0 -> 64538 bytes
.../images/getting-started-ERD.png | Bin 0 -> 10063 bytes
.../images/indirect-client-mode.png | Bin 0 -> 57621 bytes
.../images/scalardb-cluster-architecture.png | Bin 0 -> 50669 bytes
.../version-3.13/scalardb-cluster/index.mdx | 67 +
...ge-operations-through-scalardb-cluster.mdx | 310 +++
...orage-operations-through-sql-interface.mdx | 392 +++
...nsactions-through-scalardb-cluster-sql.mdx | 297 +++
...-transactions-through-scalardb-cluster.mdx | 296 +++
.../scalardb-auth-status-codes.mdx | 301 +++
.../scalardb-auth-with-sql.mdx | 180 ++
.../scalardb-cluster-configurations.mdx | 241 ++
.../scalardb-cluster-grpc-api-guide.mdx | 235 ++
.../scalardb-cluster-sql-grpc-api-guide.mdx | 218 ++
.../scalardb-cluster-status-codes.mdx | 360 +++
...ster-on-kubernetes-by-using-helm-chart.mdx | 255 ++
.../scalardb-cluster/standalone-mode.mdx | 235 ++
.../standalone-mode/docker-compose.yaml | 28 +
.../scalardb-cluster-node.properties | 5 +
.../scalardb-core-status-codes.mdx | 1604 ++++++++++++
.../scalardb-graphql/aws-deployment-guide.mdx | 298 +++
.../getting-started-with-scalardb-graphql.mdx | 235 ++
.../scalardb-graphql/how-to-run-server.mdx | 71 +
...ow-to-run-two-phase-commit-transaction.mdx | 156 ++
.../images/cluster-iam-role.png | Bin 0 -> 384551 bytes
.../images/iam-role-for-serviceaccount.png | Bin 0 -> 295886 bytes
.../images/lb-controller-iam-policy.png | Bin 0 -> 156691 bytes
.../scalardb-graphql/images/node-iam-role.png | Bin 0 -> 447576 bytes
.../scalardb-graphql/images/oidc-provider.png | Bin 0 -> 397957 bytes
.../version-3.13/scalardb-graphql/index.mdx | 101 +
.../scalardb-graphql-status-codes.mdx | 241 ++
.../version-3.13/scalardb-samples/README.mdx | 17 +
.../README.mdx | 535 ++++
.../images/ERD.png | Bin 0 -> 10993 bytes
.../images/overview.png | Bin 0 -> 50911 bytes
.../images/sequence_diagram.png | Bin 0 -> 35855 bytes
.../README.mdx | 315 +++
.../images/ERD.png | Bin 0 -> 9786 bytes
.../images/overview.png | Bin 0 -> 64426 bytes
.../README.mdx | 304 +++
.../README.mdx | 280 +++
.../README.mdx | 530 ++++
.../images/ERD.png | Bin 0 -> 10993 bytes
.../images/overview.png | Bin 0 -> 55118 bytes
.../images/seq-diagram-high-level-2pc-api.png | Bin 0 -> 219961 bytes
.../images/sequence_diagram.png | Bin 0 -> 35855 bytes
.../README.mdx | 334 +++
.../images/ERD.png | Bin 0 -> 9786 bytes
.../images/overview.png | Bin 0 -> 62938 bytes
.../version-3.13/scalardb-server.mdx | 172 ++
.../add-scalardb-sql-to-your-build.mdx | 142 ++
.../scalardb-sql/configurations.mdx | 171 ++
.../getting-started-with-jdbc.mdx | 230 ++
.../scalardb-sql/getting-started-with-sql.mdx | 218 ++
.../version-3.13/scalardb-sql/grammar.mdx | 2228 +++++++++++++++++
.../spring_data_ingegration_overall_arch.png | Bin 0 -> 97888 bytes
.../version-3.13/scalardb-sql/index.mdx | 137 +
.../version-3.13/scalardb-sql/jdbc-guide.mdx | 194 ++
.../scalardb-sql/migration-guide.mdx | 114 +
.../scalardb-sql-status-codes.mdx | 561 +++++
.../scalardb-sql/spring-data-guide.mdx | 816 ++++++
.../scalardb-sql/sql-api-guide.mdx | 359 +++
.../version-3.13/scalardb-sql/sql-server.mdx | 163 ++
.../version-3.13/schema-loader-import.mdx | 275 ++
versioned_docs/version-3.13/schema-loader.mdx | 686 +++++
.../TransactionManagementOnCassandra.pdf | Bin 0 -> 831161 bytes
.../two-phase-commit-transactions.mdx | 736 ++++++
versioned_sidebars/version-3.13-sidebars.json | 834 ++++++
234 files changed, 41919 insertions(+)
create mode 100644 versioned_docs/version-3.13/add-scalardb-to-your-build.mdx
create mode 100644 versioned_docs/version-3.13/api-guide.mdx
create mode 100644 versioned_docs/version-3.13/backup-restore.mdx
create mode 100644 versioned_docs/version-3.13/configurations.mdx
create mode 100644 versioned_docs/version-3.13/data-modeling.mdx
create mode 100644 versioned_docs/version-3.13/database-configurations.mdx
create mode 100644 versioned_docs/version-3.13/deploy-overview.mdx
create mode 100644 versioned_docs/version-3.13/design.mdx
create mode 100644 versioned_docs/version-3.13/develop-overview.mdx
create mode 100644 versioned_docs/version-3.13/getting-started-with-scalardb-by-using-kotlin.mdx
create mode 100644 versioned_docs/version-3.13/getting-started-with-scalardb.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/conf/scalar-loki-stack-custom-values.yaml
create mode 100644 versioned_docs/version-3.13/helm-charts/conf/scalar-prometheus-custom-values.yaml
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-envoy.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-file.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-manager.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-analytics-postgresql.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-graphql.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-auditor.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-ledger.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-schema-loader.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-logging.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-monitoring.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalar-helm-charts.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalar-manager.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardb-analytics-postgresql.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls-cert-manager.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardb.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls-cert-manager.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/getting-started-scalardl-ledger.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-manager.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-products.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-analytics-postgresql.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-graphql.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardl-auditor.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardl-ledger.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/mount-files-or-volumes-on-scalar-pods.mdx
create mode 100644 versioned_docs/version-3.13/helm-charts/use-secret-for-credentials.mdx
create mode 100644 versioned_docs/version-3.13/images/data_model.png
create mode 100644 versioned_docs/version-3.13/images/getting-started-ERD.png
create mode 100644 versioned_docs/version-3.13/images/scalardb.png
create mode 100644 versioned_docs/version-3.13/images/scalardb_data_model.png
create mode 100644 versioned_docs/version-3.13/images/software_stack.png
create mode 100644 versioned_docs/version-3.13/images/two_phase_commit_load_balancing.png
create mode 100644 versioned_docs/version-3.13/images/two_phase_commit_sequence_diagram.png
create mode 100644 versioned_docs/version-3.13/index.mdx
create mode 100644 versioned_docs/version-3.13/manage-backup-and-restore.mdx
create mode 100644 versioned_docs/version-3.13/monitor-by-using-scalar-manager.mdx
create mode 100644 versioned_docs/version-3.13/multi-storage-transactions.mdx
create mode 100644 versioned_docs/version-3.13/overview.mdx
create mode 100644 versioned_docs/version-3.13/quick-start-overview.mdx
create mode 100644 versioned_docs/version-3.13/releases/release-notes.mdx
create mode 100644 versioned_docs/version-3.13/releases/release-support-policy.mdx
create mode 100644 versioned_docs/version-3.13/requirements.mdx
create mode 100644 versioned_docs/version-3.13/roadmap.mdx
create mode 100644 versioned_docs/version-3.13/run-non-transactional-storage-operations-through-library.mdx
create mode 100644 versioned_docs/version-3.13/run-non-transactional-storage-operations-through-primitive-crud-interface.mdx
create mode 100644 versioned_docs/version-3.13/run-transactions-through-scalardb-core-library.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/AccessScalarProducts.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/AwsMarketplaceGuide.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/AzureMarketplaceGuide.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/BackupNoSQL.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/BackupRDB.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/BackupRestoreGuide.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateAKSClusterForScalarDB.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateAKSClusterForScalarDL.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateAKSClusterForScalarDLAuditor.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateAKSClusterForScalarProducts.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateBastionServer.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateEKSClusterForScalarDB.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateEKSClusterForScalarDBCluster.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateEKSClusterForScalarDL.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateEKSClusterForScalarDLAuditor.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/CreateEKSClusterForScalarProducts.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToGetContainerImages.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToScaleScalarDB.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToScaleScalarDL.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToUpgradeScalarDB.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToUpgradeScalarDL.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/HowToUseContainerImages.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/K8sLogCollectionGuide.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/K8sMonitorGuide.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDBClusterOnEKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnAKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnEKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnAKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnEKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDLOnAKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ManualDeploymentGuideScalarDLOnEKS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/NetworkPeeringForScalarDLAuditor.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ProductionChecklistForScalarDBCluster.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ProductionChecklistForScalarDLAuditor.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ProductionChecklistForScalarDLLedger.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/ProductionChecklistForScalarProducts.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/RegularCheck.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/RestoreDatabase.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/SetupDatabase.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/SetupDatabaseForAWS.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/SetupDatabaseForAzure.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/alerts/Envoy.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/alerts/Ledger.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/alerts/README.mdx
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Indirect_Mode.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_In_Cluster.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_Out_Cluster.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Account.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Namespace.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_VNet.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/AKS_ScalarDL_Ledger.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Indirect_Mode.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_In_Cluster.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_Out_Cluster.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Account.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Namespace.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_VPC.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-kubernetes/images/png/EKS_ScalarDL_Ledger.drawio.png
create mode 100644 versioned_docs/version-3.13/scalar-licensing/README.mdx
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/backup-and-restore-check-pauses.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/backup-and-restore-create-pauses.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/dashboard-cluster.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/dashboard-pod-list.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/logs.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/images/metrics.png
create mode 100644 versioned_docs/version-3.13/scalar-manager/overview.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/getting-started.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/images/imported-schema.png
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/images/multi-storage-overview.png
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/installation.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/scalardb-fdw.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-postgresql/schema-importer.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-spark/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-spark/configuration.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-spark/getting-started.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-analytics-spark/version-compatibility.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-benchmarks/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/common-reference.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/exception-handling.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-admin-api.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-aspnet-and-di.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-auth.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-sql-transactions.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-transactions.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-linq.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-scalardb-tables-as-csharp-classes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/getting-started-with-two-phase-commit-transactions.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster-dotnet-client-sdk/overview.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/compatibility.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-scalardb-cluster-graphql.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-scalardb-cluster-sql-jdbc.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-scalardb-cluster-sql-spring-data-jdbc.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-using-go-for-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/getting-started-with-using-python-for-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/images/direct-kubernetes-client-mode.png
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/images/getting-started-ERD.png
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/images/indirect-client-mode.png
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/images/scalardb-cluster-architecture.png
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/index.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/run-non-transactional-storage-operations-through-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/run-non-transactional-storage-operations-through-sql-interface.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/run-transactions-through-scalardb-cluster-sql.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/run-transactions-through-scalardb-cluster.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-auth-status-codes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-auth-with-sql.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-cluster-configurations.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-cluster-grpc-api-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/scalardb-cluster-status-codes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/standalone-mode.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/standalone-mode/docker-compose.yaml
create mode 100644 versioned_docs/version-3.13/scalardb-cluster/standalone-mode/scalardb-cluster-node.properties
create mode 100644 versioned_docs/version-3.13/scalardb-core-status-codes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/aws-deployment-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/getting-started-with-scalardb-graphql.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/how-to-run-server.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/how-to-run-two-phase-commit-transaction.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/images/cluster-iam-role.png
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/images/iam-role-for-serviceaccount.png
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/images/lb-controller-iam-policy.png
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/images/node-iam-role.png
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/images/oidc-provider.png
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/index.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-graphql/scalardb-graphql-status-codes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/microservice-transaction-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/microservice-transaction-sample/images/ERD.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/microservice-transaction-sample/images/overview.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/microservice-transaction-sample/images/sequence_diagram.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/multi-storage-transaction-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/multi-storage-transaction-sample/images/ERD.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/multi-storage-transaction-sample/images/overview.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/scalardb-analytics-postgresql-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/scalardb-analytics-spark-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-microservice-transaction-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-microservice-transaction-sample/images/ERD.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-microservice-transaction-sample/images/overview.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-microservice-transaction-sample/images/seq-diagram-high-level-2pc-api.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-microservice-transaction-sample/images/sequence_diagram.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-multi-storage-transaction-sample/README.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-multi-storage-transaction-sample/images/ERD.png
create mode 100644 versioned_docs/version-3.13/scalardb-samples/spring-data-multi-storage-transaction-sample/images/overview.png
create mode 100644 versioned_docs/version-3.13/scalardb-server.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/add-scalardb-sql-to-your-build.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/configurations.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/getting-started-with-jdbc.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/getting-started-with-sql.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/grammar.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/images/spring_data_ingegration_overall_arch.png
create mode 100644 versioned_docs/version-3.13/scalardb-sql/index.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/jdbc-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/migration-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/scalardb-sql-status-codes.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/spring-data-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/sql-api-guide.mdx
create mode 100644 versioned_docs/version-3.13/scalardb-sql/sql-server.mdx
create mode 100644 versioned_docs/version-3.13/schema-loader-import.mdx
create mode 100644 versioned_docs/version-3.13/schema-loader.mdx
create mode 100644 versioned_docs/version-3.13/slides/TransactionManagementOnCassandra.pdf
create mode 100644 versioned_docs/version-3.13/two-phase-commit-transactions.mdx
create mode 100644 versioned_sidebars/version-3.13-sidebars.json
diff --git a/versioned_docs/version-3.13/add-scalardb-to-your-build.mdx b/versioned_docs/version-3.13/add-scalardb-to-your-build.mdx
new file mode 100644
index 00000000..ed18f718
--- /dev/null
+++ b/versioned_docs/version-3.13/add-scalardb-to-your-build.mdx
@@ -0,0 +1,40 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Add ScalarDB to Your Build
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+The ScalarDB library is available on the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb). You can add the library as a build dependency to your application by using Gradle or Maven.
+
+## Configure your application based on your build tool
+
+Select your build tool, and follow the instructions to add the build dependency for ScalarDB to your application.
+
+
+
+ To add the build dependency for ScalarDB by using Gradle, add the following to `build.gradle` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+ ```gradle
+ dependencies {
+ implementation 'com.scalar-labs:scalardb:<VERSION>'
+ }
+ ```
+
+
+ To add the build dependency for ScalarDB by using Maven, add the following to `pom.xml` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+ ```xml
+ <dependency>
+   <groupId>com.scalar-labs</groupId>
+   <artifactId>scalardb</artifactId>
+   <version><VERSION></version>
+ </dependency>
+ ```
+
+
diff --git a/versioned_docs/version-3.13/api-guide.mdx b/versioned_docs/version-3.13/api-guide.mdx
new file mode 100644
index 00000000..0fd037aa
--- /dev/null
+++ b/versioned_docs/version-3.13/api-guide.mdx
@@ -0,0 +1,1608 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# ScalarDB Java API Guide
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+The ScalarDB Java API is mainly composed of the Administrative API and Transactional API. This guide briefly explains what kinds of APIs exist, how to use them, and related topics like how to handle exceptions.
+
+## Administrative API
+
+This section explains how to execute administrative operations programmatically by using the Administrative API in ScalarDB.
+
+:::note
+
+Another method for executing administrative operations is to use [Schema Loader](schema-loader.mdx).
+
+:::
+
+### Get a `DistributedTransactionAdmin` instance
+
+You first need to get a `DistributedTransactionAdmin` instance to execute administrative operations.
+
+To get a `DistributedTransactionAdmin` instance, you can use `TransactionFactory` as follows:
+
+```java
+TransactionFactory transactionFactory = TransactionFactory.create("<CONFIGURATION_FILE_PATH>");
+DistributedTransactionAdmin admin = transactionFactory.getTransactionAdmin();
+```
+
+For details about configurations, see [ScalarDB Configurations](configurations.mdx).
+
+After you have executed all administrative operations, you should close the `DistributedTransactionAdmin` instance as follows:
+
+```java
+admin.close();
+```
+
+### Create a namespace
+
+Before creating tables, namespaces must be created since a table belongs to one namespace.
+
+You can create a namespace as follows:
+
+```java
+// Create the namespace "ns". If the namespace already exists, an exception will be thrown.
+admin.createNamespace("ns");
+
+// Create the namespace only if it does not already exist.
+boolean ifNotExists = true;
+admin.createNamespace("ns", ifNotExists);
+
+// Create the namespace with options.
+Map<String, String> options = ...;
+admin.createNamespace("ns", options);
+```
+
+#### Creation options
+
+In the creation operations, like creating a namespace and creating a table, you can specify options that are maps of option names and values (`Map<String, String>`). By using the options, you can set storage adapter–specific configurations.
+
+Select your database to see the options available:
+
+
+
+ No options are available for JDBC databases.
+
+
+ | Name | Description | Default |
+ |------------|-----------------------------------------|---------|
+ | no-scaling | Disable auto-scaling for DynamoDB. | false |
+ | no-backup | Disable continuous backup for DynamoDB. | false |
+ | ru | Base resource unit. | 10 |
+
+
+ | Name | Description | Default |
+ |------------|-----------------------------------------------------|---------|
+ | ru | Base resource unit. | 400 |
+ | no-scaling | Disable auto-scaling for Cosmos DB for NoSQL. | false |
+
+
+ | Name | Description | Default |
+ |----------------------|----------------------------------------------------------------------------------------|------------------|
+ | replication-strategy | Cassandra replication strategy. Must be `SimpleStrategy` or `NetworkTopologyStrategy`. | `SimpleStrategy` |
 | compaction-strategy | Cassandra compaction strategy. Must be `LCS`, `STCS`, or `TWCS`. | `STCS` |
+ | replication-factor | Cassandra replication factor. | 1 |
+
+
+
+### Create a table
+
+When creating a table, you should define the table metadata and then create the table.
+
+To define the table metadata, you can use `TableMetadata`. The following shows how to define the columns, partition key, clustering key including clustering orders, and secondary indexes of a table:
+
+```java
+// Define the table metadata.
+TableMetadata tableMetadata =
+ TableMetadata.newBuilder()
+ .addColumn("c1", DataType.INT)
+ .addColumn("c2", DataType.TEXT)
+ .addColumn("c3", DataType.BIGINT)
+ .addColumn("c4", DataType.FLOAT)
+ .addColumn("c5", DataType.DOUBLE)
+ .addPartitionKey("c1")
+ .addClusteringKey("c2", Scan.Ordering.Order.DESC)
+ .addClusteringKey("c3", Scan.Ordering.Order.ASC)
+ .addSecondaryIndex("c4")
+ .build();
+```
+
+For details about the data model of ScalarDB, see [Data Model](design.mdx#data-model).
+
+Then, create a table as follows:
+
+```java
+// Create the table "ns.tbl". If the table already exists, an exception will be thrown.
+admin.createTable("ns", "tbl", tableMetadata);
+
+// Create the table only if it does not already exist.
+boolean ifNotExists = true;
+admin.createTable("ns", "tbl", tableMetadata, ifNotExists);
+
+// Create the table with options.
+Map<String, String> options = ...;
+admin.createTable("ns", "tbl", tableMetadata, options);
+```
+
+### Create a secondary index
+
+You can create a secondary index as follows:
+
+```java
+// Create a secondary index on column "c5" for table "ns.tbl". If a secondary index already exists, an exception will be thrown.
+admin.createIndex("ns", "tbl", "c5");
+
+// Create the secondary index only if it does not already exist.
+boolean ifNotExists = true;
+admin.createIndex("ns", "tbl", "c5", ifNotExists);
+
+// Create the secondary index with options.
+Map<String, String> options = ...;
+admin.createIndex("ns", "tbl", "c5", options);
+```
+
+### Add a new column to a table
+
+You can add a new, non-partition key column to a table as follows:
+
+```java
+// Add a new column "c6" with the INT data type to the table "ns.tbl".
+admin.addNewColumnToTable("ns", "tbl", "c6", DataType.INT)
+```
+
+:::warning
+
+You should carefully consider adding a new column to a table because the execution time may vary greatly depending on the underlying storage. Please plan accordingly and consider the following, especially if the database runs in production:
+
+- **For Cosmos DB for NoSQL and DynamoDB:** Adding a column is almost instantaneous as the table schema is not modified. Only the table metadata stored in a separate table is updated.
+- **For Cassandra:** Adding a column will only update the schema metadata and will not modify the existing schema records. The cluster topology is the main factor for the execution time. Changes to the schema metadata are shared to each cluster node via a gossip protocol. Because of this, the larger the cluster, the longer it will take for all nodes to be updated.
+- **For relational databases (MySQL, Oracle, etc.):** Adding a column shouldn't take a long time to execute.
+
+:::
+
+### Truncate a table
+
+You can truncate a table as follows:
+
+```java
+// Truncate the table "ns.tbl".
+admin.truncateTable("ns", "tbl");
+```
+
+### Drop a secondary index
+
+You can drop a secondary index as follows:
+
+```java
+// Drop the secondary index on column "c5" from table "ns.tbl". If the secondary index does not exist, an exception will be thrown.
+admin.dropIndex("ns", "tbl", "c5");
+
+// Drop the secondary index only if it exists.
+boolean ifExists = true;
+admin.dropIndex("ns", "tbl", "c5", ifExists);
+```
+
+### Drop a table
+
+You can drop a table as follows:
+
+```java
+// Drop the table "ns.tbl". If the table does not exist, an exception will be thrown.
+admin.dropTable("ns", "tbl");
+
+// Drop the table only if it exists.
+boolean ifExists = true;
+admin.dropTable("ns", "tbl", ifExists);
+```
+
+### Drop a namespace
+
+You can drop a namespace as follows:
+
+```java
+// Drop the namespace "ns". If the namespace does not exist, an exception will be thrown.
+admin.dropNamespace("ns");
+
+// Drop the namespace only if it exists.
+boolean ifExists = true;
+admin.dropNamespace("ns", ifExists);
+```
+
+### Get existing namespaces
+
+You can get the existing namespaces as follows:
+
+```java
+Set<String> namespaces = admin.getNamespaceNames();
+```
+
+:::note
+
+This method extracts the namespace names of user tables dynamically. As a result, only namespaces that contain tables are returned. Starting from ScalarDB 4.0, we plan to improve the design to remove this limitation.
+
+:::
+
+### Get the tables of a namespace
+
+You can get the tables of a namespace as follows:
+
+```java
+// Get the tables of the namespace "ns".
+Set<String> tables = admin.getNamespaceTableNames("ns");
+```
+
+### Get table metadata
+
+You can get table metadata as follows:
+
+```java
+// Get the table metadata for "ns.tbl".
+TableMetadata tableMetadata = admin.getTableMetadata("ns", "tbl");
+```
+### Repair a table
+
+You can repair the table metadata of an existing table as follows:
+
+```java
+// Repair the table "ns.tbl" with options.
+TableMetadata tableMetadata =
+ TableMetadata.newBuilder()
+ ...
+ .build();
+Map<String, String> options = ...;
+admin.repairTable("ns", "tbl", tableMetadata, options);
+```
+
+### Specify operations for the Coordinator table
+
+The Coordinator table is used by the [Transactional API](#transactional-api) to track the statuses of transactions.
+
+When using a transaction manager, you must create the Coordinator table to execute transactions. In addition to creating the table, you can truncate and drop the Coordinator table.
+
+#### Create the Coordinator table
+
+You can create the Coordinator table as follows:
+
+```java
+// Create the Coordinator table.
+admin.createCoordinatorTables();
+
+// Create the Coordinator table only if one does not already exist.
+boolean ifNotExist = true;
+admin.createCoordinatorTables(ifNotExist);
+
+// Create the Coordinator table with options.
+Map<String, String> options = ...;
+admin.createCoordinatorTables(options);
+```
+
+#### Truncate the Coordinator table
+
+You can truncate the Coordinator table as follows:
+
+```java
+// Truncate the Coordinator table.
+admin.truncateCoordinatorTables();
+```
+
+#### Drop the Coordinator table
+
+You can drop the Coordinator table as follows:
+
+```java
+// Drop the Coordinator table.
+admin.dropCoordinatorTables();
+
+// Drop the Coordinator table only if one exists.
+boolean ifExist = true;
+admin.dropCoordinatorTables(ifExist);
+```
+
+### Import a table
+
+You can import an existing table to ScalarDB as follows:
+
+```java
+// Import the table "ns.tbl". If the table is already managed by ScalarDB, the target table does not
+// exist, or the table does not meet the requirements of the ScalarDB table, an exception will be thrown.
+admin.importTable("ns", "tbl", options);
+```
+
+:::warning
+
+You should carefully plan to import a table to ScalarDB in production because it will add transaction metadata columns to your database tables and the ScalarDB metadata tables. In this case, there would also be several differences between your database and ScalarDB, as well as some limitations. For details, see [Importing Existing Tables to ScalarDB by Using ScalarDB Schema Loader](./schema-loader-import.mdx).
+
+
+:::
+
+## Transactional API
+
+This section explains how to execute transactional operations by using the Transactional API in ScalarDB.
+
+### Get a `DistributedTransactionManager` instance
+
+You first need to get a `DistributedTransactionManager` instance to execute transactional operations.
+
+To get a `DistributedTransactionManager` instance, you can use `TransactionFactory` as follows:
+
+```java
+TransactionFactory transactionFactory = TransactionFactory.create("<CONFIGURATION_FILE_PATH>");
+DistributedTransactionManager transactionManager = transactionFactory.getTransactionManager();
+```
+
+After you have executed all transactional operations, you should close the `DistributedTransactionManager` instance as follows:
+
+```java
+transactionManager.close();
+```
+
+### Execute transactions
+
+This subsection explains how to execute transactions with multiple CRUD operations.
+
+#### Begin or start a transaction
+
+Before executing transactional CRUD operations, you need to begin or start a transaction.
+
+You can begin a transaction as follows:
+
+```java
+// Begin a transaction.
+DistributedTransaction transaction = transactionManager.begin();
+```
+
+Or, you can start a transaction as follows:
+
+```java
+// Start a transaction.
+DistributedTransaction transaction = transactionManager.start();
+```
+
+Alternatively, you can use the `begin` method for a transaction by specifying a transaction ID as follows:
+
+```java
+// Begin a transaction by specifying a transaction ID.
+DistributedTransaction transaction = transactionManager.begin("<TRANSACTION_ID>");
+```
+
+Or, you can use the `start` method for a transaction by specifying a transaction ID as follows:
+
+```java
+// Start a transaction by specifying a transaction ID.
+DistributedTransaction transaction = transactionManager.start("<TRANSACTION_ID>");
+```
+
+:::note
+
+Specifying a transaction ID is useful when you want to link external systems to ScalarDB. Otherwise, you should use the `begin()` method or the `start()` method.
+
+When you specify a transaction ID, make sure you specify a unique ID (for example, UUID v4) throughout the system since ScalarDB depends on the uniqueness of transaction IDs for correctness.
+
+:::
+
+#### Join a transaction
+
+Joining a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can join the ongoing transaction by using the `join()` method.
+
+You can join an ongoing transaction that has already begun by specifying the transaction ID as follows:
+
+```java
+// Join a transaction.
+DistributedTransaction transaction = transactionManager.join("<TRANSACTION_ID>");
+```
+
+:::note
+
+To get the transaction ID with `getId()`, you can specify the following:
+
+```java
+tx.getId();
+```
+
+:::
+
+#### Resume a transaction
+
+Resuming a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can resume the ongoing transaction by using the `resume()` method.
+
+You can resume an ongoing transaction that you have already begun by specifying a transaction ID as follows:
+
+```java
+// Resume a transaction.
+DistributedTransaction transaction = transactionManager.resume("<TRANSACTION_ID>");
+```
+
+:::note
+
+To get the transaction ID with `getId()`, you can specify the following:
+
+```java
+tx.getId();
+```
+
+:::
+
+#### Implement CRUD operations
+
+The following sections describe key construction and CRUD operations.
+
+:::note
+
+Although all the builders of the CRUD operations can specify consistency by using the `consistency()` methods, those methods are ignored. Instead, the `LINEARIZABLE` consistency level is always used in transactions.
+
+:::
+
+##### Key construction
+
+Most CRUD operations need to specify `Key` objects (partition-key, clustering-key, etc.). So, before moving on to CRUD operations, the following explains how to construct a `Key` object.
+
+For a single column key, you can use `Key.of()` methods to construct the key as follows:
+
+```java
+// For a key that consists of a single column of INT.
+Key key1 = Key.ofInt("col1", 1);
+
+// For a key that consists of a single column of BIGINT.
+Key key2 = Key.ofBigInt("col1", 100L);
+
+// For a key that consists of a single column of DOUBLE.
+Key key3 = Key.ofDouble("col1", 1.3d);
+
+// For a key that consists of a single column of TEXT.
+Key key4 = Key.ofText("col1", "value");
+```
+
+For a key that consists of two to five columns, you can use the `Key.of()` method to construct the key as follows. Similar to `ImmutableMap.of()` in Guava, you need to specify column names and values in turns:
+
+```java
+// For a key that consists of two to five columns.
+Key key1 = Key.of("col1", 1, "col2", 100L);
+Key key2 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d);
+Key key3 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value");
+Key key4 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value", "col5", false);
+```
+
+For a key that consists of more than five columns, we can use the builder to construct the key as follows:
+
+```java
+// For a key that consists of more than five columns.
+Key key = Key.newBuilder()
+ .addInt("col1", 1)
+ .addBigInt("col2", 100L)
+ .addDouble("col3", 1.3d)
+ .addText("col4", "value")
+ .addBoolean("col5", false)
+ .addInt("col6", 100)
+ .build();
+```
+
+##### `Get` operation
+
+`Get` is an operation to retrieve a single record specified by a primary key.
+
+You need to create a `Get` object first, and then you can execute the object by using the `transaction.get()` method as follows:
+
+```java
+// Create a `Get` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .projections("c1", "c2", "c3", "c4")
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .build();
+
+// Execute the `Get` operation.
+Optional<Result> result = transaction.get(get);
+```
+
+You can specify projections to choose which columns are returned.
+
+###### Use the `WHERE` clause
+
+You can also specify arbitrary conditions by using the `where()` method. If the retrieved record does not match the conditions specified by the `where()` method, `Optional.empty()` will be returned. As an argument of the `where()` method, you can specify a condition, an AND-wise condition set, or an OR-wise condition set. After calling the `where()` method, you can add more conditions or condition sets by using the `and()` method or `or()` method as follows:
+
+```java
+// Create a `Get` operation with condition sets.
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .where(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c1").isLessThanInt(10))
+ .or(ConditionBuilder.column("c1").isGreaterThanInt(20))
+ .build())
+ .and(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c2").isLikeText("a%"))
+ .or(ConditionBuilder.column("c2").isLikeText("b%"))
+ .build())
+ .build();
+```
+
+:::note
+
+In the `where()` condition method chain, the conditions must be an AND-wise junction of `ConditionalExpression` or `OrConditionSet` (known as conjunctive normal form) like the above example or an OR-wise junction of `ConditionalExpression` or `AndConditionSet` (known as disjunctive normal form).
+
+:::
+
+For more details about available conditions and condition sets, see the `ConditionBuilder` and `ConditionSetBuilder` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using.
+
+###### Handle `Result` objects
+
+The `Get` operation and `Scan` operation return `Result` objects. The following shows how to handle `Result` objects.
+
+You can get a column value of a result by using `get<TYPE>("<COLUMN_NAME>")` methods as follows:
+
+```java
+// Get the BOOLEAN value of a column.
+boolean booleanValue = result.getBoolean("<COLUMN_NAME>");
+
+// Get the INT value of a column.
+int intValue = result.getInt("<COLUMN_NAME>");
+
+// Get the BIGINT value of a column.
+long bigIntValue = result.getBigInt("<COLUMN_NAME>");
+
+// Get the FLOAT value of a column.
+float floatValue = result.getFloat("<COLUMN_NAME>");
+
+// Get the DOUBLE value of a column.
+double doubleValue = result.getDouble("<COLUMN_NAME>");
+
+// Get the TEXT value of a column.
+String textValue = result.getText("<COLUMN_NAME>");
+
+// Get the BLOB value of a column as a `ByteBuffer`.
+ByteBuffer blobValue = result.getBlob("<COLUMN_NAME>");
+
+// Get the BLOB value of a column as a `byte` array.
+byte[] blobValueAsBytes = result.getBlobAsBytes("<COLUMN_NAME>");
+```
+
+And if you need to check if a value of a column is null, you can use the `isNull("<COLUMN_NAME>")` method.
+
+``` java
+// Check if a value of a column is null.
+boolean isNull = result.isNull("<COLUMN_NAME>");
+```
+
+For more details, see the `Result` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/3.13.0/index.html) of the version of ScalarDB that you're using.
+
+###### Execute `Get` by using a secondary index
+
+You can execute a `Get` operation by using a secondary index.
+
+Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows:
+
+```java
+// Create a `Get` operation by using a secondary index.
+Key indexKey = Key.ofFloat("c4", 1.23F);
+
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .indexKey(indexKey)
+ .projections("c1", "c2", "c3", "c4")
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .build();
+
+// Execute the `Get` operation.
+Optional<Result> result = transaction.get(get);
+```
+
+You can also specify arbitrary conditions by using the `where()` method. For details, see [Use the `WHERE` clause](#use-the-where-clause).
+
+:::note
+
+If the result has more than one record, `transaction.get()` will throw an exception. If you want to handle multiple results, see [Execute `Scan` by using a secondary index](#execute-scan-by-using-a-secondary-index).
+
+:::
+
+##### `Scan` operation
+
+`Scan` is an operation to retrieve multiple records within a partition. You can specify clustering-key boundaries and orderings for clustering-key columns in `Scan` operations.
+
+You need to create a `Scan` object first, and then you can execute the object by using the `transaction.scan()` method as follows:
+
+```java
+// Create a `Scan` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key startClusteringKey = Key.of("c2", "aaa", "c3", 100L);
+Key endClusteringKey = Key.of("c2", "aaa", "c3", 300L);
+
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .start(startClusteringKey, true) // Include startClusteringKey
+ .end(endClusteringKey, false) // Exclude endClusteringKey
+ .projections("c1", "c2", "c3", "c4")
+ .orderings(Scan.Ordering.desc("c2"), Scan.Ordering.asc("c3"))
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+You can omit the clustering-key boundaries or specify either a `start` boundary or an `end` boundary. If you don't specify `orderings`, you will get results ordered by the clustering order that you defined when creating the table.
+
+In addition, you can specify `projections` to choose which columns are returned and use `limit` to specify the number of records to return in `Scan` operations.
+
+###### Use the `WHERE` clause
+
+You can also specify arbitrary conditions by using the `where()` method to filter scanned records. As an argument of the `where()` method, you can specify a condition, an AND-wise condition set, or an OR-wise condition set. After calling the `where()` method, you can add more conditions or condition sets by using the `and()` method or `or()` method as follows:
+
+```java
+// Create a `Scan` operation with condition sets.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .where(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c1").isLessThanInt(10))
+ .or(ConditionBuilder.column("c1").isGreaterThanInt(20))
+ .build())
+ .and(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c2").isLikeText("a%"))
+ .or(ConditionBuilder.column("c2").isLikeText("b%"))
+ .build())
+ .limit(10)
+ .build();
+```
+
+:::note
+
+In the `where()` condition method chain, the conditions must be an AND-wise junction of `ConditionalExpression` or `OrConditionSet` (known as conjunctive normal form) like the above example or an OR-wise junction of `ConditionalExpression` or `AndConditionSet` (known as disjunctive normal form).
+
+:::
+
+For more details about available conditions and condition sets, see the `ConditionBuilder` and `ConditionSetBuilder` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using.
+
+###### Execute `Scan` by using a secondary index
+
+You can execute a `Scan` operation by using a secondary index.
+
+Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows:
+
+```java
+// Create a `Scan` operation by using a secondary index.
+Key indexKey = Key.ofFloat("c4", 1.23F);
+
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .indexKey(indexKey)
+ .projections("c1", "c2", "c3", "c4")
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+You can also specify arbitrary conditions using the `where()` method. For details, see [Use the `WHERE` clause](#use-the-where-clause-1).
+
+:::note
+
+You can't specify clustering-key boundaries and orderings in `Scan` by using a secondary index.
+
+:::
+
+###### Execute cross-partition `Scan` without specifying a partition key to retrieve all the records of a table
+
+You can execute a `Scan` operation across all partitions, which we call *cross-partition scan*, without specifying a partition key by enabling the following configuration in the ScalarDB properties file.
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+```
+
+:::warning
+
+For non-JDBC databases, transactions could be executed at read-committed snapshot isolation (`SNAPSHOT`), which is a lower isolation level, even if you enable cross-partition scan with the `SERIALIZABLE` isolation level. When using non-JDBC databases, use cross-partition scan only if consistency does not matter for your transactions.
+
+:::
+
+Instead of calling the `partitionKey()` method in the builder, you can call the `all()` method to scan a table without specifying a partition key as follows:
+
+```java
+// Create a `Scan` operation without specifying a partition key.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .projections("c1", "c2", "c3", "c4")
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+:::note
+
+You can't specify any orderings in cross-partition `Scan` when using non-JDBC databases. For details on how to use cross-partition `Scan` with filtering or ordering, see [Execute cross-partition `Scan` with filtering and ordering](#execute-cross-partition-scan-with-filtering-and-ordering).
+
+:::
+
+###### Execute cross-partition `Scan` with filtering and ordering
+
+By enabling the cross-partition scan option with filtering and ordering as follows, you can execute a cross-partition `Scan` operation with flexible conditions and orderings:
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+scalar.db.cross_partition_scan.filtering.enabled=true
+scalar.db.cross_partition_scan.ordering.enabled=true
+```
+
+:::note
+
+You can't enable `scalar.db.cross_partition_scan.ordering` in non-JDBC databases.
+
+:::
+
+You can call the `where()` and `ordering()` methods after calling the `all()` method to specify arbitrary conditions and orderings as follows:
+
+```java
+// Create a `Scan` operation with arbitrary conditions and orderings.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .projections("c1", "c2", "c3", "c4")
+ .orderings(Scan.Ordering.desc("c3"), Scan.Ordering.asc("c4"))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+For details about the `WHERE` clause, see [Use the `WHERE` clause](#use-the-where-clause-1).
+
+##### `Put` operation
+
+:::note
+
+The `Put` operation is deprecated as of ScalarDB 3.13.0 and will be removed in a future release. Instead of using the `Put` operation, use the `Insert` operation, the `Upsert` operation, or the `Update` operation.
+
+:::
+
+`Put` is an operation to put a record specified by a primary key. The operation behaves as an upsert operation for a record, in which the operation updates the record if the record exists or inserts the record if the record does not exist.
+
+:::note
+
+When you update an existing record, you need to read the record by using `Get` or `Scan` before using a `Put` operation. Otherwise, the operation will fail due to a conflict. This occurs because of the specification of ScalarDB to manage transactions properly. Instead of reading the record explicitly, you can enable implicit pre-read. For details, see [Enable implicit pre-read for `Put` operations](#enable-implicit-pre-read-for-put-operations).
+
+:::
+
+You need to create a `Put` object first, and then you can execute the object by using the `transaction.put()` method as follows:
+
+```java
+// Create a `Put` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Put` operation.
+transaction.put(put);
+```
+
+You can also put a record with `null` values as follows:
+
+```java
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", null)
+ .doubleValue("c5", null)
+ .build();
+```
+
+###### Enable implicit pre-read for `Put` operations
+
+In Consensus Commit, an application must read a record before mutating the record with `Put` and `Delete` operations to obtain the latest states of the record if the record exists. Instead of reading the record explicitly, you can enable *implicit pre-read*. By enabling implicit pre-read, if an application does not read the record explicitly in a transaction, ScalarDB will read the record on behalf of the application before committing the transaction.
+
+You can enable implicit pre-read for a `Put` operation by specifying `enableImplicitPreRead()` in the `Put` operation builder as follows:
+
+```java
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .enableImplicitPreRead()
+ .build();
+```
+
+:::note
+
+If you are certain that a record you are trying to mutate does not exist, you should not enable implicit pre-read for the `Put` operation for better performance. For example, if you load initial data, you should not enable implicit pre-read. A `Put` operation without implicit pre-read is faster than a `Put` operation with implicit pre-read because the operation skips an unnecessary read.
+
+:::
+
+##### `Insert` operation
+
+`Insert` is an operation to insert an entry into the underlying storage through a transaction. If the entry already exists, a conflict error will occur.
+
+You need to create an `Insert` object first, and then you can execute the object by using the `transaction.insert()` method as follows:
+
+```java
+// Create an `Insert` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Insert insert =
+ Insert.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Insert` operation.
+transaction.insert(insert);
+```
+
+##### `Upsert` operation
+
+`Upsert` is an operation to insert an entry into or update an entry in the underlying storage through a transaction. If the entry already exists, it will be updated; otherwise, the entry will be inserted.
+
+You need to create an `Upsert` object first, and then you can execute the object by using the `transaction.upsert()` method as follows:
+
+```java
+// Create an `Upsert` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Upsert upsert =
+ Upsert.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Upsert` operation.
+transaction.upsert(upsert);
+```
+
+##### `Update` operation
+
+`Update` is an operation to update an entry in the underlying storage through a transaction. If the entry does not exist, the operation will not make any changes.
+
+You need to create an `Update` object first, and then you can execute the object by using the `transaction.update()` method as follows:
+
+```java
+// Create an `Update` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Update update =
+ Update.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Update` operation.
+transaction.update(update);
+```
+
+##### `Delete` operation
+
+`Delete` is an operation to delete a record specified by a primary key.
+
+:::note
+
+When you delete a record, you don't have to read the record beforehand because implicit pre-read is always enabled for `Delete` operations.
+
+:::
+
+You need to create a `Delete` object first, and then you can execute the object by using the `transaction.delete()` method as follows:
+
+```java
+// Create a `Delete` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .build();
+
+// Execute the `Delete` operation.
+transaction.delete(delete);
+```
+
+##### `Put`, `Delete`, and `Update` with a condition
+
+You can write arbitrary conditions (for example, a bank account balance must be equal to or more than zero) that you require a transaction to meet before being committed by implementing logic that checks the conditions in the transaction. Alternatively, you can write simple conditions in a mutation operation, such as `Put`, `Delete`, and `Update`.
+
+When a `Put`, `Delete`, or `Update` operation includes a condition, the operation is executed only if the specified condition is met. If the condition is not met when the operation is executed, an exception called `UnsatisfiedConditionException` will be thrown.
+
+:::note
+
+When you specify a condition in a `Put` operation, you need to read the record beforehand or enable implicit pre-read.
+
+:::
+
+###### Conditions for `Put`
+
+You can specify a condition in a `Put` operation as follows:
+
+```java
+// Build a condition.
+MutationCondition condition =
+ ConditionBuilder.putIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F))
+ .and(ConditionBuilder.column("c5").isEqualToDouble(0.0))
+ .build();
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .condition(condition) // condition
+ .build();
+
+// Execute the `Put` operation.
+transaction.put(put);
+```
+
+In addition to using the `putIf` condition, you can specify the `putIfExists` and `putIfNotExists` conditions as follows:
+
+```java
+// Build a `putIfExists` condition.
+MutationCondition putIfExistsCondition = ConditionBuilder.putIfExists();
+
+// Build a `putIfNotExists` condition.
+MutationCondition putIfNotExistsCondition = ConditionBuilder.putIfNotExists();
+```
+
+###### Conditions for `Delete`
+
+You can specify a condition in a `Delete` operation as follows:
+
+```java
+// Build a condition.
+MutationCondition condition =
+ ConditionBuilder.deleteIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F))
+ .and(ConditionBuilder.column("c5").isEqualToDouble(0.0))
+ .build();
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .condition(condition) // condition
+ .build();
+
+// Execute the `Delete` operation.
+transaction.delete(delete);
+```
+
+In addition to using the `deleteIf` condition, you can specify the `deleteIfExists` condition as follows:
+
+```java
+// Build a `deleteIfExists` condition.
+MutationCondition deleteIfExistsCondition = ConditionBuilder.deleteIfExists();
+```
+
+###### Conditions for `Update`
+
+You can specify a condition in an `Update` operation as follows:
+
+```java
+// Build a condition.
+MutationCondition condition =
+ ConditionBuilder.updateIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F))
+ .and(ConditionBuilder.column("c5").isEqualToDouble(0.0))
+ .build();
+
+Update update =
+ Update.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .condition(condition) // condition
+ .build();
+
+// Execute the `Update` operation.
+transaction.update(update);
+```
+
+In addition to using the `updateIf` condition, you can specify the `updateIfExists` condition as follows:
+
+```java
+// Build an `updateIfExists` condition.
+MutationCondition updateIfExistsCondition = ConditionBuilder.updateIfExists();
+```
+
+##### Mutate operation
+
+Mutate is an operation to execute multiple mutations (`Put`, `Insert`, `Upsert`, `Update`, and `Delete` operations).
+
+You need to create mutation objects first, and then you can execute the objects by using the `transaction.mutate()` method as follows:
+
+```java
+// Create `Put` and `Delete` operations.
+Key partitionKey = Key.ofInt("c1", 10);
+
+Key clusteringKeyForPut = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForPut)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+Key clusteringKeyForDelete = Key.of("c2", "bbb", "c3", 200L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForDelete)
+ .build();
+
+// Execute the operations.
+transaction.mutate(Arrays.asList(put, delete));
+```
+
+##### Default namespace for CRUD operations
+
+A default namespace for all CRUD operations can be set by using a property in the ScalarDB configuration.
+
+```properties
+scalar.db.default_namespace_name=
+```
+
+Any operation that does not specify a namespace will use the default namespace set in the configuration.
+
+```java
+// This operation will target the default namespace.
+Scan scanUsingDefaultNamespace =
+ Scan.newBuilder()
+ .table("tbl")
+ .all()
+ .build();
+// This operation will target the "ns" namespace.
+Scan scanUsingSpecifiedNamespace =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .build();
+```
+
+#### Commit a transaction
+
+After executing CRUD operations, you need to commit a transaction to finish it.
+
+You can commit a transaction as follows:
+
+```java
+// Commit a transaction.
+transaction.commit();
+```
+
+#### Roll back or abort a transaction
+
+If an error occurs when executing a transaction, you can roll back or abort the transaction.
+
+You can roll back a transaction as follows:
+
+```java
+// Roll back a transaction.
+transaction.rollback();
+```
+
+Or, you can abort a transaction as follows:
+
+```java
+// Abort a transaction.
+transaction.abort();
+```
+
+For details about how to handle exceptions in ScalarDB, see [How to handle exceptions](#how-to-handle-exceptions).
+
+### Execute transactions without beginning or starting a transaction
+
+You can execute transactional operations without beginning or starting a transaction. In this case, ScalarDB will automatically begin a transaction before executing the operations and commit the transaction after executing the operations. This section explains how to execute transactions without beginning or starting a transaction.
+
+#### Execute `Get` operation
+
+`Get` is an operation to retrieve a single record specified by a primary key.
+
+You need to create a `Get` object first, and then you can execute the object by using the `transactionManager.get()` method as follows:
+
+```java
+// Create a `Get` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .projections("c1", "c2", "c3", "c4")
+ .build();
+
+// Execute the `Get` operation.
+Optional result = transactionManager.get(get);
+```
+
+For details about the `Get` operation, see [`Get` operation](#get-operation).
+
+#### Execute `Scan` operation
+
+`Scan` is an operation to retrieve multiple records within a partition. You can specify clustering-key boundaries and orderings for clustering-key columns in `Scan` operations.
+
+You need to create a `Scan` object first, and then you can execute the object by using the `transactionManager.scan()` method as follows:
+
+```java
+// Create a `Scan` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key startClusteringKey = Key.of("c2", "aaa", "c3", 100L);
+Key endClusteringKey = Key.of("c2", "aaa", "c3", 300L);
+
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .start(startClusteringKey, true) // Include startClusteringKey
+ .end(endClusteringKey, false) // Exclude endClusteringKey
+ .projections("c1", "c2", "c3", "c4")
+ .orderings(Scan.Ordering.desc("c2"), Scan.Ordering.asc("c3"))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List results = transactionManager.scan(scan);
+```
+
+For details about the `Scan` operation, see [`Scan` operation](#scan-operation).
+
+#### Execute `Put` operation
+
+:::note
+
+The `Put` operation is deprecated as of ScalarDB 3.13.0 and will be removed in a future release. Instead of using the `Put` operation, use the `Insert` operation, the `Upsert` operation, or the `Update` operation.
+
+:::
+
+You need to create a `Put` object first, and then you can execute the object by using the `transactionManager.put()` method as follows:
+
+```java
+// Create a `Put` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Put` operation.
+transactionManager.put(put);
+```
+
+For details about the `Put` operation, see [`Put` operation](#put-operation).
+
+#### Execute `Insert` operation
+
+`Insert` is an operation to insert an entry into the underlying storage through a transaction. If the entry already exists, a conflict error will occur.
+
+You need to create an `Insert` object first, and then you can execute the object by using the `transactionManager.insert()` method as follows:
+
+```java
+// Create an `Insert` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Insert insert =
+ Insert.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Insert` operation.
+transactionManager.insert(insert);
+```
+
+For details about the `Insert` operation, see [`Insert` operation](#insert-operation).
+
+#### Execute `Upsert` operation
+
+`Upsert` is an operation to insert an entry into or update an entry in the underlying storage through a transaction. If the entry already exists, it will be updated; otherwise, the entry will be inserted.
+
+You need to create an `Upsert` object first, and then you can execute the object by using the `transactionManager.upsert()` method as follows:
+
+```java
+// Create an `Upsert` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Upsert upsert =
+ Upsert.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Upsert` operation.
+transactionManager.upsert(upsert);
+```
+
+For details about the `Upsert` operation, see [`Upsert` operation](#upsert-operation).
+
+#### Execute `Update` operation
+
+`Update` is an operation to update an entry in the underlying storage through a transaction. If the entry does not exist, the operation will not make any changes.
+
+You need to create an `Update` object first, and then you can execute the object by using the `transactionManager.update()` method as follows:
+
+```java
+// Create an `Update` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Update update =
+ Update.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Update` operation.
+transactionManager.update(update);
+```
+
+For details about the `Update` operation, see [`Update` operation](#update-operation).
+
+#### Execute `Delete` operation
+
+`Delete` is an operation to delete a record specified by a primary key.
+
+You need to create a `Delete` object first, and then you can execute the object by using the `transactionManager.delete()` method as follows:
+
+```java
+// Create a `Delete` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .build();
+
+// Execute the `Delete` operation.
+transactionManager.delete(delete);
+```
+
+For details about the `Delete` operation, see [`Delete` operation](#delete-operation).
+
+#### Execute Mutate operation
+
+Mutate is an operation to execute multiple mutations (`Put`, `Insert`, `Upsert`, `Update`, and `Delete` operations).
+
+You need to create mutation objects first, and then you can execute the objects by using the `transactionManager.mutate()` method as follows:
+
+```java
+// Create `Put` and `Delete` operations.
+Key partitionKey = Key.ofInt("c1", 10);
+
+Key clusteringKeyForPut = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForPut)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+Key clusteringKeyForDelete = Key.of("c2", "bbb", "c3", 200L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForDelete)
+ .build();
+
+// Execute the operations.
+transactionManager.mutate(Arrays.asList(put, delete));
+```
+
+For details about the Mutate operation, see [Mutate operation](#mutate-operation).
+
+In addition, for details about how to handle exceptions in ScalarDB, see [How to handle exceptions](#how-to-handle-exceptions).
+
+## How to handle exceptions
+
+When executing a transaction, you will also need to handle exceptions properly.
+
+:::warning
+
+If you don't handle exceptions properly, you may face anomalies or data inconsistency.
+
+:::
+
+The following sample code shows how to handle exceptions:
+
+```java
+public class Sample {
+ public static void main(String[] args) throws Exception {
+ TransactionFactory factory = TransactionFactory.create("");
+ DistributedTransactionManager transactionManager = factory.getTransactionManager();
+
+ int retryCount = 0;
+ TransactionException lastException = null;
+
+ while (true) {
+ if (retryCount++ > 0) {
+ // Retry the transaction three times maximum.
+ if (retryCount >= 3) {
+ // Throw the last exception if the number of retries exceeds the maximum.
+ throw lastException;
+ }
+
+ // Sleep 100 milliseconds before retrying the transaction.
+ TimeUnit.MILLISECONDS.sleep(100);
+ }
+
+ DistributedTransaction transaction = null;
+ try {
+ // Begin a transaction.
+ transaction = transactionManager.begin();
+
+ // Execute CRUD operations in the transaction.
+ Optional result = transaction.get(...);
+ List results = transaction.scan(...);
+ transaction.put(...);
+ transaction.delete(...);
+
+ // Commit the transaction.
+ transaction.commit();
+ } catch (UnsatisfiedConditionException e) {
+ // You need to handle `UnsatisfiedConditionException` only if a mutation operation specifies a condition.
+ // This exception indicates the condition for the mutation operation is not met.
+
+ try {
+ transaction.rollback();
+ } catch (RollbackException ex) {
+ // Rolling back the transaction failed. Since the transaction should eventually recover,
+ // you don't need to do anything further. You can simply log the occurrence here.
+ }
+
+ // You can handle the exception here, according to your application requirements.
+
+ return;
+ } catch (UnknownTransactionStatusException e) {
+ // If you catch `UnknownTransactionStatusException` when committing the transaction,
+ // it indicates that the status of the transaction, whether it was successful or not, is unknown.
+ // In such a case, you need to check if the transaction is committed successfully or not and
+ // retry the transaction if it failed. How to identify a transaction status is delegated to users.
+ return;
+ } catch (TransactionException e) {
+ // For other exceptions, you can try retrying the transaction.
+
+ // For `CrudConflictException`, `CommitConflictException`, and `TransactionNotFoundException`,
+ // you can basically retry the transaction. However, for the other exceptions, the transaction
+ // will still fail if the cause of the exception is non-transient. In such a case, you will
+ // exhaust the number of retries and throw the last exception.
+
+ if (transaction != null) {
+ try {
+ transaction.rollback();
+ } catch (RollbackException ex) {
+ // Rolling back the transaction failed. The transaction should eventually recover,
+ // so you don't need to do anything further. You can simply log the occurrence here.
+ }
+ }
+
+ lastException = e;
+ }
+ }
+ }
+}
+```
+
+### `TransactionException` and `TransactionNotFoundException`
+
+The `begin()` API could throw `TransactionException` or `TransactionNotFoundException`:
+
+- If you catch `TransactionException`, this exception indicates that the transaction has failed to begin due to transient or non-transient faults. You can try retrying the transaction, but you may not be able to begin the transaction due to non-transient faults.
+- If you catch `TransactionNotFoundException`, this exception indicates that the transaction has failed to begin due to transient faults. In this case, you can retry the transaction.
+
+The `join()` API could also throw `TransactionNotFoundException`. You can handle this exception in the same way that you handle the exceptions for the `begin()` API.
+
+### `CrudException` and `CrudConflictException`
+
+The APIs for CRUD operations (`get()`, `scan()`, `put()`, `delete()`, and `mutate()`) could throw `CrudException` or `CrudConflictException`:
+
+- If you catch `CrudException`, this exception indicates that the transaction CRUD operation has failed due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient.
+- If you catch `CrudConflictException`, this exception indicates that the transaction CRUD operation has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning.
+
+### `UnsatisfiedConditionException`
+
+The APIs for mutation operations (`put()`, `delete()`, and `mutate()`) could also throw `UnsatisfiedConditionException`.
+
+If you catch `UnsatisfiedConditionException`, this exception indicates that the condition for the mutation operation is not met. You can handle this exception according to your application requirements.
+
+### `CommitException`, `CommitConflictException`, and `UnknownTransactionStatusException`
+
+The `commit()` API could throw `CommitException`, `CommitConflictException`, or `UnknownTransactionStatusException`:
+
+- If you catch `CommitException`, this exception indicates that committing the transaction fails due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient.
+- If you catch `CommitConflictException`, this exception indicates that committing the transaction has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning.
+- If you catch `UnknownTransactionStatusException`, this exception indicates that the status of the transaction, whether it was successful or not, is unknown. In this case, you need to check if the transaction is committed successfully and retry the transaction if it has failed.
+
+How to identify a transaction status is delegated to users. You may want to create a transaction status table and update it transactionally with other application data so that you can get the status of a transaction from the status table.
+
+### Notes about some exceptions
+
+Although not illustrated in the sample code, the `resume()` API could also throw `TransactionNotFoundException`. This exception indicates that the transaction associated with the specified ID was not found and/or the transaction might have expired. In either case, you can retry the transaction from the beginning since the cause of this exception is basically transient.
+
+In the sample code, for `UnknownTransactionStatusException`, the transaction is not retried because the application must check if the transaction was successful to avoid potential duplicate operations. For other exceptions, the transaction is retried because the cause of the exception is transient or non-transient. If the cause of the exception is transient, the transaction may succeed if you retry it. However, if the cause of the exception is non-transient, the transaction will still fail even if you retry it. In such a case, you will exhaust the number of retries.
+
+:::note
+
+In the sample code, the transaction is retried three times maximum and sleeps for 100 milliseconds before it is retried. But you can choose a retry policy, such as exponential backoff, according to your application requirements.
+
+:::
+
+### Group commit for the Coordinator table
+
+The Coordinator table that is used for Consensus Commit transactions is a vital data store, and using robust storage for it is recommended. However, utilizing more robust storage options, such as internally leveraging multi-AZ or multi-region replication, may lead to increased latency when writing records to the storage, resulting in poor throughput performance.
+
+ScalarDB provides a group commit feature for the Coordinator table that groups multiple record writes into a single write operation, improving write throughput. In this case, latency may increase or decrease, depending on the underlying database and the workload.
+
+To enable the group commit feature, add the following configuration:
+
+```properties
+# By default, this configuration is set to `false`.
+scalar.db.consensus_commit.coordinator.group_commit.enabled=true
+
+# These properties are for tuning the performance of the group commit feature.
+# scalar.db.consensus_commit.coordinator.group_commit.group_size_fix_timeout_millis=40
+# scalar.db.consensus_commit.coordinator.group_commit.delayed_slot_move_timeout_millis=800
+# scalar.db.consensus_commit.coordinator.group_commit.old_group_abort_timeout_millis=30000
+# scalar.db.consensus_commit.coordinator.group_commit.timeout_check_interval_millis=10
+# scalar.db.consensus_commit.coordinator.group_commit.metrics_monitor_log_enabled=true
+```
+
+#### Limitations
+
+This section describes the limitations of the group commit feature.
+
+##### Custom transaction ID passed by users
+
+The group commit feature implicitly generates an internal value and uses it as a part of transaction ID. Therefore, a custom transaction ID manually passed by users via `com.scalar.db.transaction.consensuscommit.ConsensusCommitManager.begin(String txId)` or `com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitManager.begin(String txId)` can't be used as is for later API calls. You need to use a transaction ID returned from `com.scalar.db.transaction.consensuscommit.ConsensusCommit.getId()` or `com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommit.getId()` instead.
+
+```java
+ // This custom transaction ID needs to be used for ScalarDB transactions.
+ String myTxId = UUID.randomUUID().toString();
+
+ ...
+
+ DistributedTransaction transaction = manager.begin(myTxId);
+
+ ...
+
+ // When the group commit feature is enabled, a custom transaction ID passed by users can't be used as is.
+ // logger.info("The transaction state: {}", manager.getState(myTxId));
+ logger.info("The transaction state: {}", manager.getState(transaction.getId()));
+```
+
+##### Prohibition of use with a two-phase commit interface
+
+The group commit feature manages all ongoing transactions in memory. If this feature is enabled with a two-phase commit interface, the information must be solely maintained by the coordinator service to prevent conflicts caused by participant services' inconsistent writes to the Coordinator table, which may contain different transaction distributions over groups.
+
+This limitation introduces some complexities and inflexibilities related to application development. Therefore, combining the use of the group commit feature with a two-phase commit interface is currently prohibited.
+
+##### Enabling the feature on existing applications is not supported
+
+The group commit feature uses a new column in the Coordinator table. The current [Schema Loader](schema-loader.mdx), as of ScalarDB 3, doesn't support table schema migration for the Coordinator table.
+
+Therefore, enabling the group commit feature on existing applications where any transactions have been executed is not supported. To use this feature, you'll need to start your application in a clean state.
+
+Coordinator table schema migration in [Schema Loader](schema-loader.mdx) is expected to be supported in ScalarDB 4.0.
+
+## Investigating Consensus Commit transaction manager errors
+
+To investigate errors when using the Consensus Commit transaction manager, you can enable a configuration that will return table metadata augmented with transaction metadata columns, which can be helpful when investigating transaction-related issues. This configuration, which is only available when troubleshooting the Consensus Commit transaction manager, enables you to see transaction metadata column details for a given table by using the `DistributedTransactionAdmin.getTableMetadata()` method.
+
+By adding the following configuration, the results of `Get` and `Scan` operations will contain [transaction metadata](schema-loader.mdx#internal-metadata-for-consensus-commit):
+
+```properties
+# By default, this configuration is set to `false`.
+scalar.db.consensus_commit.include_metadata.enabled=true
+```
diff --git a/versioned_docs/version-3.13/backup-restore.mdx b/versioned_docs/version-3.13/backup-restore.mdx
new file mode 100644
index 00000000..923cc94e
--- /dev/null
+++ b/versioned_docs/version-3.13/backup-restore.mdx
@@ -0,0 +1,177 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# How to Back Up and Restore Databases Used Through ScalarDB
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+Since ScalarDB provides transaction capabilities on top of non-transactional or transactional databases non-invasively, you need to take special care to back up and restore the databases in a transactionally consistent way.
+
+This guide describes how to back up and restore the databases that ScalarDB supports.
+
+## Create a backup
+
+How you create a backup depends on which database you're using and whether or not you're using multiple databases. The following decision tree shows which approach you should take.
+
+```mermaid
+flowchart TD
+ A[Are you using a single database with ScalarDB?]
+ A -->|Yes| B[Does the database have transaction support?]
+ B -->|Yes| C[Perform backup without explicit pausing]
+ B ---->|No| D[Perform backup with explicit pausing]
+ A ---->|No| D
+```
+
+### Back up without explicit pausing
+
+If you're using ScalarDB with a single database with support for transactions, you can create a backup of the database even while ScalarDB continues to accept transactions.
+
+:::warning
+
+Before creating a backup, you should consider the safest way to create a transactionally consistent backup of your databases and understand any risks that are associated with the backup process.
+
+:::
+
+One requirement for creating a backup in ScalarDB is that backups for all the ScalarDB-managed tables (including the Coordinator table) need to be transactionally consistent or automatically recoverable to a transactionally consistent state. That means that you need to create a consistent backup by dumping all tables in a single transaction.
+
+How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB.
+
+:::note
+
+The backup methods by database listed below are just examples of some of the databases that ScalarDB supports.
+
+:::
+
+
+
+ You can restore to any point within the backup retention period by using the automated backup feature.
+
+
+ Use the `mysqldump` command with the `--single-transaction` option.
+
+
+ Use the `pg_dump` command.
+
+
+ Use the `.backup` command with the `.timeout` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_).
+
+ For an example, see [BASH: SQLite3 .backup command](https://stackoverflow.com/questions/23164445/bash-sqlite3-backup-command).
+
+
+ Clusters are backed up automatically based on the backup policy, and these backups are retained for a specific duration. You can also perform on-demand backups. For details on performing backups, see [YugabyteDB Managed: Back up and restore clusters](https://docs.yugabyte.com/preview/yugabyte-cloud/cloud-clusters/backup-clusters/).
+
+
+
+### Back up with explicit pausing
+
+Another way to create a transactionally consistent backup is to create a backup while a cluster of ScalarDB instances does not have any outstanding transactions. Creating the backup depends on the following:
+
+- If the underlying database has a point-in-time snapshot or backup feature, you can create a backup during the period when no outstanding transactions exist.
+- If the underlying database has a point-in-time restore or recovery (PITR) feature, you can set a restore point to a time (preferably the mid-time) in the pause duration period when no outstanding transactions exist.
+
+:::note
+
+When using a PITR feature, you should minimize the clock drifts between clients and servers by using clock synchronization, such as NTP. Otherwise, the time you get as the paused duration might be too different from the time in which the pause was actually conducted, which could restore the backup to a point where ongoing transactions exist.
+
+In addition, you should pause for a sufficient amount of time (for example, five seconds) and use the mid-time of the paused duration as a restore point since clock synchronization cannot perfectly synchronize clocks between nodes.
+
+:::
+
+To make ScalarDB drain outstanding requests and stop accepting new requests so that a pause duration can be created, you should implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface properly in your application that uses ScalarDB or use [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/), which implements the Scalar Admin interface.
+
+By using the [Scalar Admin client tool](https://github.com/scalar-labs/scalar-admin/tree/main/java#scalar-admin-client-tool), you can pause nodes, servers, or applications that implement the Scalar Admin interface without losing ongoing transactions.
+
+How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB.
+
+:::note
+
+The backup methods by database listed below are just examples of some of the databases that ScalarDB supports.
+
+:::
+
+
+
+ You must enable the PITR feature for DynamoDB tables. If you're using [ScalarDB Schema Loader](schema-loader.mdx) to create schemas, the tool enables the PITR feature for tables by default.
+
+ To specify a transactionally consistent restore point, pause your application that is using ScalarDB with DynamoDB as described in [Back up with explicit pausing](#back-up-with-explicit-pausing).
+
+
+ You must create a Cosmos DB for NoSQL account with a continuous backup policy that has the PITR feature enabled. After enabling the feature, backups are created continuously.
+
+ To specify a transactionally consistent restore point, pause your application that is using ScalarDB with Cosmos DB for NoSQL as described in [Back up with explicit pausing](#back-up-with-explicit-pausing).
+
+
+ Cassandra has a built-in replication feature, so you do not always have to create a transactionally consistent backup. For example, if the replication factor is set to `3` and only the data of one of the nodes in a Cassandra cluster is lost, you won't need a transactionally consistent backup (snapshot) because the node can be recovered by using a normal, transactionally inconsistent backup (snapshot) and the repair feature.
+
+ However, if the quorum of cluster nodes loses their data, you will need a transactionally consistent backup (snapshot) to restore the cluster to a certain transactionally consistent point.
+
+ To create a transactionally consistent cluster-wide backup (snapshot), pause the application that is using ScalarDB or [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/) and create backups (snapshots) of the nodes as described in [Back up with explicit pausing](#back-up-with-explicit-pausing) or stop the Cassandra cluster, take copies of all the data in the nodes, and start the cluster.
+
+
+ You can perform on-demand backups or scheduled backups during a paused duration. For details on performing backups, see [YugabyteDB Managed: Back up and restore clusters](https://docs.yugabyte.com/preview/yugabyte-cloud/cloud-clusters/backup-clusters/).
+
+
+
+## Restore a backup
+
+How you restore a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to restore a transactionally consistent backup for ScalarDB.
+
+:::note
+
+The restore methods by database listed below are just examples of some of the databases that ScalarDB supports.
+
+:::
+
+
+
+ You can restore to any point within the backup retention period by using the automated backup feature.
+
+
+ First, stop all the nodes of the Cassandra cluster. Then, clean the `data`, `commitlog`, and `hints` directories, and place the backups (snapshots) in each node.
+
+ After placing the backups (snapshots) in each node, start all the nodes of the Cassandra Cluster.
+
+
+ Follow the official Azure documentation for [restoring an account by using the Azure portal](https://docs.microsoft.com/en-us/azure/cosmos-db/restore-account-continuous-backup#restore-account-portal). After restoring a backup, [configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level) of the restored databases to `STRONG`. In addition, you should use the mid-time of the paused duration as the restore point as previously explained.
+
+ ScalarDB implements the Cosmos DB adapter by using its stored procedures, which are installed when creating schemas by using ScalarDB Schema Loader. However, the PITR feature of Cosmos DB doesn't restore stored procedures. Because of this, you need to re-install the required stored procedures for all tables after restoration. You can do this by using ScalarDB Schema Loader with the `--repair-all` option. For details, see [Repair tables](schema-loader.mdx#repair-tables).
+
+
+ Follow the official AWS documentation for [restoring a DynamoDB table to a point in time](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.Tutorial.html), but keep in mind that a table can only be restored with an alias. Because of this, you will need to restore the table with an alias, delete the original table, and rename the alias to the original name to restore the tables with the same name.
+
+ To do this procedure:
+
+ 1. Create a backup.
+ 1. Select the mid-time of the paused duration as the restore point.
+ 2. Restore by using the PITR of table A to table B.
+ 3. Create a backup of the restored table B (assuming that the backup is named backup B).
+ 4. Remove table B.
+ 2. Restore the backup.
+ 1. Remove table A.
+ 2. Create a table named A by using backup B.
+
+:::note
+
+* You must do the steps mentioned above for each table because tables can only be restored one at a time.
+* Configurations such as PITR and auto-scaling policies are reset to the default values for restored tables, so you must manually configure the required settings. For details, see the official AWS documentation for [How to restore DynamoDB tables with DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/CreateBackup.html#CreateBackup_HowItWorks-restore).
+
+:::
+
+
+ If you used `mysqldump` to create the backup file, use the `mysql` command to restore the backup as specified in [Reloading SQL-Format Backups](https://dev.mysql.com/doc/mysql-backup-excerpt/8.0/en/reloading-sql-format-dumps.html).
+
+
+ If you used `pg_dump` to create the backup file, use the `psql` command to restore the backup as specified in [Restoring the Dump](https://www.postgresql.org/docs/current/backup-dump.html#BACKUP-DUMP-RESTORE).
+
+
+ Use the `.restore` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_).
+
+
+ You can restore from the scheduled or on-demand backup within the backup retention period. For details on performing backups, see [YugabyteDB Managed: Back up and restore clusters](https://docs.yugabyte.com/preview/yugabyte-cloud/cloud-clusters/backup-clusters/).
+
+
diff --git a/versioned_docs/version-3.13/configurations.mdx b/versioned_docs/version-3.13/configurations.mdx
new file mode 100644
index 00000000..0225d171
--- /dev/null
+++ b/versioned_docs/version-3.13/configurations.mdx
@@ -0,0 +1,294 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# ScalarDB Configurations
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This page describes the available configurations for ScalarDB.
+
+## ScalarDB client configurations
+
+This section describes the configurations for the ScalarDB client. ScalarDB provides ways to run transactions by using Consensus Commit, run non-transactional storage operations, and run transactions through ScalarDB Cluster.
+
+### Run transactions by using Consensus Commit
+
+ScalarDB provides its own transaction protocol called Consensus Commit, which is the default transaction manager type in ScalarDB. To use the Consensus Commit transaction manager, add the following to the ScalarDB properties file:
+
+```properties
+scalar.db.transaction_manager=consensus-commit
+```
+
+:::note
+
+If you don't specify the `scalar.db.transaction_manager` property, `consensus-commit` will be the default value.
+
+:::
+
+#### Basic configurations
+
+The following basic configurations are available for the Consensus Commit transaction manager:
+
+| Name | Description | Default |
+|-------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| `scalar.db.transaction_manager` | `consensus-commit` should be specified. | - |
+| `scalar.db.consensus_commit.isolation_level` | Isolation level used for Consensus Commit. Either `SNAPSHOT` or `SERIALIZABLE` can be specified. | `SNAPSHOT` |
+| `scalar.db.consensus_commit.serializable_strategy` | Serializable strategy used for Consensus Commit. Either `EXTRA_READ` or `EXTRA_WRITE` can be specified. If `SNAPSHOT` is specified in the property `scalar.db.consensus_commit.isolation_level`, this configuration will be ignored. | `EXTRA_READ` |
+| `scalar.db.consensus_commit.coordinator.namespace` | Namespace name of Coordinator tables. | `coordinator` |
+| `scalar.db.consensus_commit.include_metadata.enabled` | If set to `true`, `Get` and `Scan` operations results will contain transaction metadata. To see the transaction metadata columns details for a given table, you can use the `DistributedTransactionAdmin.getTableMetadata()` method, which will return the table metadata augmented with the transaction metadata columns. Using this configuration can be useful to investigate transaction-related issues. | `false` |
+
+#### Performance-related configurations
+
+The following performance-related configurations are available for the Consensus Commit transaction manager:
+
+| Name | Description | Default |
+|----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
+| `scalar.db.consensus_commit.parallel_executor_count` | Number of executors (threads) for parallel execution. | `128` |
+| `scalar.db.consensus_commit.parallel_preparation.enabled` | Whether or not the preparation phase is executed in parallel. | `true` |
+| `scalar.db.consensus_commit.parallel_validation.enabled` | Whether or not the validation phase (in `EXTRA_READ`) is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` |
+| `scalar.db.consensus_commit.parallel_commit.enabled` | Whether or not the commit phase is executed in parallel. | `true` |
+| `scalar.db.consensus_commit.parallel_rollback.enabled` | Whether or not the rollback phase is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` |
+| `scalar.db.consensus_commit.async_commit.enabled` | Whether or not the commit phase is executed asynchronously. | `false` |
+| `scalar.db.consensus_commit.async_rollback.enabled` | Whether or not the rollback phase is executed asynchronously. | The value of `scalar.db.consensus_commit.async_commit.enabled` |
+| `scalar.db.consensus_commit.parallel_implicit_pre_read.enabled` | Whether or not implicit pre-read is executed in parallel. | `true` |
+| `scalar.db.consensus_commit.coordinator.group_commit.enabled` | Whether or not committing the transaction state is executed in batch mode. This feature can't be used with a two-phase commit interface. | `false` |
+| `scalar.db.consensus_commit.coordinator.group_commit.slot_capacity` | Maximum number of slots in a group for the group commit feature. A large value improves the efficiency of group commit, but may also increase latency and the likelihood of transaction conflicts.[^1] | `20` |
+| `scalar.db.consensus_commit.coordinator.group_commit.group_size_fix_timeout_millis` | Timeout to fix the size of slots in a group. A large value improves the efficiency of group commit, but may also increase latency and the likelihood of transaction conflicts.[^1] | `40` |
+| `scalar.db.consensus_commit.coordinator.group_commit.delayed_slot_move_timeout_millis` | Timeout to move delayed slots from a group to another isolated group to prevent the original group from being affected by delayed transactions. A large value improves the efficiency of group commit, but may also increase the latency and the likelihood of transaction conflicts.[^1] | `1200` |
+| `scalar.db.consensus_commit.coordinator.group_commit.old_group_abort_timeout_millis` | Timeout to abort an old ongoing group. A small value reduces resource consumption through aggressive aborts, but may also increase the likelihood of unnecessary aborts for long-running transactions. | `60000` |
+| `scalar.db.consensus_commit.coordinator.group_commit.timeout_check_interval_millis` | Interval for checking the group commit–related timeouts. | `20` |
+| `scalar.db.consensus_commit.coordinator.group_commit.metrics_monitor_log_enabled` | Whether or not the metrics of the group commit are logged periodically. | `false` |
+
+#### Underlying storage or database configurations
+
+Consensus Commit has a storage abstraction layer and supports multiple underlying storages. You can specify the storage implementation by using the `scalar.db.storage` property.
+
+Select a database to see the configurations available for each storage.
+
+
+
+ The following configurations are available for JDBC databases:
+
+ | Name | Description | Default |
+ |-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------|
+ | `scalar.db.storage` | `jdbc` must be specified. | - |
+ | `scalar.db.contact_points` | JDBC connection URL. | |
+ | `scalar.db.username` | Username to access the database. | |
+ | `scalar.db.password` | Password to access the database. | |
+ | `scalar.db.jdbc.connection_pool.min_idle` | Minimum number of idle connections in the connection pool. | `20` |
+ | `scalar.db.jdbc.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool. | `50` |
+ | `scalar.db.jdbc.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool. Use a negative value for no limit. | `100` |
+ | `scalar.db.jdbc.prepared_statements_pool.enabled` | Setting this property to `true` enables prepared-statement pooling. | `false` |
+ | `scalar.db.jdbc.prepared_statements_pool.max_open` | Maximum number of open statements that can be allocated from the statement pool at the same time. Use a negative value for no limit. | `-1` |
+ | `scalar.db.jdbc.isolation_level` | Isolation level for JDBC. `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ`, or `SERIALIZABLE` can be specified. | Underlying-database specific |
+ | `scalar.db.jdbc.table_metadata.schema` | Schema name for the table metadata used for ScalarDB. | `scalardb` |
+ | `scalar.db.jdbc.table_metadata.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for the table metadata. | `5` |
+ | `scalar.db.jdbc.table_metadata.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for the table metadata. | `10` |
+ | `scalar.db.jdbc.table_metadata.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for the table metadata. Use a negative value for no limit. | `25` |
+ | `scalar.db.jdbc.admin.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for admin. | `5` |
+ | `scalar.db.jdbc.admin.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for admin. | `10` |
+ | `scalar.db.jdbc.admin.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for admin. Use a negative value for no limit. | `25` |
+
+:::note
+
+#### SQLite3
+
+If you're using SQLite3 as a JDBC database, you must set `scalar.db.contact_points` as follows:
+
+```properties
+scalar.db.contact_points=jdbc:sqlite:.sqlite3?busy_timeout=10000
+```
+
+Unlike other JDBC databases, [SQLite3 doesn't fully support concurrent access](https://www.sqlite.org/lang_transaction.html). To avoid frequent errors caused internally by [`SQLITE_BUSY`](https://www.sqlite.org/rescode.html#busy), setting a [`busy_timeout`](https://www.sqlite.org/c3ref/busy_timeout.html) parameter is recommended.
+
+#### YugabyteDB
+
+If you're using YugabyteDB as a JDBC database, you can specify multiple endpoints in `scalar.db.contact_points` as follows:
+
+```properties
+scalar.db.contact_points=jdbc:yugabytedb://127.0.0.1:5433\\,127.0.0.2:5433\\,127.0.0.3:5433/?load-balance=true
+```
+
+Multiple endpoints should be separated by escaped commas.
+
+For information on YugabyteDB's smart driver and load balancing, see [YugabyteDB smart drivers for YSQL](https://docs.yugabyte.com/preview/drivers-orms/smart-drivers/).
+
+:::
+
+
+
+ The following configurations are available for DynamoDB:
+
+ | Name | Description | Default |
+ |---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
+ | `scalar.db.storage` | `dynamo` must be specified. | - |
+ | `scalar.db.contact_points` | AWS region with which ScalarDB should communicate (e.g., `us-east-1`). | |
+ | `scalar.db.username` | AWS access key used to identify the user interacting with AWS. | |
+ | `scalar.db.password` | AWS secret access key used to authenticate the user interacting with AWS. | |
+ | `scalar.db.dynamo.endpoint_override` | Amazon DynamoDB endpoint with which ScalarDB should communicate. This is primarily used for testing with a local instance instead of an AWS service. | |
+ | `scalar.db.dynamo.table_metadata.namespace` | Namespace name for the table metadata used for ScalarDB. | `scalardb` |
+ | `scalar.db.dynamo.namespace.prefix` | Prefix for the user namespaces and metadata namespace names. Since AWS requires having unique table names in a single AWS region, this is useful if you want to use multiple ScalarDB environments (development, production, etc.) in a single AWS region. | |
+
+
+ The following configurations are available for CosmosDB for NoSQL:
+
+ | Name | Description | Default |
+ |--------------------------------------------|----------------------------------------------------------------------------------------------------------|------------|
+ | `scalar.db.storage` | `cosmos` must be specified. | - |
+ | `scalar.db.contact_points` | Azure Cosmos DB for NoSQL endpoint with which ScalarDB should communicate. | |
+ | `scalar.db.password` | Either a master or read-only key used to perform authentication for accessing Azure Cosmos DB for NoSQL. | |
+ | `scalar.db.cosmos.table_metadata.database` | Database name for the table metadata used for ScalarDB. | `scalardb` |
+ | `scalar.db.cosmos.consistency_level` | Consistency level used for Cosmos DB operations. `STRONG` or `BOUNDED_STALENESS` can be specified. | `STRONG` |
+
+
+ The following configurations are available for Cassandra:
+
+ | Name | Description | Default |
+ |-----------------------------------------|-----------------------------------------------------------------------|------------|
+ | `scalar.db.storage` | `cassandra` must be specified. | - |
+ | `scalar.db.contact_points` | Comma-separated contact points. | |
+ | `scalar.db.contact_port` | Port number for all the contact points. | |
+ | `scalar.db.username` | Username to access the database. | |
+ | `scalar.db.password` | Password to access the database. | |
+ | `scalar.db.cassandra.metadata.keyspace` | Keyspace name for the namespace and table metadata used for ScalarDB. | `scalardb` |
+
+
+
+##### Multi-storage support
+
+ScalarDB supports using multiple storage implementations simultaneously. You can use multiple storages by specifying `multi-storage` as the value for the `scalar.db.storage` property.
+
+For details about using multiple storages, see [Multi-Storage Transactions](multi-storage-transactions.mdx).
+
+##### Cross-partition scan configurations
+
+By enabling the cross-partition scan option as described below, the `Scan` operation can retrieve all records across partitions. In addition, you can specify arbitrary conditions and orderings in the cross-partition `Scan` operation by enabling `cross_partition_scan.filtering` and `cross_partition_scan.ordering`, respectively. Currently, the cross-partition scan with ordering option is available only for JDBC databases. To enable filtering and ordering, `scalar.db.cross_partition_scan.enabled` must be set to `true`.
+
+For details on how to use cross-partition scan, see [Scan operation](./api-guide.mdx#scan-operation).
+
+:::warning
+
+For non-JDBC databases, transactions could be executed at read-committed snapshot isolation (`SNAPSHOT`), which is a lower isolation level, even if you enable cross-partition scan with the `SERIALIZABLE` isolation level. When using non-JDBC databases, use cross-partition scan only if consistency does not matter for your transactions.
+
+:::
+
+| Name | Description | Default |
+|----------------------------------------------------|-----------------------------------------------|---------|
+| `scalar.db.cross_partition_scan.enabled` | Enable cross-partition scan. | `true` |
+| `scalar.db.cross_partition_scan.filtering.enabled` | Enable filtering in cross-partition scan. | `false` |
+| `scalar.db.cross_partition_scan.ordering.enabled` | Enable ordering in cross-partition scan. | `false` |
+
+### Run non-transactional storage operations
+
+To run non-transactional storage operations, you need to configure the `scalar.db.transaction_manager` property to `single-crud-operation`:
+
+```properties
+scalar.db.transaction_manager=single-crud-operation
+```
+
+Also, you need to configure the underlying storage or database as described in [Underlying storage or database configurations](#underlying-storage-or-database-configurations).
+
+### Run transactions through ScalarDB Cluster
+
+[ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/) is a component that provides a gRPC interface to ScalarDB.
+
+For details about client configurations, see the ScalarDB Cluster [client configurations (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api/#client-configurations).
+
+## Other ScalarDB configurations
+
+The following are additional configurations available for ScalarDB:
+
+| Name | Description | Default |
+|------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|
+| `scalar.db.metadata.cache_expiration_time_secs` | ScalarDB has a metadata cache to reduce the number of requests to the database. This setting specifies the expiration time of the cache in seconds. | `-1` (no expiration) |
+| `scalar.db.active_transaction_management.expiration_time_millis` | ScalarDB maintains ongoing transactions, which can be resumed by using a transaction ID. This setting specifies the expiration time of this transaction management feature in milliseconds. | `-1` (no expiration) |
+| `scalar.db.default_namespace_name` | The given namespace name will be used by operations that do not already specify a namespace. | |
+
+## Placeholder usage
+
+You can use placeholders in the values, and they are replaced with environment variables (`${env:<environment-variable-name>}`) or system properties (`${sys:<system-property-name>}`). You can also specify default values in placeholders like `${sys:<system-property-name>:-<default-value>}`.
+
+The following is an example of a configuration that uses placeholders:
+
+```properties
+scalar.db.username=${env:SCALAR_DB_USERNAME:-admin}
+scalar.db.password=${env:SCALAR_DB_PASSWORD}
+```
+
+In this example configuration, ScalarDB reads the username and password from environment variables. If the environment variable `SCALAR_DB_USERNAME` does not exist, ScalarDB uses the default value `admin`.
+
+## Configuration examples
+
+This section provides some configuration examples.
+
+### Configuration example #1 - App and database
+
+```mermaid
+flowchart LR
+ app["App
(ScalarDB library with
Consensus Commit)"]
+ db[(Underlying storage or database)]
+ app --> db
+```
+
+In this example configuration, the app (ScalarDB library with Consensus Commit) connects to an underlying storage or database (in this case, Cassandra) directly.
+
+:::warning
+
+This configuration exists only for development purposes and isn’t suitable for a production environment. This is because the app needs to implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface to take transactionally consistent backups for ScalarDB, which requires additional configurations.
+
+:::
+
+The following is an example of the configuration for connecting the app to the underlying database through ScalarDB:
+
+```properties
+# Transaction manager implementation.
+scalar.db.transaction_manager=consensus-commit
+
+# Storage implementation.
+scalar.db.storage=cassandra
+
+# Comma-separated contact points.
+scalar.db.contact_points=
+
+# Credential information to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+### Configuration example #2 - App, ScalarDB Cluster, and database
+
+```mermaid
+flowchart LR
+ app["App -
ScalarDB library with gRPC"]
+ cluster["ScalarDB Cluster -
(ScalarDB library with
Consensus Commit)"]
+ db[(Underlying storage or database)]
+ app --> cluster --> db
+```
+
+In this example configuration, the app (ScalarDB library with gRPC) connects to an underlying storage or database (in this case, Cassandra) through ScalarDB Cluster, which is a component that is available only in the ScalarDB Enterprise edition.
+
+:::note
+
+This configuration is acceptable for production use because ScalarDB Cluster implements the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface, which enables you to take transactionally consistent backups for ScalarDB by pausing ScalarDB Cluster.
+
+
+:::
+
+The following is an example of the configuration for connecting the app to the underlying database through ScalarDB Cluster:
+
+```properties
+# Transaction manager implementation.
+scalar.db.transaction_manager=cluster
+
+# Contact point of the cluster.
+scalar.db.contact_points=indirect:
+```
+
+For details about client configurations, see the ScalarDB Cluster [client configurations (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api/#client-configurations).
+
+[^1]: It's worth benchmarking the performance with a few variations (for example, 75% and 125% of the default value) on the same underlying storage that your application uses, considering your application's access pattern, to determine the optimal configuration as it really depends on those factors. Also, it's important to benchmark combinations of these parameters (for example, first, `slot_capacity:20` and `group_size_fix_timeout_millis:40`; second, `slot_capacity:30` and `group_size_fix_timeout_millis:40`; and third, `slot_capacity:20` and `group_size_fix_timeout_millis:80`) to determine the optimal combination.
diff --git a/versioned_docs/version-3.13/data-modeling.mdx b/versioned_docs/version-3.13/data-modeling.mdx
new file mode 100644
index 00000000..9b2121e1
--- /dev/null
+++ b/versioned_docs/version-3.13/data-modeling.mdx
@@ -0,0 +1,131 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Model Your Data
+
+Data modeling (or in other words, designing your database schemas) is the process of conceptualizing and visualizing how data will be stored and used by identifying the patterns used to access data and the types of queries to be performed within business operations.
+
+This page first explains the ScalarDB data model and then describes how to design your database schemas based on the data model.
+
+## ScalarDB data model
+
+ScalarDB's data model is an extended key-value model inspired by the Bigtable data model. It is similar to the relational model but differs in several ways, as described below. The data model is chosen to abstract various databases, such as relational databases, NoSQL databases, and NewSQL databases.
+
+The following diagram shows an example of ScalarDB tables, each of which is a collection of records. This section first explains what objects, such as tables and records, ScalarDB defines and then describes how to locate the records.
+
+![ScalarDB data model](images/scalardb_data_model.png)
+
+### Objects in ScalarDB
+
+The ScalarDB data model has several objects.
+
+#### Namespace
+
+A namespace is a collection of tables analogous to an SQL namespace or database.
+
+#### Table
+
+A table is a collection of partitions. A namespace most often contains one or more tables, each identified by a name.
+
+#### Partition
+
+A partition is a collection of records and a unit of distribution to nodes, whether logical or physical. Therefore, records within the same partition are placed in the same node. ScalarDB assumes multiple partitions are distributed by hashing.
+
+#### Record / row
+
+A record or row is a set of columns that is uniquely identifiable among all other records.
+
+#### Column
+
+A column is a fundamental data element and does not need to be broken down any further. Each record is composed of one or more columns. Each column has a data type. For details about the data type, refer to [Data-type mapping between ScalarDB and other databases](schema-loader.mdx#data-type-mapping-between-scalardb-and-other-databases).
+
+#### Secondary index
+
+A secondary index is a sorted copy of a column in a single base table. Each index entry is linked to a corresponding table partition. ScalarDB currently doesn't support multi-column indexes, so it can create indexes with only one column.
+
+### How to locate records
+
+This section discusses how to locate records from a table.
+
+#### Primary key
+
+A primary key uniquely identifies each record; no two records can have the same primary key. Therefore, you can locate a record by specifying a primary key. A primary key comprises a partition key and, optionally, a clustering key.
+
+#### Partition key
+
+A partition key uniquely identifies a partition. A partition key comprises a set of columns, which are called partition key columns. When you specify only a partition key, you can get a set of records that belong to the partition.
+
+#### Clustering key
+
+A clustering key uniquely identifies a record within a partition. It comprises a set of columns called clustering-key columns. When you want to specify a clustering key, you should specify a partition key for efficient lookups. When you specify a clustering key without a partition key, you end up scanning all the partitions. Scanning all the partitions is time-consuming, especially when the amount of data is large, so only do so at your own discretion.
+
+Records within a partition are assumed to be sorted by clustering-key columns, specified as a clustering order. Therefore, you can specify a part of clustering-key columns in the defined order to narrow down the results to be returned.
+
+#### Index key
+
+An index key identifies records by looking up the key in indexes. An index key lookup spans all the partitions, so it is not necessarily efficient, especially if the selectivity of a lookup is not low.
+
+## How to design your database schemas
+
+You can design your database schemas similarly to the relational model, but there is a basic principle to follow, along with a few best practices.
+
+### Query-driven data modeling
+
+In relational databases, data is organized in normalized tables with foreign keys used to reference related data in other tables. The queries that the application will make are structured by the tables, and the related data is queried as table joins.
+
+Although ScalarDB supports join operations in ScalarDB SQL, data modeling should be more query-driven, like NoSQL databases. The data access patterns and application queries should determine the structure and organization of tables.
+
+### Best practices
+
+This section describes best practices for designing your database schemas.
+
+#### Consider data distribution
+
+Preferably, you should try to balance loads to partitions by properly selecting partition and clustering keys.
+
+For example, in a banking application, if you choose an account ID as a partition key, you can perform any account operations for a specific account within the partition to which the account belongs. So, if you operate on different account IDs, you will access different partitions.
+
+On the other hand, if you choose a branch ID as a partition key and an account ID as a clustering key, all the accesses to a branch's account IDs go to the same partition, causing an imbalance in loads and data sizes. In addition, you should choose a high-cardinality column as a partition key because creating a small number of large partitions also causes an imbalance in loads and data sizes.
+
+#### Try to read a single partition
+
+Because of the data model characteristics, a single-partition lookup is the most efficient. If you need to issue a scan or select request that requires multi-partition lookups or scans, which you can [enable with cross-partition scan](configurations.mdx#cross-partition-scan-configurations), do so at your own discretion and consider updating the schemas if possible.
+
+For example, in a banking application, if you choose email as a partition key and an account ID as a clustering key, and issue a query that specifies an account ID, the query will span all the partitions because it cannot identify the corresponding partition efficiently. In such a case, you should always look up the table with an account ID.
+
+:::note
+
+If you read multiple partitions on a relational database with proper indexes, your query might be efficient because the query is pushed down to the database.
+
+:::
+
+#### Try to avoid using secondary indexes
+
+Similarly to the above, if you need to issue a scan or select request that uses a secondary index, the request will span all the partitions of a table. Therefore, you should try to avoid using secondary indexes. If you need to use a secondary index, use it through a low-selectivity query, which looks up only a small portion of the data.
+
+As an alternative to secondary indexes, you can create another table that works as a clustered index of a base table.
+
+For example, assume there is a table with three columns: `table1(A, B, C)`, with the primary key `A`. Then, you can create a table like `index-table1(C, A, B)` with `C` as the primary key so that you can look up a single partition by specifying a value for `C`. This approach could speed up read queries but might add more load to write operations because you need to write to two tables by using ScalarDB transactions.
+
+:::note
+
+There are plans to have a table-based secondary-index feature in ScalarDB in the future.
+
+:::
+
+#### Consider that data is assumed to be distributed by hashing
+
+In the current ScalarDB data model, data is assumed to be distributed by hashing. Therefore, you can't perform range queries efficiently without a partition key.
+
+If you want to issue range queries efficiently, you need to do so within a partition. However, if you follow this approach, you must specify a partition key. This can pose scalability issues as the range queries always go to the same partition, potentially overloading it. This limitation is not specific to ScalarDB but to databases where data is distributed by hashing for scalability.
+
+:::note
+
+If you run ScalarDB on a relational database with proper indexes, your range query might be efficient because the query is pushed down to the database.
+
+:::
+
diff --git a/versioned_docs/version-3.13/database-configurations.mdx b/versioned_docs/version-3.13/database-configurations.mdx
new file mode 100644
index 00000000..8e7c0098
--- /dev/null
+++ b/versioned_docs/version-3.13/database-configurations.mdx
@@ -0,0 +1,119 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Configurations for the Underlying Databases of ScalarDB
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This document explains how to configure the underlying databases of ScalarDB to make applications that use ScalarDB work correctly and efficiently.
+
+## General requirements for the underlying databases
+
+ScalarDB requires each underlying database to provide certain capabilities to run transactions and analytics on the databases. This document explains the general requirements and how to configure each database to achieve the requirements.
+
+### Transactions
+
+ScalarDB requires each underlying database to provide at least the following capabilities to run transactions on the databases:
+
+- Linearizable read and conditional mutations (write and delete) on a single database record.
+- Durability of written database records.
+- Ability to store arbitrary data alongside the application data in each database record.
+
+### Analytics
+
+ScalarDB requires each underlying database to provide the following capability to run analytics on the databases:
+
+- Ability to return only committed records.
+
+:::note
+
+You need to have database accounts that have enough privileges to access the databases through ScalarDB since ScalarDB runs on the underlying databases not only for CRUD operations but also for performing operations like creating or altering schemas, tables, or indexes. ScalarDB basically requires a fully privileged account to access the underlying databases.
+
+:::
+
+## How to configure databases to achieve the general requirements
+
+Select your database for details on how to configure it to achieve the general requirements.
+
+
+
+ Transactions
+
+ - Use a single primary server or synchronized multi-primary servers for all operations (no read operations on read replicas that are asynchronously replicated from a primary database).
+ - Use read-committed or stricter isolation levels.
+
+ Analytics
+
+ - Use read-committed or stricter isolation levels.
+
+
+ Transactions
+
+ - Use a single primary region for all operations. (No read and write operations on global tables in non-primary regions.)
+ - There is no concept of primary regions in DynamoDB, so you must designate a primary region by yourself.
+
+ Analytics
+
+ - Not applicable. DynamoDB always returns committed records, so there are no DynamoDB-specific requirements.
+
+
+ Transactions
+
+ - Use a single primary region for all operations with `Strong` or `Bounded Staleness` consistency.
+
+ Analytics
+
+ - Not applicable. Cosmos DB always returns committed records, so there are no Cosmos DB–specific requirements.
+
+
+ Transactions
+
+ - Use a single primary cluster for all operations (no read or write operations in non-primary clusters).
+ - Use `batch` or `group` for `commitlog_sync`.
+ - If you're using Cassandra-compatible databases, those databases must properly support lightweight transactions (LWT).
+
+ Analytics
+
+ - Not applicable. Cassandra always returns committed records, so there are no Cassandra-specific requirements.
+
+
+
+## Recommendations
+
+Properly configuring each underlying database of ScalarDB for high performance and high availability is recommended. The following recommendations include some knobs and configurations to update.
+
+:::note
+
+ScalarDB can be seen as an application of underlying databases, so you may want to try updating other knobs and configurations that are commonly used to improve efficiency.
+
+:::
+
+
+
+ - Use read-committed isolation for better performance.
+ - Follow the performance optimization best practices for each database. For example, increasing the buffer size (for example, `shared_buffers` in PostgreSQL) and increasing the number of connections (for example, `max_connections` in PostgreSQL) are usually recommended for better performance.
+
+
+ - Increase the number of read capacity units (RCUs) and write capacity units (WCUs) for high throughput.
+ - Enable point-in-time recovery (PITR).
+
+:::note
+
+Since DynamoDB stores data in multiple availability zones by default, you don’t need to adjust any configurations to improve availability.
+
+:::
+
+
+ - Increase the number of Request Units (RUs) for high throughput.
+ - Enable point-in-time restore (PITR).
+ - Enable availability zones.
+
+
+ - Increase `concurrent_reads` and `concurrent_writes` for high throughput. For details, see the official Cassandra documentation about [`concurrent_writes`](https://cassandra.apache.org/doc/stable/cassandra/configuration/cass_yaml_file.html#concurrent_writes).
+
+
diff --git a/versioned_docs/version-3.13/deploy-overview.mdx b/versioned_docs/version-3.13/deploy-overview.mdx
new file mode 100644
index 00000000..3eaecbf0
--- /dev/null
+++ b/versioned_docs/version-3.13/deploy-overview.mdx
@@ -0,0 +1,18 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Deploy Overview
+
+In this category, you can follow guides to help you become more familiar with deploying ScalarDB, specifically ScalarDB Cluster and ScalarDB Analytics, in local and cloud-based Kubernetes environments.
+
+## Deploy ScalarDB in your local Kubernetes environment
+
+In this sub-category, you can learn how to deploy ScalarDB in your local Kubernetes environment. The primary focus of this sub-category is learning to use Scalar Helm Charts. Because of this, a particular database (PostgreSQL) is specified for simplicity.
+
+## Deploy ScalarDB in a cloud-based Kubernetes environment
+
+In this sub-category, you can learn how to deploy ScalarDB in a cloud-based Kubernetes environment. This sub-category describes how to use Scalar Helm Charts and other steps that are required to run ScalarDB in a cloud-based Kubernetes environment.
\ No newline at end of file
diff --git a/versioned_docs/version-3.13/design.mdx b/versioned_docs/version-3.13/design.mdx
new file mode 100644
index 00000000..d900065c
--- /dev/null
+++ b/versioned_docs/version-3.13/design.mdx
@@ -0,0 +1,13 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# ScalarDB Design Document
+
+For details about the design and implementation of ScalarDB, please see the following documents, which we presented at the VLDB 2023 conference:
+
+- **Speakerdeck presentation:** [ScalarDB: Universal Transaction Manager for Polystores](https://speakerdeck.com/scalar/scalardb-universal-transaction-manager-for-polystores-vldb23)
+- **Detailed paper:** [ScalarDB: Universal Transaction Manager for Polystores](https://www.vldb.org/pvldb/vol16/p3768-yamada.pdf)
diff --git a/versioned_docs/version-3.13/develop-overview.mdx b/versioned_docs/version-3.13/develop-overview.mdx
new file mode 100644
index 00000000..397931f4
--- /dev/null
+++ b/versioned_docs/version-3.13/develop-overview.mdx
@@ -0,0 +1,28 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Develop Overview
+
+In this category, you can follow guides to help you become more familiar with ScalarDB, specifically with how to run transactions, analytical queries, and non-transactional storage operations.
+
+## Run transactions
+
+In this sub-category, you can learn how to model your data based on the ScalarDB data model and create schemas. Then, you can learn how to run transactions through the ScalarDB core library and ScalarDB Cluster, a gRPC server that wraps the core library.
+
+You can also learn how to create correct, secure, and well-performing ScalarDB-based applications.
+
+## Run analytical queries
+
+In this sub-category, you can learn how to set up and configure ScalarDB Analytics, an analytics component of ScalarDB. Then, you run analytical queries over the databases you write through ScalarDB transactions.
+
+## Run non-transactional storage operations
+
+ScalarDB was initially designed to provide a unified abstraction between diverse databases and transactions across such databases. However, there are cases where you only need the unified abstraction to simplify your applications that use multiple, possibly diverse, databases.
+
+ScalarDB can be configured to provide only the unified abstraction, without transaction capabilities, so that it only runs non-transactional operations on the underlying database and storage. Since ScalarDB doesn't guarantee ACID across multiple operations, you can perform operations with better performance.
+
+In this sub-category, you can learn how to run such non-transactional storage operations.
\ No newline at end of file
diff --git a/versioned_docs/version-3.13/getting-started-with-scalardb-by-using-kotlin.mdx b/versioned_docs/version-3.13/getting-started-with-scalardb-by-using-kotlin.mdx
new file mode 100644
index 00000000..b347e95d
--- /dev/null
+++ b/versioned_docs/version-3.13/getting-started-with-scalardb-by-using-kotlin.mdx
@@ -0,0 +1,416 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Getting Started with ScalarDB by Using Kotlin
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This getting started tutorial explains how to configure your preferred database in ScalarDB and set up a basic electronic money application by using Kotlin. Since Kotlin has Java interoperability, you can use ScalarDB directly from Kotlin.
+
+:::warning
+
+The electronic money application is simplified for this tutorial and isn't suitable for a production environment.
+
+:::
+
+## Prerequisites for this sample application
+
+Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment:
+
+- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) 8
+- OpenJDK 8 from [Eclipse Temurin](https://adoptium.net/temurin/releases/), [Amazon Corretto](https://aws.amazon.com/corretto/), or [Microsoft](https://learn.microsoft.com/en-us/java/openjdk/download)
+
+:::note
+
+This sample application only works with Java 8. However, ScalarDB itself works with Java LTS versions, which means that you can use Java LTS versions for your application that uses ScalarDB. For details on the requirements of ScalarDB, such as which Java versions can be used, see [Requirements](./requirements.mdx).
+
+:::
+
+In addition, since you'll be using Docker Compose to run the databases, you must have [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later installed.
+
+## Clone the ScalarDB samples repository
+
+Open **Terminal**, then clone the ScalarDB samples repository by running the following command:
+
+```console
+git clone https://github.com/scalar-labs/scalardb-samples
+```
+
+Then, go to the directory that contains the sample application by running the following command:
+
+```console
+cd scalardb-samples/scalardb-kotlin-sample
+```
+
+## Set up your database for ScalarDB
+
+Select your database, and follow the instructions to configure it for ScalarDB.
+
+For a list of databases that ScalarDB supports, see [Databases](requirements.mdx#databases).
+
+
+
+ Run MySQL locally
+
+ You can run MySQL in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start MySQL, run the following command:
+
+ ```console
+ docker compose up -d mysql
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for MySQL in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For MySQL
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:mysql://localhost:3306/
+ scalar.db.username=root
+ scalar.db.password=mysql
+ ```
+
+
+ Run PostgreSQL locally
+
+ You can run PostgreSQL in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start PostgreSQL, run the following command:
+
+ ```console
+ docker compose up -d postgres
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for PostgreSQL in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For PostgreSQL
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://localhost:5432/
+ scalar.db.username=postgres
+ scalar.db.password=postgres
+ ```
+
+
+ Run Oracle Database locally
+
+ You can run Oracle Database in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start Oracle Database, run the following command:
+
+ ```console
+ docker compose up -d oracle
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Oracle Database in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For Oracle
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/FREEPDB1
+ scalar.db.username=SYSTEM
+ scalar.db.password=Oracle
+ ```
+
+
+ Run SQL Server locally
+
+ You can run SQL Server in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start SQL Server, run the following command:
+
+ ```console
+ docker compose up -d sqlserver
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for SQL Server in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For SQL Server
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:sqlserver://localhost:1433;encrypt=true;trustServerCertificate=true
+ scalar.db.username=sa
+ scalar.db.password=SqlServer22
+ ```
+
+
+ Run Amazon DynamoDB Local
+
+ You can run Amazon DynamoDB Local in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start Amazon DynamoDB Local, run the following command:
+
+ ```console
+ docker compose up -d dynamodb
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Amazon DynamoDB Local in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For DynamoDB Local
+ scalar.db.storage=dynamo
+ scalar.db.contact_points=sample
+ scalar.db.username=sample
+ scalar.db.password=sample
+ scalar.db.dynamo.endpoint_override=http://localhost:8000
+ ```
+
+
+ To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account).
+
+ Configure Cosmos DB for NoSQL
+
+ Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level).
+
+ Configure ScalarDB
+
+ The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure.
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described.
+
+ ```properties
+ # For Cosmos DB
+ scalar.db.storage=cosmos
+ scalar.db.contact_points=<COSMOS_DB_FOR_NOSQL_URI>
+ scalar.db.password=<COSMOS_DB_FOR_NOSQL_KEY>
+ ```
+
+:::note
+
+You can use a primary key or a secondary key as the value for `scalar.db.password`.
+
+:::
+
+
+ Run Cassandra locally
+
+ You can run Apache Cassandra in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+ To start Apache Cassandra, run the following command:
+ ```console
+ docker compose up -d cassandra
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-kotlin-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Cassandra in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For Cassandra
+ scalar.db.storage=cassandra
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ ```
+
+
+
+## Load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.mdx#data-type-mapping-between-scalardb-and-other-databases).
+
+For this tutorial, a file named **schema.json** already exists in the `scalardb-samples/scalardb-kotlin-sample` directory. To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `scalardb-samples/scalardb-kotlin-sample` directory.
+
+Then, based on your database, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator --no-backup --no-scaling
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+Also, `--no-backup` and `--no-scaling` options are specified because Amazon DynamoDB Local does not support continuous backup and auto-scaling.
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator --replication-factor=1
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+In addition, the `--replication-factor=1` option has an effect only when using Cassandra. The default replication factor is `3`, but to facilitate the setup in this tutorial, `1` is used so that you only need to prepare a cluster with one node instead of three nodes. However, keep in mind that a replication factor of `1` is not suited for production.
+
+:::
+
+
+
+## Execute transactions and retrieve data in the basic electronic money application
+
+After loading the schema, you can execute transactions and retrieve data in the basic electronic money application that is included in the repository that you cloned.
+
+The application supports the following types of transactions:
+
+- Create an account.
+- Add funds to an account.
+- Send funds between two accounts.
+- Get an account balance.
+
+:::note
+
+When you first execute a Gradle command, Gradle will automatically install the necessary libraries.
+
+:::
+
+### Create an account with a balance
+
+You need an account with a balance so that you can send funds between accounts.
+
+To create an account for **customer1** that has a balance of **500**, run the following command:
+
+```console
+./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+### Create an account without a balance
+
+After setting up an account that has a balance, you need another account for sending funds to.
+
+To create an account for **merchant1** that has a balance of **0**, run the following command:
+
+```console
+./gradlew run --args="-action charge -amount 0 -to merchant1"
+```
+
+### Add funds to an account
+
+You can add funds to an account in the same way that you created and added funds to an account in [Create an account with a balance](#create-an-account-with-a-balance).
+
+To add **500** to the account for **customer1**, run the following command:
+
+```console
+./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+The account for **customer1** will now have a balance of **1000**.
+
+### Send electronic money between two accounts
+
+Now that you have created two accounts, with at least one of those accounts having a balance, you can send funds from one account to the other account.
+
+To have **customer1** pay **100** to **merchant1**, run the following command:
+
+```console
+./gradlew run --args="-action pay -amount 100 -from customer1 -to merchant1"
+```
+
+### Get an account balance
+
+After sending funds from one account to the other, you can check the balance of each account.
+
+To get the balance of **customer1**, run the following command:
+
+```console
+./gradlew run --args="-action getBalance -id customer1"
+```
+
+You should see the following output:
+
+```console
+...
+The balance for customer1 is 900
+...
+```
+
+To get the balance of **merchant1**, run the following command:
+
+```console
+./gradlew run --args="-action getBalance -id merchant1"
+```
+
+You should see the following output:
+
+```console
+...
+The balance for merchant1 is 100
+...
+```
+
+## Stop the database
+
+To stop the database, stop the Docker container by running the following command:
+
+```console
+docker compose down
+```
+
+## Reference
+
+To see the source code for the electronic money application used in this tutorial, see [`ElectronicMoney.kt`](https://github.com/scalar-labs/scalardb-samples/blob/main/getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt).
diff --git a/versioned_docs/version-3.13/getting-started-with-scalardb.mdx b/versioned_docs/version-3.13/getting-started-with-scalardb.mdx
new file mode 100644
index 00000000..afb2be5d
--- /dev/null
+++ b/versioned_docs/version-3.13/getting-started-with-scalardb.mdx
@@ -0,0 +1,536 @@
+---
+tags:
+ - Community
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Getting Started with ScalarDB
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This getting started tutorial explains how to configure your preferred database in ScalarDB and illustrates the process of creating a sample e-commerce application, where items can be ordered and paid for with a credit card by using ScalarDB. The sample e-commerce application shows how users can order and pay for items by using a line of credit.
+
+:::warning
+
+Since the focus of the sample application is to demonstrate using ScalarDB, application-specific error handling, authentication processing, and similar functions are not included in the sample application. For details about exception handling in ScalarDB, see [How to handle exceptions](api-guide.mdx#how-to-handle-exceptions).
+
+:::
+
+## Prerequisites for this sample application
+
+Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment:
+
+- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) 8
+- OpenJDK 8 from [Eclipse Temurin](https://adoptium.net/temurin/releases/), [Amazon Corretto](https://aws.amazon.com/corretto/), or [Microsoft](https://learn.microsoft.com/en-us/java/openjdk/download)
+
+:::note
+
+This sample application only works with Java 8. However, ScalarDB itself works with Java LTS versions, which means that you can use Java LTS versions for your application that uses ScalarDB. For details on the requirements of ScalarDB, such as which Java versions can be used, see [Requirements](./requirements.mdx).
+
+:::
+
+In addition, since you'll be using Docker Compose to run the databases, you must have [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later installed.
+
+## Clone the ScalarDB samples repository
+
+Open **Terminal**, then clone the ScalarDB samples repository by running the following command:
+
+```console
+git clone https://github.com/scalar-labs/scalardb-samples
+```
+
+Then, go to the directory that contains the sample application by running the following command:
+
+```console
+cd scalardb-samples/scalardb-sample
+```
+
+## Set up your database for ScalarDB
+
+Select your database, and follow the instructions to configure it for ScalarDB.
+
+For a list of databases that ScalarDB supports, see [Databases](requirements.mdx#databases).
+
+
+
+ Run MySQL locally
+
+ You can run MySQL in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start MySQL, run the following command:
+
+ ```console
+ docker compose up -d mysql
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for MySQL in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For MySQL
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:mysql://localhost:3306/
+ scalar.db.username=root
+ scalar.db.password=mysql
+ ```
+
+
+ Run PostgreSQL locally
+
+ You can run PostgreSQL in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start PostgreSQL, run the following command:
+
+ ```console
+ docker compose up -d postgres
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for PostgreSQL in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For PostgreSQL
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://localhost:5432/
+ scalar.db.username=postgres
+ scalar.db.password=postgres
+ ```
+
+
+ Run Oracle Database locally
+
+ You can run Oracle Database in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start Oracle Database, run the following command:
+
+ ```console
+ docker compose up -d oracle
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Oracle Database in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For Oracle
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/FREEPDB1
+ scalar.db.username=SYSTEM
+ scalar.db.password=Oracle
+ ```
+
+
+ Run SQL Server locally
+
+ You can run SQL Server in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start SQL Server, run the following command:
+
+ ```console
+ docker compose up -d sqlserver
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for SQL Server in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For SQL Server
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:sqlserver://localhost:1433;encrypt=true;trustServerCertificate=true
+ scalar.db.username=sa
+ scalar.db.password=SqlServer22
+ ```
+
+
+ Run Amazon DynamoDB Local
+
+ You can run Amazon DynamoDB Local in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start Amazon DynamoDB Local, run the following command:
+
+ ```console
+ docker compose up -d dynamodb
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Amazon DynamoDB Local in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For DynamoDB Local
+ scalar.db.storage=dynamo
+ scalar.db.contact_points=sample
+ scalar.db.username=sample
+ scalar.db.password=sample
+ scalar.db.dynamo.endpoint_override=http://localhost:8000
+ ```
+
+
+ To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account).
+
+ Configure Cosmos DB for NoSQL
+
+ Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level).
+
+ Configure ScalarDB
+
+ The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure.
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described.
+
+ ```properties
+ # For Cosmos DB
+ scalar.db.storage=cosmos
+ scalar.db.contact_points=
+ scalar.db.password=
+ ```
+
+:::note
+
+You can use a primary key or a secondary key as the value for `scalar.db.password`.
+
+:::
+
+
+ Run Cassandra locally
+
+ You can run Apache Cassandra in Docker Compose by using the `docker-compose.yml` file in the `scalardb-samples/scalardb-sample` directory.
+
+ To start Apache Cassandra, run the following command:
+ ```console
+ docker compose up -d cassandra
+ ```
+
+ Configure ScalarDB
+
+ The **database.properties** file in the `scalardb-samples/scalardb-sample` directory contains database configurations for ScalarDB. Please uncomment the properties for Cassandra in the **database.properties** file so that the configuration looks as follows:
+
+ ```properties
+ # For Cassandra
+ scalar.db.storage=cassandra
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ ```
+
+
+
+## Load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.mdx#data-type-mapping-between-scalardb-and-other-databases).
+
+For this tutorial, a file named **schema.json** already exists in the `scalardb-samples/scalardb-sample` directory. To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `scalardb-samples/scalardb-sample` directory.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator --no-backup --no-scaling
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+Also, `--no-backup` and `--no-scaling` options are specified because Amazon DynamoDB Local does not support continuous backup and auto-scaling.
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+:::
+
+
+ ```console
+ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator --replication-factor=1
+ ```
+
+:::note
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.mdx).
+
+In addition, the `--replication-factor=1` option has an effect only when using Cassandra. The default replication factor is `3`, but to facilitate the setup in this tutorial, `1` is used so that you only need to prepare a cluster with one node instead of three nodes. However, keep in mind that a replication factor of `1` is not suited for production.
+
+:::
+
+
+
+### Schema details
+
+As shown in [`schema.json`](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample/schema.json) for the sample application, all the tables are created in the `sample` namespace.
+
+- `sample.customers`: a table that manages customer information
+ - `credit_limit`: the maximum amount of money that the lender will allow the customer to spend from their line of credit
+ - `credit_total`: the amount of money that the customer has spent from their line of credit
+- `sample.orders`: a table that manages order information
+- `sample.statements`: a table that manages order statement information
+- `sample.items`: a table that manages information for items to be ordered
+
+The Entity Relationship Diagram for the schema is as follows:
+
+![ERD](images/getting-started-ERD.png)
+
+### Load the initial data
+
+Before running the sample application, you need to load the initial data by running the following command:
+
+```console
+./gradlew run --args="LoadInitialData"
+```
+
+After the initial data has loaded, the following records should be stored in the tables.
+
+**`sample.customers` table**
+
+| customer_id | name | credit_limit | credit_total |
+|-------------|---------------|--------------|--------------|
+| 1 | Yamada Taro | 10000 | 0 |
+| 2 | Yamada Hanako | 10000 | 0 |
+| 3 | Suzuki Ichiro | 10000 | 0 |
+
+**`sample.items` table**
+
+| item_id | name | price |
+|---------|--------|-------|
+| 1 | Apple | 1000 |
+| 2 | Orange | 2000 |
+| 3 | Grape | 2500 |
+| 4 | Mango | 5000 |
+| 5 | Melon | 3000 |
+
+## Execute transactions and retrieve data in the sample application
+
+The following sections describe how to execute transactions and retrieve data in the sample e-commerce application.
+
+### Get customer information
+
+Start with getting information about the customer whose ID is `1` by running the following command:
+
+```console
+./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0}
+...
+```
+
+### Place an order
+
+Then, have customer ID `1` place an order for three apples and two oranges by running the following command:
+
+:::note
+
+The order format in this command is `./gradlew run --args="PlaceOrder <CUSTOMER_ID> <ITEM_ID>:<COUNT>,<ITEM_ID>:<COUNT>,..."`.
+
+:::
+
+```console
+./gradlew run --args="PlaceOrder 1 1:3,2:2"
+```
+
+You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e"}
+...
+```
+
+### Check order details
+
+Check details about the order by running the following command, replacing `<ORDER_ID>` with the UUID for the `order_id` that was shown after running the previous command:
+
+```console
+./gradlew run --args="GetOrder <ORDER_ID>"
+```
+
+You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`:
+
+```console
+...
+{"order": {"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}}
+...
+```
+
+### Place another order
+
+Place an order for one melon that uses the remaining amount in `credit_total` for customer ID `1` by running the following command:
+
+```console
+./gradlew run --args="PlaceOrder 1 5:1"
+```
+
+You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d"}
+...
+```
+
+### Check order history
+
+Get the history of all orders for customer ID `1` by running the following command:
+
+```console
+./gradlew run --args="GetOrders 1"
+```
+
+You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp:
+
+```console
+...
+{"order": [{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000},{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d","timestamp": 1650948412766,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000}]}
+...
+```
+
+### Check credit total
+
+Get the credit total for customer ID `1` by running the following command:
+
+```console
+./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output, which shows that customer ID `1` has reached their `credit_limit` in `credit_total` and cannot place any more orders:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000}
+...
+```
+
+Try to place an order for one grape and one mango by running the following command:
+
+```console
+./gradlew run --args="PlaceOrder 1 3:1,4:1"
+```
+
+You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount.
+
+```console
+...
+java.lang.RuntimeException: Credit limit exceeded
+ at sample.Sample.placeOrder(Sample.java:205)
+ at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:33)
+ at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8)
+ at picocli.CommandLine.executeUserObject(CommandLine.java:1783)
+ at picocli.CommandLine.access$900(CommandLine.java:145)
+ at picocli.CommandLine$RunLast.handle(CommandLine.java:2141)
+ at picocli.CommandLine$RunLast.handle(CommandLine.java:2108)
+ at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:1975)
+ at picocli.CommandLine.execute(CommandLine.java:1904)
+ at sample.command.SampleCommand.main(SampleCommand.java:35)
+...
+```
+
+### Make a payment
+
+To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount.
+
+Make a payment by running the following command:
+
+```console
+./gradlew run --args="Repayment 1 8000"
+```
+
+Then, check the `credit_total` amount for customer ID `1` by running the following command:
+
+```console
+./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000}
+...
+```
+
+Now that customer ID `1` has made a payment, place an order for one grape and one melon by running the following command:
+
+```console
+./gradlew run --args="PlaceOrder 1 3:1,4:1"
+```
+
+You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "8911cab3-1c2b-4322-9386-adb1c024e078"}
+...
+```
+
+## Stop the database
+
+To stop the database, stop the Docker container by running the following command:
+
+```console
+docker compose down
+```
+
+## Reference
+
+To see the source code for the e-commerce application used in this tutorial, see [`Sample.java`](https://github.com/scalar-labs/scalardb-samples/blob/main/scalardb-sample/src/main/java/sample/Sample.java).
diff --git a/versioned_docs/version-3.13/helm-charts/conf/scalar-loki-stack-custom-values.yaml b/versioned_docs/version-3.13/helm-charts/conf/scalar-loki-stack-custom-values.yaml
new file mode 100644
index 00000000..997537a5
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/conf/scalar-loki-stack-custom-values.yaml
@@ -0,0 +1,80 @@
+promtail:
+ config:
+ snippets:
+ # -- `scrapeConfigs` is exactly the part of https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
+ # -- The value will be created as a Kubernetes ConfigMap and then mounted to the Promtail Pod.
+ # -- Not really need to change this value. It's set to scrape all logs of ScalarDL/DB Pods by using regular expression.
+ scrapeConfigs: |
+ # -- the `scalardl` job scrapes all the logs from Scalar Ledger Pods, Scalar Auditor Pods, and the corresponding Envoy Pods
+ - job_name: scalardl
+ pipeline_stages:
+ - docker: {}
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels:
+ - __meta_kubernetes_pod_node_name
+ target_label: __host__
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: keep
+ regex: (.*)scalardl-(.+)
+ source_labels:
+ - pod
+ - replacement: /var/log/pods/*$1/*.log
+ separator: /
+ source_labels:
+ - __meta_kubernetes_pod_uid
+ - __meta_kubernetes_pod_container_name
+ target_label: __path__
+ # -- the `scalardb` job scrapes all the logs of ScalarDB Server Pods and the corresponding Envoy Pods
+ - job_name: scalardb
+ pipeline_stages:
+ - docker: {}
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels:
+ - __meta_kubernetes_pod_node_name
+ target_label: __host__
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: keep
+ regex: (.*)scalardb-(.+)
+ source_labels:
+ - pod
+ - replacement: /var/log/pods/*$1/*.log
+ separator: /
+ source_labels:
+ - __meta_kubernetes_pod_uid
+ - __meta_kubernetes_pod_container_name
+ target_label: __path__
+ # -- the `scalar-admin-for-kubernetes` job scrapes all the logs of Scalar Admin for Kubernetes Pods
+ - job_name: scalar-admin-for-kubernetes
+ pipeline_stages:
+ - docker: {}
+ - cri: {}
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels:
+ - __meta_kubernetes_pod_node_name
+ target_label: __host__
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: keep
+ regex: (.*)scalar-admin-for-kubernetes-(.+)
+ source_labels:
+ - pod
+ - replacement: /var/log/pods/*$1/*.log
+ separator: /
+ source_labels:
+ - __meta_kubernetes_pod_uid
+ - __meta_kubernetes_pod_container_name
+ target_label: __path__
diff --git a/versioned_docs/version-3.13/helm-charts/conf/scalar-prometheus-custom-values.yaml b/versioned_docs/version-3.13/helm-charts/conf/scalar-prometheus-custom-values.yaml
new file mode 100644
index 00000000..816ead1b
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/conf/scalar-prometheus-custom-values.yaml
@@ -0,0 +1,167 @@
+defaultRules:
+ # -- Default PrometheusRules are not enabled
+ create: false
+
+alertmanager:
+ # -- alertmanager is enabled
+ enabled: true
+
+ # -- Only check own namespace
+ alertmanagerConfigNamespaceSelector: null
+
+grafana:
+ # -- grafana is enabled
+ enabled: true
+
+ # -- Default Grafana dashboards are not enabled
+ defaultDashboardsEnabled: false
+
+ sidecar:
+ datasources:
+ enabled: true
+ defaultDatasourceEnabled: false
+ label: grafana_datasource
+ labelValue: "1"
+ dashboards:
+ enabled: true
+ label: grafana_dashboard
+ labelValue: "1"
+ # -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ # -- Grafana's primary configuration
+ grafana.ini:
+ security:
+ # -- allow Grafana to be embedded (not set the X-Frame-Options header)
+ # -- If you use Scalar Manager, you need to set allow_embedding to true.
+ # -- https://grafana.com/docs/grafana/latest/administration/configuration/#allow_embedding
+ allow_embedding: false
+
+ # -- Additional data source configurations
+ additionalDataSources:
+ - name: Prometheus
+ type: prometheus
+ uid: prometheus
+ url: http://scalar-monitoring-kube-pro-prometheus:9090/
+ access: proxy
+ editable: false
+ isDefault: false
+ jsonData:
+ timeInterval: 30s
+ # - name: Loki
+ # type: loki
+ # uid: loki
+ # url: http://scalar-logging-loki:3100/
+ # access: proxy
+ # editable: false
+ # isDefault: false
+
+kubeApiServer:
+ # -- Scraping kube-apiserver is disabled
+ enabled: false
+
+kubeControllerManager:
+ # -- Scraping kube-controller-manager is disabled
+ enabled: false
+
+coreDns:
+ # -- Scraping CoreDNS is disabled
+ enabled: false
+
+kubeEtcd:
+ # -- Scraping etcd is disabled
+ enabled: false
+
+kubeScheduler:
+ # -- Scraping kube-scheduler is disabled
+ enabled: false
+
+kubeProxy:
+ # -- Scraping kube-proxy is disabled
+ enabled: false
+
+kubelet:
+ # -- Scraping kubelet is disabled
+ enabled: false
+
+kubeStateMetrics:
+ # -- kube-state-metrics is disabled
+ enabled: false
+
+nodeExporter:
+ # -- node-exporter is disabled
+ enabled: false
+
+prometheusOperator:
+ # -- Prometheus Operator is enabled
+ enabled: true
+
+ admissionWebhooks:
+ patch:
+ # -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ namespaces:
+ # -- Only check own namespace
+ releaseNamespace: true
+
+ kubeletService:
+ # -- kubelet service for scraping kubelets is disabled
+ enabled: false
+
+ ## -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+prometheus:
+ # -- Prometheus is enabled
+ enabled: true
+
+ prometheusSpec:
+ # -- All PrometheusRules are enabled
+ ruleSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ ruleNamespaceSelector: {}
+
+ # -- All ServiceMonitors are enabled
+ serviceMonitorSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ serviceMonitorNamespaceSelector: {}
+
+ # -- All PodMonitors are enabled
+ podMonitorSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ podMonitorNamespaceSelector: {}
+
+ # -- All Probes are enabled
+ probeSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ probeNamespaceSelector: {}
+
+ ## -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## -- Prometheus StorageSpec for persistent data
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ storageSpec: {}
+ ## Using PersistentVolumeClaim
+ ##
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-envoy.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-envoy.mdx
new file mode 100644
index 00000000..bfa40a0c
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-envoy.mdx
@@ -0,0 +1,391 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Configure a custom values file for Scalar Envoy
+
+This document explains how to create your custom values file for the Scalar Envoy chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/envoy/README.md) of the Scalar Envoy chart.
+
+## Configure custom values for Scalar Envoy chart
+
+The Scalar Envoy chart is used via other charts (scalardb, scalardb-cluster, scalardl, and scalardl-audit), so you don't need to create a custom values file for the Scalar Envoy chart. If you want to configure Scalar Envoy, you need to add the `envoy.*` configuration to the other charts.
+
+For example, if you want to configure the Scalar Envoy for ScalarDB Server, you can configure some Scalar Envoy configurations in the custom values file of ScalarDB as follows.
+
+* Example (scalardb-custom-values.yaml)
+ ```yaml
+ envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+ scalardb:
+ configurationsForScalarDB:
+ ...
+ ```
+
+## Required configurations
+
+### Service configurations
+
+You must set `envoy.service.type` to specify the Service resource type of Kubernetes.
+
+If you accept client requests from inside of the Kubernetes cluster only (for example, if you deploy your client applications on the same Kubernetes cluster as Scalar products), you can set `envoy.service.type` to `ClusterIP`. This configuration doesn't create any load balancers provided by cloud service providers.
+
+```yaml
+envoy:
+ service:
+ type: ClusterIP
+```
+
+If you want to use a load balancer provided by a cloud service provider to accept client requests from outside of the Kubernetes cluster, you need to set `envoy.service.type` to `LoadBalancer`.
+
+```yaml
+envoy:
+ service:
+ type: LoadBalancer
+```
+
+If you want to configure the load balancer via annotations, you can also set annotations to `envoy.service.annotations`.
+
+```yaml
+envoy:
+ service:
+ type: LoadBalancer
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+```
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `envoy.resources`.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+envoy:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `envoy.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+envoy:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-cluster
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - envoy
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus and Grafana configurations (Recommended in production environments)
+
+If you want to monitor Scalar Envoy pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `envoy.grafanaDashboard.enabled`, `envoy.serviceMonitor.enabled`, and `envoy.prometheusRule.enabled`.
+
+```yaml
+envoy:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for Scalar Envoy pods, you can use `envoy.securityContext` and `envoy.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+envoy:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (Default value is recommended)
+
+If you want to change the image repository and version, you can use `envoy.image.repository` to specify the container repository information of the Scalar Envoy container image that you want to pull.
+
+```yaml
+envoy:
+ image:
+    repository: <SCALAR_ENVOY_CONTAINER_IMAGE>
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+* [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+### TLS configurations (optional based on your environment)
+
+You can enable TLS in:
+
+- Downstream connections between the client and Scalar Envoy.
+- Upstream connections between Scalar Envoy and Scalar products.
+
+In addition, you have several options from the following two perspectives:
+
+1. Management of private key and certificate files
+ 1. Manage your private key and certificate files automatically by using [cert-manager](https://cert-manager.io/docs/).
+ - You can reduce maintenance or operation costs. For example, cert-manager automatically renews certificates before they expire and Scalar Helm Chart automatically mounts private key and certificate files on the Scalar product pods.
+ - You cannot use a CA that cert-manager does not support. You can see the supported issuer in the [cert-manager documentation](https://cert-manager.io/docs/configuration/issuers/).
+ 1. Manage your private key and certificate files manually.
+ - You can issue and manage your private key and certificate files by using your preferred method on your own.
+ - You can use any certificate even if cert-manager does not support it.
+ - You must update secret resources when certificates expire.
+1. Kinds of certificates
+ 1. Use a trusted CA (signed certificate by third party).
+ - You can use trusted certificates from a third-party certificate issuer.
+ - You can encrypt packets.
+ - You must pay costs to issue trusted certificates.
+ 1. Use self-signed certificates.
+ - You can reduce costs to issue certificates.
+ - Reliability of certificates is lower than a trusted CA, but you can encrypt packets.
+
+In other words, you have the following four options:
+
+1. Use a self-signed CA with automatic management.
+1. Use a trusted CA with automatic management.
+1. Use a self-signed CA with manual management.
+1. Use a trusted CA with manual management.
+
+You should consider which method you use based on your security requirements. For guidance and related documentation for each method, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+ A[Do you want to use<br />cert-manager to manage your<br />private key and certificate<br />files automatically?]
+ A -->|Yes, I want to manage my<br />certificates automatically.| B
+ A -->|No, I want to manage my<br />certificates manually by myself.| C
+ B[Do you want to use a<br />self-signed CA or a trusted CA?]
+ C[Do you want to use a<br />self-signed CA or a trusted CA?]
+ B -->|I want to use a<br />self-signed CA.| D
+ B -->|I want to use a<br />trusted CA.| E
+ C -->|I want to use a<br />self-signed CA.| F
+ C -->|I want to use a<br />trusted CA.| G
+ D[See the Use a self-signed<br />CA with cert-manager to<br />manage your private key and<br />certificate files section.]
+ E[See the Use a trusted<br />CA with cert-manager to<br />manage private key and<br />certificate files section.]
+ F[See the Use your private<br />key and certificate files<br />section, and use the self-signed<br />certificate you generated.]
+ G[See the Use your private key<br />and certificate files section,<br />and use the trusted certificate<br />generated by the third party.]
+```
+
+#### Enable TLS in downstream connections
+
+You can enable TLS in downstream connections by using the following configurations:
+
+```yaml
+envoy:
+ tls:
+ downstream:
+ enabled: true
+```
+
+##### Use your private key and certificate files
+
+You can set your private key and certificate files by using the following configurations:
+
+```yaml
+envoy:
+ tls:
+ downstream:
+ enabled: true
+ certChainSecret: "envoy-tls-cert"
+ privateKeySecret: "envoy-tls-key"
+```
+
+In this case, you have to create secret resources that include private key and certificate files for Scalar Envoy as follows, replacing the contents in the angle brackets as described:
+
+```console
+kubectl create secret generic envoy-tls-cert --from-file=tls.crt=<PATH_TO_YOUR_CERTIFICATE_FILE_FOR_SCALAR_ENVOY> -n <NAMESPACE>
+kubectl create secret generic envoy-tls-key --from-file=tls.key=<PATH_TO_YOUR_PRIVATE_KEY_FILE_FOR_SCALAR_ENVOY> -n <NAMESPACE>
+```
+
+For more details on how to prepare private key and certificate files, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx).
+
+##### Use a trusted CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and certificate files with cert-manager by using the following configurations, replacing the content in the angle brackets as described:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager and prepare the `Issuers` resource. For details, see the cert-manager documentation, [Installation](https://cert-manager.io/docs/installation/) and [Issuer Configuration](https://cert-manager.io/docs/configuration/).
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. The default certificate configuration is recommended, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. For details, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+envoy:
+ tls:
+ downstream:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+          name: <YOUR_TRUSTED_CA>
+ dnsNames:
+ - envoy.scalar.example.com
+```
+
+In this case, cert-manager issues your private key and certificate files by using your trusted issuer. By using cert-manager, you don't need to mount your private key and certificate files manually.
+
+##### Use a self-signed CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and self-signed certificate files with cert-manager by using the following configurations:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager. For details, see the cert-manager documentation, [Installation](https://cert-manager.io/docs/installation/).
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. The default certificate configuration is recommended, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. For details, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+envoy:
+ tls:
+ downstream:
+ enabled: true
+ certManager:
+ enabled: true
+ selfSigned:
+ enabled: true
+ dnsNames:
+ - envoy.scalar.example.com
+```
+
+In this case, Scalar Helm Charts and cert-manager issue your private key and self-signed certificate files. You don't need to mount your private key and certificate files manually.
+
+#### Enable TLS in upstream connections
+
+You can enable TLS in upstream connections by using the following configurations:
+
+```yaml
+envoy:
+ tls:
+ upstream:
+ enabled: true
+```
+
+Also, you must set root CA certificate file of upstream Scalar products. To determine which approach you should take, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+ A[Are you using cert-manager?]
+ A -->|Yes| B
+ A -->|No| D
+ B[Are you using a self-signed CA with cert-manager?]
+  B -->|No| C[Are you using the same trusted CA for Envoy and<br />upstream Scalar products with cert-manager?]
+  C -->|No| D[You must set upstream Scalar products'<br />root CA certificate manually.]
+  C ---->|Yes| E[Scalar Helm Chart automatically sets the root CA certificate. You<br />don't need to set `envoy.tls.upstream.caRootCertSecret` explicitly.]
+  B ---->|Yes| E
+
+##### Set your root CA certificate file of upstream Scalar products
+
+You can set your root CA certificate file by using the following configurations:
+
+```yaml
+envoy:
+ tls:
+ upstream:
+ enabled: true
+ caRootCertSecret: "envoy-upstream-scalardb-cluster-root-ca"
+```
+
+In this case, you have to create secret resources that include CA certificate files as follows. You must set the root CA certificate file based on the upstream that you use (ScalarDB Cluster, ScalarDL Ledger, or ScalarDL Auditor). Be sure to replace the contents in the angle brackets as described.
+
+
+
+ ```console
+  kubectl create secret generic envoy-upstream-scalardb-cluster-root-ca --from-file=ca.crt=<PATH_TO_ROOT_CA_CERTIFICATE_FILE_FOR_SCALARDB_CLUSTER> -n <NAMESPACE>
+ ```
+
+
+ ```console
+  kubectl create secret generic envoy-upstream-scalardl-ledger-root-ca --from-file=ca.crt=<PATH_TO_ROOT_CA_CERTIFICATE_FILE_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+ ```
+
+
+ ```console
+  kubectl create secret generic envoy-upstream-scalardl-auditor-root-ca --from-file=ca.crt=<PATH_TO_ROOT_CA_CERTIFICATE_FILE_FOR_SCALARDL_AUDITOR> -n <NAMESPACE>
+ ```
+
+
+
+For more details on how to prepare private key and certificate files, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx).
+
+##### Set custom authority for TLS communications
+
+You can set the custom authority for TLS communications by using `envoy.tls.upstream.overrideAuthority`. This value doesn't change what host is actually connected. This value is intended for testing but may safely be used outside of tests as an alternative to DNS overrides. For example, you can specify the hostname presented in the certificate chain file that you set by using `scalardbCluster.tls.certChainSecret`, `ledger.tls.certChainSecret`, or `auditor.tls.certChainSecret`, depending on which product you're using. Envoy uses this value for verifying the certificate of the TLS connection with ScalarDB Cluster or ScalarDL.
+
+```yaml
+envoy:
+ tls:
+ upstream:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of Scalar Envoy using `envoy.replicaCount`.
+
+```yaml
+envoy:
+ replicaCount: 3
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `envoy.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+envoy:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-file.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-file.mdx
new file mode 100644
index 00000000..7d1593ca
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-file.mdx
@@ -0,0 +1,20 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Configure a custom values file for Scalar Helm Charts
+
+When you deploy Scalar products using Scalar Helm Charts, you must prepare your custom values file based on your environment. Please refer to the following documents for more details on how to create a custom values file for each product.
+
+* [ScalarDB Cluster](configure-custom-values-scalardb-cluster.mdx)
+* [ScalarDB Analytics with PostgreSQL](configure-custom-values-scalardb-analytics-postgresql.mdx)
+* [ScalarDL Ledger](configure-custom-values-scalardl-ledger.mdx)
+* [ScalarDL Auditor](configure-custom-values-scalardl-auditor.mdx)
+* [ScalarDL Schema Loader](configure-custom-values-scalardl-schema-loader.mdx)
+* [Scalar Admin for Kubernetes](configure-custom-values-scalar-admin-for-kubernetes.mdx)
+* [Scalar Manager](configure-custom-values-scalar-manager.mdx)
+* [Envoy](configure-custom-values-envoy.mdx)
+* [[Deprecated] ScalarDB Server](configure-custom-values-scalardb.mdx)
+* [[Deprecated] ScalarDB GraphQL](configure-custom-values-scalardb-graphql.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.mdx
new file mode 100644
index 00000000..9bf07b18
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.mdx
@@ -0,0 +1,135 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Configure a custom values file for Scalar Admin for Kubernetes
+
+This document explains how to create your custom values file for the Scalar Admin for Kubernetes chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-admin-for-kubernetes/README.md) of the Scalar Admin for Kubernetes chart.
+
+## Required configurations
+
+This section explains the required configurations when setting up a custom values file for Scalar Admin for Kubernetes.
+
+### Flag configurations
+
+You must specify several flags to `scalarAdminForKubernetes.commandArgs` as an array to run Scalar Admin for Kubernetes. For more details on the flags, see [README](https://github.com/scalar-labs/scalar-admin-for-kubernetes/blob/main/README.md) of Scalar Admin for Kubernetes.
+
+```yaml
+scalarAdminForKubernetes:
+  commandArgs:
+    - -r
+    - <SCALAR_PRODUCT_HELM_RELEASE_NAME>
+    - -n
+    - <NAMESPACE_OF_SCALAR_PRODUCT>
+    - -d
+    - <PAUSE_DURATION>
+    - -z
+    - <TIMEZONE>
+```
+
+## Optional configurations
+
+This section explains the optional configurations when setting up a custom values file for Scalar Admin for Kubernetes.
+
+### CronJob configurations (optional based on your environment)
+
+By default, the Scalar Admin for Kubernetes chart creates a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource to run the Scalar Admin for Kubernetes CLI tool once. If you want to run the Scalar Admin for Kubernetes CLI tool periodically by using [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/), you can set `scalarAdminForKubernetes.jobType` to `cronjob`. Also, you can set some configurations for the CronJob resource.
+
+```yaml
+scalarAdminForKubernetes:
+ cronJob:
+ timeZone: "Etc/UTC"
+ schedule: "0 0 * * *"
+```
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalarAdminForKubernetes.resources`.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalarAdminForKubernetes:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for Scalar Admin for Kubernetes pods, you can use `scalarAdminForKubernetes.securityContext` and `scalarAdminForKubernetes.podSecurityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalarAdminForKubernetes:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (default value is recommended)
+
+If you want to change the image repository, you can use `scalarAdminForKubernetes.image.repository` to specify the container repository information of the Scalar Admin for Kubernetes image that you want to pull.
+
+```yaml
+scalarAdminForKubernetes:
+ image:
+    repository: <SCALAR_ADMIN_FOR_KUBERNETES_CONTAINER_IMAGE>
+```
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalarAdminForKubernetes.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalarAdminForKubernetes:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-analytics-postgresql
+```
+
+### TLS configurations (optional based on your environment)
+
+You can enable TLS between Scalar Admin for Kubernetes and the pause targets (ScalarDB Cluster or ScalarDL) by using the following configurations:
+
+```yaml
+scalarAdminForKubernetes:
+ commandArgs:
+ - (omit other options)
+ - --tls
+ - --ca-root-cert-path
+ - /tls/certs/ca.crt
+ - --override-authority
+ - cluster.scalardb.example.com
+```
+
+You can mount the `/tls/certs/ca.crt` file on a pod by using a secret resource. To mount the file, specify the name of the secret resource that includes the root CA certificate file to `scalarAdminForKubernetes.tls.caRootCertSecret` as follows:
+
+```yaml
+scalarAdminForKubernetes:
+ tls:
+ caRootCertSecret: "scalar-admin-tls-ca"
+```
+
+In this case, you have to create a secret resource that includes the root CA certificate file for the pause targets (ScalarDB Cluster or ScalarDL) as follows:
+
+```console
+kubectl create secret generic scalar-admin-tls-ca --from-file=ca.crt=/path/to/your/ca/certificate/file -n <NAMESPACE>
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-manager.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-manager.mdx
new file mode 100644
index 00000000..06ea0970
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalar-manager.mdx
@@ -0,0 +1,126 @@
+---
+tags:
+ - Enterprise Option
+---
+
+# Configure a custom values file for Scalar Manager
+
+This document explains how to create your custom values file for the Scalar Manager chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-manager/README.md) of the Scalar Manager chart.
+
+## Required configurations
+
+### Service configurations
+
+You must set `service.type` to specify the Service resource type of Kubernetes. If you want to use a load balancer provided by cloud providers, you need to set `service.type` to `LoadBalancer`.
+
+```yaml
+service:
+ type: LoadBalancer
+```
+
+#### Security concerns regarding exposing Scalar Manager
+
+Setting `service.type` to `LoadBalancer` exposes Scalar Manager outside the cluster, which may be a security concern.
+
+Currently, Scalar Manager lacks authentication or access-control mechanisms. Because of this, exposing Scalar Manager directly to a public network without proper security measures can lead to unauthorized actions, because Scalar Manager provides features like scheduling jobs to pause Scalar products.
+
+If external access is necessary, using a private network or properly configuring network access to your Kubernetes cluster is recommended.
+
+### Image configurations
+
+You must set `api.image.repository` and `web.image.repository`. Be sure to specify the Scalar Manager container image so that you can pull the image from the container repository.
+
+```yaml
+api:
+ image:
+    repository: <SCALAR_MANAGER_API_CONTAINER_IMAGE>
+web:
+ image:
+    repository: <SCALAR_MANAGER_WEB_CONTAINER_IMAGE>
+```
+
+## Optional configurations
+
+This section explains the optional configurations when setting up a custom values file for Scalar Manager.
+
+### Scalar Manager configurations (optional based on your environment)
+
+You can override the `api.applicationProperties` custom value to change the default configurations of Scalar Manager.
+
+```yaml
+api:
+ applicationProperties: |
+ grafana.kubernetesServiceLabelName="app.kubernetes.io/name"
+ grafana.kubernetesServiceLabelValue="grafana"
+ grafana.kubernetesServicePortName="http-web"
+```
+
+Scalar Manager has default configurations that, for example, discover Scalar product deployments and the Prometheus and Loki services in the cluster. In most use cases, especially if you follow the [guide to deploy `kube-prometheus-stack`](getting-started-monitoring.mdx) and the [guide to deploy `loki-stack`](getting-started-logging.mdx), you don't need to change the default configurations.
+
+#### Properties that you can set in `api.applicationProperties`
+
+The configurations for Scalar Manager are in the format of Java application properties, which are `key=value` pairs. These application properties can be set by using the `api.applicationProperties` custom value in the Scalar Manager Helm Chart.
+
+|Name|Description|Default value|
+|---|---|---|
+|`grafana.kubernetesServiceLabelName`|The label name used to discover the Grafana service in Kubernetes|`app.kubernetes.io/name`|
+|`grafana.kubernetesServiceLabelValue`|The label value corresponding to `grafana.kubernetesServiceLabelName`|`grafana`|
+|`grafana.kubernetesServicePortName`|The port name used to discover the Grafana service port in Kubernetes|`http-web`|
+|`prometheus.kubernetesServiceLabelName`|The label name used to discover the Prometheus service in Kubernetes|`app`|
+|`prometheus.kubernetesServiceLabelValue`|The label value corresponding to `prometheus.kubernetesServiceLabelName`|`kube-prometheus-stack-prometheus`|
+|`prometheus.kubernetesServicePortName`|The port name used to discover the Prometheus service port in Kubernetes|`http-web`|
+|`loki.kubernetesServiceLabelName`|The label name used to discover the Loki service in Kubernetes|`app`|
+|`loki.kubernetesServiceLabelValue`|The label value corresponding to `loki.kubernetesServiceLabelName`|`loki`|
+|`loki.kubernetesServicePortName`|The port name used to discover the Loki service port in Kubernetes|`http-metrics`|
+|`helm.scalarRepositoryName`|The name used to represent the Scalar Helm repository|`scalar-labs`|
+|`helm.scalarRepositoryUrl`|The URL of the Scalar Helm repository|`https://scalar-labs.github.io/helm-charts`|
+|`helm.scalarAdminForKubernetesChartName`|The Helm Chart name of `Scalar Admin for Kubernetes` in the Scalar Helm repository|`scalar-admin-for-kubernetes`|
+|`helm.scalarAdminForKubernetesChartVersion`|The version of the Scalar Admin for Kubernetes Helm Chart|`1.0.0`|
+|`configMapNamespace`|The namespace where the ConfigMap used by Scalar Manager is deployed|`default`|
+|`configMapName`|The name of the ConfigMap used by Scalar Manager|`scalar-manager-metadata`|
+|`paused-state-retention.storage`|The storage type used to retain paused states|`configmap`|
+|`paused-state-retention.max-number`|The max number that the paused states are retained by Scalar Manager|`100`|
+
+##### Service discovery
+
+Scalar Manager uses label selectors to discover Grafana, Prometheus, and Loki services in Kubernetes, and then uses the port name to connect to them. You can modify the label selectors and the port name by setting the `*.kubernetesServiceLabelName`, `*.kubernetesServiceLabelValue`, and `*.kubernetesServicePortName` configurations.
+
+In general, you don't need to modify these configurations. However, if you customized the labels or port names of the Grafana, Prometheus, or Loki services when installing their Helm Charts, you should adjust these configurations to match your customizations.
+
+##### Use of Scalar Admin for Kubernetes
+
+Scalar Manager installs the [Scalar Admin for Kubernetes](https://github.com/scalar-labs/scalar-admin-for-kubernetes) Helm Chart to schedule or execute the pausing of jobs on Scalar products. It sets up the Scalar Helm repository and locates the Helm Chart by using the following configurations:
+
+- `helm.scalarRepositoryName`
+- `helm.scalarRepositoryUrl`
+- `helm.scalarAdminForKubernetesChartName`
+- `helm.scalarAdminForKubernetesChartVersion`
+
+:::note
+
+Modifying these configurations isn't recommended unless you have a specific reason to do so.
+
+:::
+
+##### ConfigMap used by Scalar Manager
+
+Scalar Manager uses ConfigMap to store metadata, such as the default application properties. ConfigMap is deployed in the namespace specified by the following configurations:
+
+- `configMapNamespace`
+- `configMapName`
+
+:::note
+
+Modifying these configurations isn't recommended unless you have a specific reason to do so, like if you're using a conflicting ConfigMap.
+
+:::
+
+##### Retention of paused states
+
+Scalar Manager retains the paused states of Scalar products in storage. Currently, Scalar Manager can only use ConfigMap as the storage, so the `paused-state-retention.storage` configuration must be set to `configmap`. You can adjust the maximum number of paused states that Scalar Manager retains by setting the `paused-state-retention.max-number` configuration.
+
+:::warning
+
+When using ConfigMap for storage, it is not recommended to set a large value for `paused-state-retention.max-number` due to the size limitation of ConfigMap. Data stored in a ConfigMap cannot exceed 1 MiB, which means that the maximum number of paused states that Scalar Manager can retain is approximately 3,000.
+
+:::
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-analytics-postgresql.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-analytics-postgresql.mdx
new file mode 100644
index 00000000..6d71b780
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-analytics-postgresql.mdx
@@ -0,0 +1,188 @@
+---
+tags:
+ - Community
+---
+
+# Configure a custom values file for ScalarDB Analytics with PostgreSQL
+
+This document explains how to create your custom values file for the ScalarDB Analytics with PostgreSQL chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-analytics-postgresql/README.md) of the ScalarDB Analytics with PostgreSQL chart.
+
+## Required configurations
+
+This section explains the required configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL.
+
+### Database configurations
+
+To access databases via ScalarDB Analytics with PostgreSQL, you must set the `scalardbAnalyticsPostgreSQL.databaseProperties` parameter by following the same syntax that you use to configure the `database.properties` file. For details about configurations, see [ScalarDB Configurations](https://scalardb.scalar-labs.com/docs/latest/configurations/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ databaseProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=${env:SCALAR_DB_USERNAME:-}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD:-}
+ scalar.db.storage=cassandra
+```
+
+### Database namespaces configurations
+
+You must set `schemaImporter.namespaces` to all the database namespaces that include tables you want to read via ScalarDB Analytics with PostgreSQL.
+
+```yaml
+schemaImporter:
+ namespaces:
+ - namespace1
+ - namespace2
+ - namespace3
+```
+
+## Optional configurations
+
+This section explains the optional configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL.
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.resources`.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (recommended in production environments)
+
+To use environment variables to set some properties, like credentials, in `scalardbAnalyticsPostgreSQL.databaseProperties`, you can use `scalardbAnalyticsPostgreSQL.secretName` to specify the secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure.
+
+For more details on how to use a secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ secretName: "scalardb-analytics-postgresql-credentials-secret"
+```
+
+### Affinity configurations (recommended in production environments)
+
+To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.affinity`.
+
+You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-analytics-postgresql
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-analytics-postgresql
+ topologyKey: kubernetes.io/hostname
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for ScalarDB Analytics with PostgreSQL pods, you can use `scalardbAnalyticsPostgreSQL.securityContext`, `scalardbAnalyticsPostgreSQL.podSecurityContext`, and `schemaImporter.securityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ podSecurityContext:
+ fsGroup: 201
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ runAsUser: 999
+ allowPrivilegeEscalation: false
+
+schemaImporter:
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (default value is recommended)
+
+If you want to change the image repository, you can use `scalardbAnalyticsPostgreSQL.image.repository` and `schemaImporter.image.repository` to specify the container repository information of the ScalarDB Analytics with PostgreSQL and Schema Importer images that you want to pull.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ image:
+    repository: <SCALARDB_ANALYTICS_WITH_POSTGRESQL_CONTAINER_IMAGE>
+
+schemaImporter:
+ image:
+    repository: <SCHEMA_IMPORTER_CONTAINER_IMAGE>
+```
+
+### Replica configurations (optional based on your environment)
+
+You can specify the number of ScalarDB Analytics with PostgreSQL replicas (pods) by using `scalardbAnalyticsPostgreSQL.replicaCount`.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ replicaCount: 3
+```
+
+### PostgreSQL database name configuration (optional based on your environment)
+
+You can specify the database name that you create in PostgreSQL. Schema Importer creates some objects, such as a view of ScalarDB Analytics with PostgreSQL, in this database.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ postgresql:
+ databaseName: scalardb
+```
+
+### PostgreSQL superuser password configuration (optional based on your environment)
+
+You can specify the secret name that includes the superuser password for PostgreSQL.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ postgresql:
+ secretName: scalardb-analytics-postgresql-superuser-password
+```
+
+:::note
+
+You must create a secret resource with this name (`scalardb-analytics-postgresql-superuser-password` by default) before you deploy ScalarDB Analytics with PostgreSQL. For details, see [Prepare a secret resource](how-to-deploy-scalardb-analytics-postgresql.mdx#prepare-a-secret-resource).
+
+:::
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-analytics-postgresql
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-cluster.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-cluster.mdx
new file mode 100644
index 00000000..4a74f59c
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-cluster.mdx
@@ -0,0 +1,375 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Configure a custom values file for ScalarDB Cluster
+
+This document explains how to create your custom values file for the ScalarDB Cluster chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-cluster/README.md) of the ScalarDB Cluster chart.
+
+## Required configurations
+
+### Image configurations
+
+You must set `scalardbCluster.image.repository`. Be sure to specify the ScalarDB Cluster container image so that you can pull the image from the container repository.
+
+```yaml
+scalardbCluster:
+ image:
+ repository:
+```
+
+### Database configurations
+
+You must set `scalardbCluster.scalardbClusterNodeProperties`. Please set `scalardb-cluster-node.properties` to this parameter. For more details on the configurations of ScalarDB Cluster, see [ScalarDB Cluster Configurations](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/scalardb-cluster-configurations/).
+
+```yaml
+scalardbCluster:
+ scalardbClusterNodeProperties: |
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+ scalar.db.contact_points=localhost
+ scalar.db.username=${env:SCALAR_DB_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD}
+ scalar.db.storage=cassandra
+```
+
+Note that you must always set the following three properties if you deploy ScalarDB Cluster in a Kubernetes environment by using Scalar Helm Chart. These properties are fixed values. Since the properties don't depend on individual environments, you can set the same values by copying the following values and pasting them in `scalardbCluster.scalardbClusterNodeProperties`.
+
+```yaml
+scalardbCluster:
+ scalardbClusterNodeProperties: |
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+```
+
+## Optional configurations
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalardbCluster.resources`.
+
+Note that, for commercial licenses, the resources for each pod of Scalar products are limited to 2vCPU / 4GB memory. Also, if you use the pay-as-you-go containers that the AWS Marketplace provides, you will not be able to run any containers that exceed the 2vCPU / 4GB memory configuration in `resources.limits`. If you exceed this resource limitation, the pods will automatically stop.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalardbCluster:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (recommended in production environments)
+
+To use environment variables to set some properties (e.g., credentials) in `scalardbCluster.scalardbClusterNodeProperties`, you can use `scalardbCluster.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure.
+
+For more details on how to use a Secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx).
+
+```yaml
+scalardbCluster:
+ secretName: "scalardb-cluster-credentials-secret"
+```
+
+### Affinity configurations (recommended in production environments)
+
+To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbCluster.affinity`.
+
+You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+```yaml
+scalardbCluster:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-cluster
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-cluster
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus and Grafana configurations (recommended in production environments)
+
+To monitor ScalarDB Cluster pods by using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can set `scalardbCluster.grafanaDashboard.enabled`, `scalardbCluster.serviceMonitor.enabled`, and `scalardbCluster.prometheusRule.enabled` to `true`. When you set these configurations to `true`, the chart deploys the necessary resources and kube-prometheus-stack starts monitoring automatically.
+
+```yaml
+scalardbCluster:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for ScalarDB Cluster pods, you can use `scalardbCluster.securityContext` and `scalardbCluster.podSecurityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalardbCluster:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### TLS configurations (optional based on your environment)
+
+You can enable TLS in:
+
+- The communications between the ScalarDB Cluster node and clients.
+- The communications between all ScalarDB Cluster nodes (the cluster's internal communications).
+
+In addition, you have several options for certificate management. For more details, see [TLS configurations for Envoy](./configure-custom-values-envoy.mdx#tls-configurations-optional-based-on-your-environment).
+
+You should consider which method you use based on your security requirements. For guidance and related documentation for each method, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+  A[Do you want to use<br />cert-manager to manage your<br />private key and certificate<br />files automatically?]
+  A -->|Yes, I want to manage my<br />certificates automatically.| B
+  A -->|No, I want to manage my<br />certificates manually by myself.| C
+  B[Do you want to use a<br />self-signed CA or a trusted CA?]
+  C[Do you want to use a<br />self-signed CA or a trusted CA?]
+  B -->|I want to use a<br />self-signed CA.| D
+  B -->|I want to use a<br />trusted CA.| E
+  C -->|I want to use a<br />self-signed CA.| F
+  C -->|I want to use a<br />trusted CA.| G
+  D[See the Use a self-signed<br />CA with cert-manager to<br />manage your private key and<br />certificate files section.]
+  E[See the Use a trusted<br />CA with cert-manager to<br />manage private key and<br />certificate files section.]
+  F[See the Use your private<br />key and certificate files<br />section, and use the self-signed<br />certificate you generated.]
+  G[See the Use your private key<br />and certificate files section,<br />and use the trusted certificate<br />generated by the third party.]
+```
+
+#### Enable TLS
+
+You can enable TLS in all ScalarDB Cluster connections by using the following configurations:
+
+```yaml
+scalardbCluster:
+ scalardbClusterNodeProperties: |
+ ...(omit)...
+ scalar.db.cluster.tls.enabled=true
+ scalar.db.cluster.tls.ca_root_cert_path=/tls/scalardb-cluster/certs/ca.crt
+ scalar.db.cluster.node.tls.cert_chain_path=/tls/scalardb-cluster/certs/tls.crt
+ scalar.db.cluster.node.tls.private_key_path=/tls/scalardb-cluster/certs/tls.key
+ scalar.db.cluster.tls.override_authority=
+ tls:
+ enabled: true
+```
+
+##### Use your private key and certificate files
+
+You can set your private key and certificate files by using the following configurations:
+
+```yaml
+scalardbCluster:
+ tls:
+ enabled: true
+ caRootCertSecret: "scalardb-cluster-tls-ca"
+ certChainSecret: "scalardb-cluster-tls-cert"
+ privateKeySecret: "scalardb-cluster-tls-key"
+```
+
+In this case, you have to create secret resources that include private key and certificate files for ScalarDB Cluster as follows, replacing the contents in the angle brackets as described:
+
+```console
+kubectl create secret generic scalardb-cluster-tls-ca --from-file=ca.crt=/<PATH_TO_YOUR_CA_CERTIFICATE_FILE_FOR_SCALARDB_CLUSTER> -n <NAMESPACE>
+kubectl create secret generic scalardb-cluster-tls-cert --from-file=tls.crt=/<PATH_TO_YOUR_CERTIFICATE_FILE_FOR_SCALARDB_CLUSTER> -n <NAMESPACE>
+kubectl create secret generic scalardb-cluster-tls-key --from-file=tls.key=/<PATH_TO_YOUR_PRIVATE_KEY_FILE_FOR_SCALARDB_CLUSTER> -n <NAMESPACE>
+```
+
+For more details on how to prepare private key and certificate files, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx).
+
+##### Use a trusted CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and certificate files with cert-manager by using the following configurations, replacing the content in the angle brackets as described:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager and prepare the `Issuers` resource. For details, see the cert-manager documentation, [Installation](https://cert-manager.io/docs/installation/) and [Issuer Configuration](https://cert-manager.io/docs/configuration/).
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. The default certificate configuration is recommended, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. For details, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+scalardbCluster:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+        name: <YOUR_TRUSTED_CA>
+ dnsNames:
+ - cluster.scalardb.example.com
+```
+
+In this case, cert-manager issues your private key and certificate files by using your trusted issuer. You don't need to mount your private key and certificate files manually.
+
+##### Use a self-signed CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and self-signed certificate files with cert-manager by using the following configurations:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager. For more details on how to deploy cert-manager, see the [Installation](https://cert-manager.io/docs/installation/) in the cert-manager official document.
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. We recommend the default certificate configuration, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. See [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+scalardbCluster:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ selfSigned:
+ enabled: true
+ dnsNames:
+ - cluster.scalardb.example.com
+```
+
+In this case, Scalar Helm Charts and cert-manager issue your private key and self-signed certificate files. You don't need to mount your private key and certificate files manually.
+
+##### Set custom authority for TLS communications
+
+You can set the custom authority for TLS communications by using `scalardbCluster.tls.overrideAuthority`. This value doesn't change what host is actually connected. This value is intended for testing but may safely be used outside of tests as an alternative to DNS overrides. For example, you can specify the hostname presented in the certificate chain file that you set by using `scalardbCluster.tls.certChainSecret`. This chart uses this value for `startupProbe` and `livenessProbe`.
+
+```yaml
+scalardbCluster:
+ tls:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+```
+
+##### Set a root CA certificate for Prometheus Operator
+
+If you set `scalardbCluster.serviceMonitor.enabled=true` and `scalardbCluster.tls.enabled=true` (in other words, if you monitor ScalarDB Cluster with TLS configuration by using Prometheus Operator), you must set the secret name to `scalardbCluster.tls.caRootCertSecretForServiceMonitor`.
+
+```yaml
+scalardbCluster:
+ tls:
+ enabled: true
+ caRootCertSecretForServiceMonitor: "scalardb-cluster-tls-ca-for-prometheus"
+```
+
+In this case, you have to create secret resources that include a root CA certificate for ScalarDB Cluster in the same namespace as Prometheus as follows:
+
+```console
+kubectl create secret generic scalardb-cluster-tls-ca-for-prometheus --from-file=ca.crt=/path/to/your/ca/certificate/file -n <NAMESPACE_SAME_AS_PROMETHEUS>
+```
+
+### Replica configurations (optional based on your environment)
+
+You can specify the number of ScalarDB Cluster replicas (pods) by using `scalardbCluster.replicaCount`.
+
+```yaml
+scalardbCluster:
+ replicaCount: 3
+```
+
+### Logging configurations (optional based on your environment)
+
+To change the ScalarDB Cluster log level, you can use `scalardbCluster.logLevel`.
+
+```yaml
+scalardbCluster:
+ logLevel: INFO
+```
+
+### GraphQL configurations (optional based on your environment)
+
+To use the GraphQL feature in ScalarDB Cluster, you can set `scalardbCluster.graphql.enabled` to `true` to deploy some resources for the GraphQL feature. Note that you also need to set `scalar.db.graphql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties` when using the GraphQL feature.
+
+```yaml
+scalardbCluster:
+ graphql:
+ enabled: true
+```
+
+Also, you can configure the `Service` resource that accepts GraphQL requests from clients.
+
+```yaml
+scalardbCluster:
+ graphql:
+ service:
+ type: ClusterIP
+ annotations: {}
+ ports:
+ graphql:
+ port: 8080
+ targetPort: 8080
+ protocol: TCP
+```
+
+### SQL configurations (optional based on your environment)
+
+To use the SQL feature in ScalarDB Cluster, there is no configuration necessary for custom values files. You can use the feature by setting `scalar.db.sql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties`.
+
+### Scalar Envoy configurations (optional based on your environment)
+
+To use ScalarDB Cluster with `indirect` mode, you must enable Envoy as follows.
+
+```yaml
+envoy:
+ enabled: true
+```
+
+Also, you must set the Scalar Envoy configurations in the custom values file for ScalarDB Cluster. This is because clients need to send requests to ScalarDB Cluster via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDB Cluster in a Kubernetes environment with `indirect` mode.
+
+For more details on Scalar Envoy configurations, see [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.mdx).
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+scalardbCluster:
+ configurationsForScalarDbCluster:
+ ...
+```
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardbCluster.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardbCluster:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-cluster
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-graphql.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-graphql.mdx
new file mode 100644
index 00000000..34b4f26a
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb-graphql.mdx
@@ -0,0 +1,223 @@
+---
+tags:
+  - Enterprise Standard
+  - Enterprise Premium
+  - Deprecated
+---
+
+# [Deprecated] Configure a custom values file for ScalarDB GraphQL
+
+:::note
+
+ScalarDB GraphQL Server is now deprecated. Please use [ScalarDB Cluster](configure-custom-values-scalardb-cluster.mdx) instead.
+
+:::
+
+This document explains how to create your custom values file for the ScalarDB GraphQL chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-graphql/README.md) of the ScalarDB GraphQL chart.
+
+## Required configurations
+
+### Ingress configuration
+
+You must set `ingress` to listen for client requests. When you deploy multiple GraphQL servers, session affinity is required to handle transactions properly. This is because GraphQL servers keep the transactions in memory, so GraphQL queries that use continued transactions must be routed to the same server that started the transaction.
+
+For example, if you use NGINX Ingress Controller, you can set ingress configurations as follows.
+
+```yaml
+ingress:
+ enabled: true
+ className: nginx
+ annotations:
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: INGRESSCOOKIE
+ nginx.ingress.kubernetes.io/session-cookie-hash: sha1
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "300"
+ hosts:
+ - host: ""
+ paths:
+ - path: /graphql
+ pathType: Exact
+```
+
+If you use ALB of AWS, you can set ingress configurations as follows.
+
+```yaml
+ingress:
+ enabled: true
+ className: alb
+ annotations:
+ alb.ingress.kubernetes.io/scheme: internal
+ alb.ingress.kubernetes.io/target-group-attributes: stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=60
+ alb.ingress.kubernetes.io/target-type: ip
+ alb.ingress.kubernetes.io/healthcheck-path: /graphql?query=%7B__typename%7D
+ hosts:
+ - host: ""
+ paths:
+ - path: /graphql
+ pathType: Exact
+```
+
+### Image configurations
+
+You must set `image.repository`. Be sure to specify the ScalarDB GraphQL container image so that you can pull the image from the container repository.
+
+```yaml
+image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+* [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+### Database configurations
+
+You must set `scalarDbGraphQlConfiguration`.
+
+If you use ScalarDB Server with ScalarDB GraphQL (recommended), you must set the configuration to access the ScalarDB Server pods.
+
+```yaml
+scalarDbGraphQlConfiguration:
+  contactPoints: <SCALARDB_SERVER_HOST>
+  contactPort: 60051
+  storage: "grpc"
+  transactionManager: "grpc"
+  namespaces: <NAMESPACES_FOR_GRAPHQL>
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `resources`.
+
+Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-graphql
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDB GraphQL pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `grafanaDashboard.enabled`, `serviceMonitor.enabled`, and `prometheusRule.enabled`.
+
+```yaml
+grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDB GraphQL pods, you can use `securityContext` and `podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+
+securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### GraphQL Server configurations (Optional based on your environment)
+
+If you want to change the path to run GraphQL queries, you can use `scalarDbGraphQlConfiguration.path`. By default, you can run GraphQL queries using `http://<HOST>:80/graphql`.
+
+You can also enable/disable [GraphiQL](https://github.com/graphql/graphiql/tree/main/packages/graphiql) using `scalarDbGraphQlConfiguration.graphiql`.
+
+```yaml
+scalarDbGraphQlConfiguration:
+ path: /graphql
+ graphiql: "true"
+```
+
+### TLS configurations (Optional based on your environment)
+
+If you want to use TLS between the client and the ingress, you can use `ingress.tls`.
+
+You must create a Secret resource that includes a secret key and a certificate file. Please refer to the official document [Ingress - TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) for more details on the Secret resource for Ingress.
+
+```yaml
+ingress:
+ tls:
+ - hosts:
+ - foo.example.com
+ - bar.example.com
+ - bax.example.com
+ secretName: graphql-ingress-tls
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDB GraphQL using `replicaCount`.
+
+```yaml
+replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDB GraphQL, you can use `scalarDbGraphQlConfiguration.logLevel`.
+
+```yaml
+scalarDbGraphQlConfiguration:
+ logLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb.mdx
new file mode 100644
index 00000000..36ab527a
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardb.mdx
@@ -0,0 +1,201 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+ - Deprecated
+---
+
+# [Deprecated] Configure a custom values file for ScalarDB Server
+
+:::note
+
+ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](configure-custom-values-scalardb-cluster.mdx) instead.
+
+:::
+
+This document explains how to create your custom values file for the ScalarDB Server chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb/README.md) of the ScalarDB Server chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDB Server. This is because client requests are sent to ScalarDB Server via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDB Server on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.mdx) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+scalardb:
+ configurationsForScalarDB:
+ ...
+```
+
+### Image configurations
+
+You must set `scalardb.image.repository`. Be sure to specify the ScalarDB Server container image so that you can pull the image from the container repository.
+
+```yaml
+scalardb:
+ image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+* [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+### Database configurations
+
+You must set `scalardb.databaseProperties`. Please set your `database.properties` to this parameter. Please refer to the [Configure ScalarDB Server](https://scalardb.scalar-labs.com/docs/latest/scalardb-server#configure-scalardb-server) for more details on the configuration of ScalarDB Server.
+
+```yaml
+scalardb:
+ databaseProperties: |
+ scalar.db.server.port=60051
+ scalar.db.server.prometheus_exporter_port=8080
+ scalar.db.server.grpc.max_inbound_message_size=
+ scalar.db.server.grpc.max_inbound_metadata_size=
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.db.transaction_manager=consensus-commit
+ scalar.db.consensus_commit.isolation_level=SNAPSHOT
+ scalar.db.consensus_commit.serializable_strategy=
+ scalar.db.consensus_commit.include_metadata.enabled=false
+```
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `scalardb.resources`.
+
+Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+scalardb:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `scalardb.databaseProperties`, you can use `scalardb.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx) for more details on how to use a Secret resource.
+
+```yaml
+scalardb:
+ secretName: "scalardb-credentials-secret"
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `scalardb.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+scalardb:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDB Server pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `scalardb.grafanaDashboard.enabled`, `scalardb.serviceMonitor.enabled`, and `scalardb.prometheusRule.enabled`.
+
+```yaml
+scalardb:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDB Server pods, you can use `scalardb.securityContext` and `scalardb.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+scalardb:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDB Server using `scalardb.replicaCount`.
+
+```yaml
+scalardb:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDB Server, you can use `scalardb.storageConfiguration.dbLogLevel`.
+
+```yaml
+scalardb:
+ storageConfiguration:
+ dbLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardb.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardb:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-auditor.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-auditor.mdx
new file mode 100644
index 00000000..86b77c2c
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-auditor.mdx
@@ -0,0 +1,362 @@
+---
+---
+
+# Configure a custom values file for ScalarDL Auditor
+
+This document explains how to create your custom values file for the ScalarDL Auditor chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl-audit/README.md) of the ScalarDL Auditor chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDL Auditor. This is because client requests are sent to ScalarDL Auditor via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Auditor on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.mdx) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+auditor:
+ configurationsForScalarDLAuditor:
+ ...
+```
+
+### Image configurations
+
+You must set `auditor.image.repository`. Be sure to specify the ScalarDL Auditor container image so that you can pull the image from the container repository.
+
+```yaml
+auditor:
+ image:
+ repository:
+```
+
+For more details on the container repository for Scalar products, see [How to get the container images of Scalar products](../scalar-kubernetes/HowToGetContainerImages.mdx).
+
+### Auditor/Database configurations
+
+You must set `auditor.auditorProperties`. Please set your `auditor.properties` to this parameter. Please refer to the [auditor.properties](https://github.com/scalar-labs/scalar/blob/master/auditor/conf/auditor.properties) for more details on the configuration of ScalarDL Auditor.
+
+```yaml
+auditor:
+ auditorProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.dl.auditor.ledger.host=
+ scalar.dl.auditor.private_key_path=/keys/auditor-key-file
+ scalar.dl.auditor.cert_path=/keys/auditor-cert-file
+```
+
+### Key/Certificate configurations
+
+You must set a private key file to `scalar.dl.auditor.private_key_path` and a certificate file to `scalar.dl.auditor.cert_path`.
+
+You must also mount the private key file and the certificate file on the ScalarDL Auditor pod.
+
+For more details on how to mount the private key file and the certificate file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](mount-files-or-volumes-on-scalar-pods.mdx#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts).
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `auditor.resources`.
+
+Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+auditor:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `auditor.auditorProperties`, you can use `auditor.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx) for more details on how to use a Secret resource.
+
+```yaml
+auditor:
+ secretName: "auditor-credentials-secret"
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `auditor.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+auditor:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardl-audit
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - auditor
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDL Auditor pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `auditor.grafanaDashboard.enabled`, `auditor.serviceMonitor.enabled`, and `auditor.prometheusRule.enabled`.
+
+```yaml
+auditor:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDL Auditor pods, you can use `auditor.securityContext` and `auditor.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+auditor:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### TLS configurations (optional based on your environment)
+
+You can enable TLS in:
+
+- The communications between the ScalarDL Auditor and clients.
+- The communications between the ScalarDL Ledger and ScalarDL Auditor.
+
+In addition, you have several options for certificate management. For more details, see [TLS configurations for Envoy](./configure-custom-values-envoy.mdx#tls-configurations-optional-based-on-your-environment).
+
+You should consider which method you use based on your security requirements. For guidance and related documentation for each method, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+ A[Do you want to use<br />cert-manager to manage your<br />private key and certificate<br />files automatically?]
+ A -->|Yes, I want to manage my<br />certificates automatically.| B
+ A -->|No, I want to manage my<br />certificates manually by myself.| C
+ B[Do you want to use a<br />self-signed CA or a trusted CA?]
+ C[Do you want to use a<br />self-signed CA or a trusted CA?]
+ B -->|I want to use a<br />self-signed CA.| D
+ B -->|I want to use a<br />trusted CA.| E
+ C -->|I want to use a<br />self-signed CA.| F
+ C -->|I want to use a<br />trusted CA.| G
+ D[See the Use a self-signed<br />CA with cert-manager to<br />manage your private key and<br />certificate files section.]
+ E[See the Use a trusted<br />CA with cert-manager to<br />manage private key and<br />certificate files section.]
+ F[See the Use your private<br />key and certificate files<br />section, and use the self-signed<br />certificate you generated.]
+ G[See the Use your private key<br />and certificate files section,<br />and use the trusted certificate<br />generated by the third party.]
+```
+
+#### Enable TLS
+
+You can enable TLS in all ScalarDL Auditor connections by using the following configurations:
+
+```yaml
+auditor:
+ auditorProperties: |
+ ...(omit)...
+ scalar.dl.auditor.server.tls.enabled=true
+ scalar.dl.auditor.server.tls.cert_chain_path=/tls/scalardl-auditor/certs/tls.crt
+ scalar.dl.auditor.server.tls.private_key_path=/tls/scalardl-auditor/certs/tls.key
+ scalar.dl.auditor.tls.enabled=true
+ scalar.dl.auditor.tls.ca_root_cert_path=/tls/scalardl-ledger/certs/ca.crt
+ scalar.dl.auditor.tls.override_authority=envoy.scalar.example.com
+ tls:
+ enabled: true
+```
+
+##### Use your private key and certificate files
+
+You can set your private key and certificate files by using the following configurations:
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ caRootCertSecret: "scalardl-auditor-tls-ca"
+ certChainSecret: "scalardl-auditor-tls-cert"
+ privateKeySecret: "scalardl-auditor-tls-key"
+```
+
+In this case, you have to create secret resources that include private key and certificate files for ScalarDL Ledger and ScalarDL Auditor as follows, replacing the contents in the angle brackets as described:
+
+```console
+kubectl create secret generic scalardl-auditor-tls-ca --from-file=ca.crt=/<PATH_TO_YOUR_CA_CERTIFICATE_FILE_FOR_SCALARDL_AUDITOR> -n <NAMESPACE>
+kubectl create secret generic scalardl-auditor-tls-cert --from-file=tls.crt=/<PATH_TO_YOUR_CERTIFICATE_FILE_FOR_SCALARDL_AUDITOR> -n <NAMESPACE>
+kubectl create secret generic scalardl-auditor-tls-key --from-file=tls.key=/<PATH_TO_YOUR_PRIVATE_KEY_FILE_FOR_SCALARDL_AUDITOR> -n <NAMESPACE>
+kubectl create secret generic scalardl-auditor-tls-ca-for-ledger --from-file=ca.crt=/<PATH_TO_YOUR_CA_CERTIFICATE_FILE_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+```
+
+For more details on how to prepare private key and certificate files, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx).
+
+##### Use a trusted CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and certificate files with cert-manager by using the following configurations, replacing the content in the angle brackets as described:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager and prepare the `Issuers` resource. For details, see the cert-manager documentation, [Installation](https://cert-manager.io/docs/installation/) and [Issuer Configuration](https://cert-manager.io/docs/configuration/).
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. The default certificate configuration is recommended, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. For details, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+ name: <YOUR_TRUSTED_CA>
+ dnsNames:
+ - auditor.scalardl.example.com
+```
+
+In this case, cert-manager issues your private key and certificate files by using your trusted issuer. You don't need to mount private key and certificate files manually.
+
+##### Use a self-signed CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and self-signed certificate files with cert-manager by using the following configurations:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager. For details, see the cert-manager documentation, [Installation](https://cert-manager.io/docs/installation/).
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. The default certificate configuration is recommended, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. For details, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ selfSigned:
+ enabled: true
+ dnsNames:
+ - auditor.scalardl.example.com
+```
+
+In this case, Scalar Helm Charts and cert-manager issue your private key and self-signed certificate files. You don't need to mount private key and certificate files manually.
+
+#### Set a root CA certificate for ScalarDL Ledger
+
+If you enable TLS on the ScalarDL Ledger side, you must set a root CA certificate file for Envoy in front of ScalarDL Ledger to access it from ScalarDL Auditor. To determine which approach you should take, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+ A[Are you using cert-manager?]
+ A -->|Yes| B
+ A -->|No| D
+ B[Are you using a self-signed CA with cert-manager?]
+ B -->|No| C[Are you using the same trusted CA for ScalarDL<br />Ledger and ScalarDL Auditor with cert-manager?]
+ C -->|No| D[You must set the root<br />CA certificate of Envoy for ScalarDL Ledger manually.]
+ C ---->|Yes| E[Scalar Helm Chart automatically sets the root CA certificate. You<br />don't need to set `auditor.tls.caRootCertForLedgerSecret` explicitly.]
+```
+
+If you need to set the root CA certificate file of Envoy manually, you can set it by using the following configurations:
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ caRootCertForLedgerSecret: "scalardl-auditor-tls-ca-for-ledger"
+```
+
+In this case, you have to create secret resources that include root CA certificate files as follows, replacing the contents in the angle brackets as described:
+
+```console
+kubectl create secret generic scalardl-auditor-tls-ca-for-ledger --from-file=ca.crt=/<PATH_TO_ROOT_CA_CERTIFICATE_FILE_OF_ENVOY_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+```
+
+##### Set custom authority for TLS communications
+
+You can set the custom authority for TLS communications by using `auditor.tls.overrideAuthority`. This value doesn't change what host is actually connected. This value is intended for testing but may safely be used outside of tests as an alternative to DNS overrides. For example, you can specify the hostname presented in the certificate chain file that you set by using `auditor.tls.certChainSecret`. This chart uses this value for `startupProbe` and `livenessProbe`.
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ overrideAuthority: "auditor.scalardl.example.com"
+```
+
+##### Set a root CA certificate for Prometheus Operator
+
+If you set `auditor.serviceMonitor.enabled=true` and `auditor.tls.enabled=true` (in other words, if you monitor ScalarDL Auditor with TLS configuration by using Prometheus Operator), you must set the secret name to `auditor.tls.caRootCertSecretForServiceMonitor`.
+
+```yaml
+auditor:
+ tls:
+ enabled: true
+ caRootCertSecretForServiceMonitor: "scalardl-auditor-tls-ca-for-prometheus"
+```
+
+In this case, you have to create secret resources that include a root CA certificate for ScalarDL Auditor in the same namespace as Prometheus as follows:
+
+```console
+kubectl create secret generic scalardl-auditor-tls-ca-for-prometheus --from-file=ca.crt=/path/to/your/ca/certificate/file -n <NAMESPACE_SAME_AS_PROMETHEUS>
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDL Auditor using `auditor.replicaCount`.
+
+```yaml
+auditor:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDL Auditor, you can use `auditor.scalarAuditorConfiguration.auditorLogLevel`.
+
+```yaml
+auditor:
+ scalarAuditorConfiguration:
+ auditorLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `auditor.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+auditor:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardl-auditor
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-ledger.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-ledger.mdx
new file mode 100644
index 00000000..bf328028
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-ledger.mdx
@@ -0,0 +1,335 @@
+---
+---
+
+# Configure a custom values file for ScalarDL Ledger
+
+This document explains how to create your custom values file for the ScalarDL Ledger chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl/README.md) of the ScalarDL Ledger chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDL Ledger. This is because client requests are sent to ScalarDL Ledger via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Ledger on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.mdx) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+ledger:
+ configurationsForScalarDLLedger:
+ ...
+```
+
+### Image configurations
+
+You must set `ledger.image.repository`. Be sure to specify the ScalarDL Ledger container image so that you can pull the image from the container repository.
+
+```yaml
+ledger:
+ image:
+ repository:
+```
+
+For more details on the container repository for Scalar products, see [How to get the container images of Scalar products](../scalar-kubernetes/HowToGetContainerImages.mdx).
+
+### Ledger/Database configurations
+
+You must set `ledger.ledgerProperties`. Please set your `ledger.properties` to this parameter. Please refer to the [ledger.properties](https://github.com/scalar-labs/scalar/blob/master/ledger/conf/ledger.properties) for more details on the configuration of ScalarDL Ledger.
+
+```yaml
+ledger:
+ ledgerProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/ledger-key-file
+```
+
+### Key/Certificate configurations
+
+If you set `scalar.dl.ledger.proof.enabled` to `true` (this configuration is required if you use ScalarDL Auditor), you must set a private key file to `scalar.dl.ledger.proof.private_key_path`.
+
+In this case, you must mount the private key file on the ScalarDL Ledger pod.
+
+For more details on how to mount the private key file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](mount-files-or-volumes-on-scalar-pods.mdx#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts).
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `ledger.resources`.
+
+Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+ledger:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `ledger.ledgerProperties`, you can use `ledger.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx) for more details on how to use a Secret resource.
+
+```yaml
+ledger:
+ secretName: "ledger-credentials-secret"
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `ledger.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+ledger:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardl
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - ledger
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDL Ledger pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `ledger.grafanaDashboard.enabled`, `ledger.serviceMonitor.enabled`, and `ledger.prometheusRule.enabled`.
+
+```yaml
+ledger:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDL Ledger pods, you can use `ledger.securityContext` and `ledger.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+ledger:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### TLS configurations (optional based on your environment)
+
+You can enable TLS in:
+
+- The communications between the ScalarDL Ledger and clients.
+- The communications between the ScalarDL Ledger and ScalarDL Auditor.
+
+In addition, you have several options for certificate management. For more details, see [TLS configurations for Envoy](./configure-custom-values-envoy.mdx#tls-configurations-optional-based-on-your-environment).
+
+You should consider which method you use based on your security requirements. For guidance and related documentation for each method, refer to the following decision tree:
+
+```mermaid
+flowchart TD
+ A[Do you want to use<br />cert-manager to manage your<br />private key and certificate<br />files automatically?]
+ A -->|Yes, I want to manage my<br />certificates automatically.| B
+ A -->|No, I want to manage my<br />certificates manually by myself.| C
+ B[Do you want to use a<br />self-signed CA or a trusted CA?]
+ C[Do you want to use a<br />self-signed CA or a trusted CA?]
+ B -->|I want to use a<br />self-signed CA.| D
+ B -->|I want to use a<br />trusted CA.| E
+ C -->|I want to use a<br />self-signed CA.| F
+ C -->|I want to use a<br />trusted CA.| G
+ D[See the Use a self-signed<br />CA with cert-manager to<br />manage your private key and<br />certificate files section.]
+ E[See the Use a trusted<br />CA with cert-manager to<br />manage private key and<br />certificate files section.]
+ F[See the Use your private<br />key and certificate files<br />section, and use the self-signed<br />certificate you generated.]
+ G[See the Use your private key<br />and certificate files section,<br />and use the trusted certificate<br />generated by the third party.]
+```
+
+#### Enable TLS
+
+You can enable TLS in all ScalarDL Ledger connections by using the following configurations:
+
+```yaml
+ledger:
+ ledgerProperties: |
+ ...(omit)...
+ scalar.dl.ledger.server.tls.enabled=true
+ scalar.dl.ledger.server.tls.cert_chain_path=/tls/scalardl-ledger/certs/tls.crt
+ scalar.dl.ledger.server.tls.private_key_path=/tls/scalardl-ledger/certs/tls.key
+ tls:
+ enabled: true
+```
+
+##### Use your private key and certificate files
+
+You can set your private key and certificate files by using the following configurations:
+
+```yaml
+ledger:
+ tls:
+ enabled: true
+ caRootCertSecret: "scalardl-ledger-tls-ca"
+ certChainSecret: "scalardl-ledger-tls-cert"
+ privateKeySecret: "scalardl-ledger-tls-key"
+```
+
+In this case, you have to create secret resources that include private key and certificate files for ScalarDL Ledger as follows, replacing the contents in the angle brackets as described:
+
+```console
+kubectl create secret generic scalardl-ledger-tls-ca --from-file=ca.crt=/<PATH_TO_YOUR_CA_CERTIFICATE_FILE_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+kubectl create secret generic scalardl-ledger-tls-cert --from-file=tls.crt=/<PATH_TO_YOUR_CERTIFICATE_FILE_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+kubectl create secret generic scalardl-ledger-tls-key --from-file=tls.key=/<PATH_TO_YOUR_PRIVATE_KEY_FILE_FOR_SCALARDL_LEDGER> -n <NAMESPACE>
+```
+
+For more details on how to prepare private key and certificate files, see [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx).
+
+##### Use a trusted CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and certificate files with cert-manager by using the following configurations:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager and prepare the `Issuers` resource. For more details on cert-manager, see the [Installation](https://cert-manager.io/docs/installation/) and [Issuer Configuration](https://cert-manager.io/docs/configuration/) in the cert-manager official document.
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. We recommend the default certificate configuration, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. See [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+ledger:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+ name: your-trusted-ca
+ dnsNames:
+ - ledger.scalardl.example.com
+```
+
+In this case, cert-manager issues private key and certificate by using your trusted issuer. You don't need to mount private key and certificate files manually.
+
+##### Use a self-signed CA with cert-manager to manage your private key and certificate files
+
+You can manage your private key and self-signed certificate files with cert-manager by using the following configurations:
+
+:::note
+
+* If you want to use cert-manager, you must deploy cert-manager. For more details on how to deploy cert-manager, see the [Installation](https://cert-manager.io/docs/installation/) in the cert-manager official document.
+* By default, Scalar Helm Chart creates a `Certificate` resource that satisfies the certificate requirements of Scalar products. We recommend the default certificate configuration, but if you use a custom certificate configuration, you must satisfy the certificate requirements of Scalar products. See [How to create private key and certificate files for Scalar products](../scalar-kubernetes/HowToCreateKeyAndCertificateFiles.mdx#certificate-requirements).
+
+:::
+
+```yaml
+ledger:
+ tls:
+ enabled: true
+ certManager:
+ enabled: true
+ selfSigned:
+ enabled: true
+ dnsNames:
+ - ledger.scalardl.example.com
+```
+
+In this case, Scalar Helm Charts and cert-manager issue private key and self-signed certificate. You don't need to mount private key and certificate files manually.
+
+##### Set custom authority for TLS communications
+
+You can set the custom authority for TLS communications by using `ledger.tls.overrideAuthority`. This value doesn't change what host is actually connected. This value is intended for testing but may safely be used outside of tests as an alternative to DNS overrides. For example, you can specify the hostname presented in the certificate chain file that you set by using `ledger.tls.certChainSecret`. This chart uses this value for `startupProbe` and `livenessProbe`.
+
+```yaml
+ledger:
+ tls:
+ enabled: true
+ overrideAuthority: "ledger.scalardl.example.com"
+```
+
+##### Set a root CA certificate for Prometheus Operator
+
+If you set `ledger.serviceMonitor.enabled=true` and `ledger.tls.enabled=true` (in other words, if you monitor ScalarDL Ledger with TLS configuration by using Prometheus Operator), you must set the secret name to `ledger.tls.caRootCertSecretForServiceMonitor`.
+
+```yaml
+ledger:
+ tls:
+ enabled: true
+ caRootCertSecretForServiceMonitor: "scalardl-ledger-tls-ca-for-prometheus"
+```
+
+In this case, you have to create secret resources that include a root CA certificate for ScalarDL Ledger in the same namespace as Prometheus as follows:
+
+```console
+kubectl create secret generic scalardl-ledger-tls-ca-for-prometheus --from-file=ca.crt=/path/to/your/ca/certificate/file -n <NAMESPACE_SAME_AS_PROMETHEUS>
+```
+
+### Replica configurations (optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDL Ledger using `ledger.replicaCount`.
+
+```yaml
+ledger:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDL Ledger, you can use `ledger.scalarLedgerConfiguration.ledgerLogLevel`.
+
+```yaml
+ledger:
+ scalarLedgerConfiguration:
+ ledgerLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `ledger.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+ledger:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardl-ledger
+```
diff --git a/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-schema-loader.mdx b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-schema-loader.mdx
new file mode 100644
index 00000000..f3b6d5dd
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/configure-custom-values-scalardl-schema-loader.mdx
@@ -0,0 +1,87 @@
+---
+---
+
+# Configure a custom values file for ScalarDL Schema Loader
+
+This document explains how to create your custom values file for the ScalarDL Schema Loader chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/schema-loading/README.md) of the ScalarDL Schema Loader chart.
+
+## Required configurations
+
+### Database configurations
+
+You must set `schemaLoading.databaseProperties`. Please set your `database.properties` to access the backend database to this parameter. Please refer to the [Getting Started with ScalarDB](https://scalardb.scalar-labs.com/docs/latest/getting-started-with-scalardb) for more details on the database configuration of ScalarDB.
+
+```yaml
+schemaLoading:
+ databaseProperties: |
+ scalar.db.contact_points=cassandra
+ scalar.db.contact_port=9042
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+```
+
+### Schema type configurations
+
+You must set `schemaLoading.schemaType`.
+
+If you create the schema of ScalarDL Ledger, please set `ledger`.
+
+```yaml
+schemaLoading:
+ schemaType: ledger
+```
+
+If you create the schema of ScalarDL Auditor, please set `auditor`.
+
+```yaml
+schemaLoading:
+ schemaType: auditor
+```
+
+## Optional configurations
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `schemaLoading.databaseProperties`, you can use `schemaLoading.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](use-secret-for-credentials.mdx) for more details on how to use a Secret resource.
+
+```yaml
+schemaLoading:
+ secretName: "schema-loader-credentials-secret"
+```
+
+### Image configurations (Default value is recommended)
+
+If you want to change the image repository, you can use `schemaLoading.image.repository` to specify which repository you want to use to pull the ScalarDL Schema Loader container image from.
+
+```yaml
+schemaLoading:
+ image:
+ repository: <SCALARDL_SCHEMA_LOADER_CONTAINER_IMAGE>
+```
+
+### Flags configurations (Optional based on your environment)
+
+You can specify several flags as an array. Please refer to the document [ScalarDB Schema Loader](https://scalardb.scalar-labs.com/docs/latest/schema-loader) for more details on the flags.
+
+```yaml
+schemaLoading:
+ commandArgs:
+ - "--alter"
+ - "--compaction-strategy"
+ - "<COMPACTION_STRATEGY>"
+ - "--delete-all"
+ - "--no-backup"
+ - "--no-scaling"
+ - "--repair-all"
+ - "--replication-factor"
+ - "<REPLICATION_FACTOR>"
+ - "--replication-strategy"
+ - "<REPLICATION_STRATEGY>"
+ - "--ru"
+ - "<RESOURCE_UNIT>"
+```
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-logging.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-logging.mdx
new file mode 100644
index 00000000..ab82c393
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-logging.mdx
@@ -0,0 +1,96 @@
+---
+tags:
+ - Community
+---
+
+# Getting Started with Helm Charts (Logging using Loki Stack)
+
+This document explains how to get started with log aggregation for Scalar products on Kubernetes using Grafana Loki (with Promtail).
+
+We assume that you have already read the [getting-started with monitoring](getting-started-monitoring.mdx) for Scalar products and installed kube-prometheus-stack.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++--------------------------------------------------------------------------------------------------+
+| +------------------------------------+ |
+| | loki-stack | |
+| | | +-----------------+ |
+| | +--------------+ +--------------+ | <-----------------(Log)-------------- | Scalar Products | |
+| | | Loki | | Promtail | | | | |
+| | +--------------+ +--------------+ | | +-----------+ | |
+| +------------------------------------+ | | ScalarDB | | |
+| | +-----------+ | |
+| +------------------------------------------------------+ | | |
+| | kube-prometheus-stack | | +-----------+ | |
+| | | | | ScalarDL | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ |
+| | +-------+------+ +------+-------+ +------+-------+ | |
+| | | | | | |
+| | +----------------+-----------------+ | |
+| | | | |
+| +--------------------------+---------------------------+ |
+| | |
+| | Kubernetes |
++----------------------------+---------------------------------------------------------------------+
+ | <- expose to localhost (127.0.0.1) or use load balancer etc to access
+ |
+ (Access Dashboard through HTTP)
+ |
+ +----+----+
+ | Browser |
+ +---------+
+```
+
+## Step 1. Prepare a custom values file
+
+1. Get the sample file [scalar-loki-stack-custom-values.yaml](conf/scalar-loki-stack-custom-values.yaml) for the `loki-stack` helm chart.
+
+## Step 2. Deploy `loki-stack`
+
+1. Add the `grafana` helm repository.
+ ```console
+ helm repo add grafana https://grafana.github.io/helm-charts
+ ```
+
+1. Deploy the `loki-stack` helm chart.
+ ```console
+ helm install scalar-logging-loki grafana/loki-stack -n monitoring -f scalar-loki-stack-custom-values.yaml
+ ```
+
+## Step 3. Add a Loki data source in the Grafana configuration
+
+1. Add a configuration of the Loki data source in the `scalar-prometheus-custom-values.yaml` file.
+ ```yaml
+ grafana:
+ additionalDataSources:
+ - name: Loki
+ type: loki
+ uid: loki
+ url: http://scalar-logging-loki:3100/
+ access: proxy
+ editable: false
+ isDefault: false
+ ```
+
+1. Apply the configuration (upgrade the deployment of `kube-prometheus-stack`).
+ ```console
+ helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 4. Access the Grafana dashboard
+
+1. Add Loki as a data source
+ - Go to Grafana http://localhost:3000 (If you use minikube)
+ - Go to `Explore` to find the added Loki
+ - You can see the collected logs in the `Explore` page
+
+## Step 5. Delete the `loki-stack` helm chart
+
+1. Uninstall `loki-stack`.
+ ```console
+ helm uninstall scalar-logging-loki -n monitoring
+ ```
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-monitoring.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-monitoring.mdx
new file mode 100644
index 00000000..c28a9a40
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-monitoring.mdx
@@ -0,0 +1,266 @@
+---
+tags:
+ - Community
+---
+
+# Getting Started with Helm Charts (Monitoring using Prometheus Operator)
+
+This document explains how to get started with Scalar products monitoring on Kubernetes using Prometheus Operator (kube-prometheus-stack). Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++--------------------------------------------------------------------------------------------------+
+| +------------------------------------------------------+ +-----------------+ |
+| | kube-prometheus-stack | | Scalar Products | |
+| | | | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | | | ScalarDB | | |
+| | +-------+------+ +------+-------+ +------+-------+ | | +-----------+ | |
+| | | | | | | +-----------+ | |
+| | +----------------+-----------------+ | | | ScalarDL | | |
+| | | | | +-----------+ | |
+| +--------------------------+---------------------------+ +-----------------+ |
+| | |
+| | Kubernetes |
++----------------------------+---------------------------------------------------------------------+
+ | <- expose to localhost (127.0.0.1) or use load balancer etc to access
+ |
+ (Access Dashboard through HTTP)
+ |
+ +----+----+
+ | Browser |
+ +---------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Prepare a custom values file
+
+1. Save the sample file [scalar-prometheus-custom-values.yaml](conf/scalar-prometheus-custom-values.yaml) for `kube-prometheus-stack`.
+
+1. Add custom values in the `scalar-prometheus-custom-values.yaml` as follows.
+ * settings
+ * `prometheus.service.type` to `LoadBalancer`
+ * `alertmanager.service.type` to `LoadBalancer`
+ * `grafana.service.type` to `LoadBalancer`
+ * `grafana.service.port` to `3000`
+ * Example
+ ```yaml
+ alertmanager:
+
+ service:
+ type: LoadBalancer
+
+ ...
+
+ grafana:
+
+ service:
+ type: LoadBalancer
+ port: 3000
+
+ ...
+
+ prometheus:
+
+ service:
+ type: LoadBalancer
+
+ ...
+ ```
+ * Note:
+ * If you want to customize the Prometheus Operator deployment by using Helm Charts, you'll need to set the following configurations to monitor Scalar products:
+ * Set `serviceMonitorSelectorNilUsesHelmValues` and `ruleSelectorNilUsesHelmValues` to `false` (`true` by default) so that Prometheus Operator can detect `ServiceMonitor` and `PrometheusRule` for Scalar products.
+
+ * If you want to use Scalar Manager, you'll need to set the following configurations to enable Scalar Manager to collect CPU and memory resources:
+ * Set `kubeStateMetrics.enabled`, `nodeExporter.enabled`, and `kubelet.enabled` to `true`.
+
+ * If you want to use Scalar Manager, you'll need to set the following configurations to enable Scalar Manager to embed Grafana:
+ * Set `grafana.ini.security.allow_embedding` and `grafana.ini.auth.anonymous.enabled` to `true`.
+ * Set `grafana.ini.auth.anonymous.org_name` to the organization you are using. If you're using the sample custom values, the value is `Main Org.`.
+ * Set `grafana.ini.auth.anonymous.org_role` to `Editor`.
+
+## Step 3. Deploy `kube-prometheus-stack`
+
+1. Add the `prometheus-community` helm repository.
+ ```console
+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+ ```
+
+1. Create the namespace `monitoring` in the Kubernetes cluster.
+ ```console
+ kubectl create namespace monitoring
+ ```
+
+1. Deploy the `kube-prometheus-stack`.
+ ```console
+ helm install scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 4. Deploy (or Upgrade) Scalar products using Helm Charts
+
+* Note:
+ * The following explains the minimum steps. If you want to know more details about the deployment of ScalarDB and ScalarDL, please refer to the following documents.
+ * [Getting Started with Helm Charts (ScalarDB Server)](getting-started-scalardb.mdx)
+ * [Getting Started with Helm Charts (ScalarDL Ledger / Ledger only)](getting-started-scalardl-ledger.mdx)
+ * [Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode)](getting-started-scalardl-auditor.mdx)
+
+1. To enable Prometheus monitoring of Scalar products, set `true` to the following configurations in the custom values file.
+ * Configurations
+ * `*.prometheusRule.enabled`
+ * `*.grafanaDashboard.enabled`
+ * `*.serviceMonitor.enabled`
+ * Sample configuration files
+ * ScalarDB (scalardb-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ scalardb:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+ * ScalarDL Ledger (scalardl-ledger-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ ledger:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+ * ScalarDL Auditor (scalardl-auditor-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ auditor:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+
+1. Deploy (or Upgrade) Scalar products using Helm Charts with the above custom values file.
+ * Examples
+ * ScalarDB
+ ```console
+ helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+ * ScalarDL Ledger
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+ * ScalarDL Auditor
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+
+## Step 5. Access Dashboards
+
+### If you use minikube
+
+1. To expose each service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command.
+ ```console
+ minikube tunnel
+ ```
+
+ After running the `minikube tunnel` command, you can see the EXTERNAL-IP of each service resource as `127.0.0.1`.
+ ```console
+ kubectl get svc -n monitoring scalar-monitoring-kube-pro-prometheus scalar-monitoring-kube-pro-alertmanager scalar-monitoring-grafana
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ scalar-monitoring-kube-pro-prometheus LoadBalancer 10.98.11.12 127.0.0.1 9090:30550/TCP 26m
+ scalar-monitoring-kube-pro-alertmanager LoadBalancer 10.98.151.66 127.0.0.1 9093:31684/TCP 26m
+ scalar-monitoring-grafana LoadBalancer 10.103.19.4 127.0.0.1 3000:31948/TCP 26m
+ ```
+
+1. Access each Dashboard.
+ * Prometheus
+ ```console
+ http://localhost:9090/
+ ```
+ * Alertmanager
+ ```console
+ http://localhost:9093/
+ ```
+ * Grafana
+ ```console
+ http://localhost:3000/
+ ```
+ * Note:
+ * You can see the user and password of Grafana as follows.
+ * user
+ ```console
+ kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-user}' | base64 -d
+ ```
+ * password
+ ```console
+ kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d
+ ```
+
+### If you use other Kubernetes than minikube
+
+If you use a Kubernetes cluster other than minikube, you need to access the LoadBalancer service according to the manner of each Kubernetes cluster. For example, you can use a load balancer provided by your cloud service provider or the `kubectl port-forward` command.
+
+## Step 6. Delete all resources
+
+After completing the Monitoring tests on the Kubernetes cluster, remove all resources.
+
+1. Terminate the `minikube tunnel` command. (If you use minikube)
+ ```console
+ Ctrl + C
+ ```
+
+1. Uninstall `kube-prometheus-stack`.
+ ```console
+ helm uninstall scalar-monitoring -n monitoring
+ ```
+
+1. Delete minikube. (Optional / If you use minikube)
+ ```console
+ minikube delete --all
+ ```
+ * Note:
+ * If you deploy the ScalarDB or ScalarDL, you need to remove them before deleting minikube.
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalar-helm-charts.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalar-helm-charts.mdx
new file mode 100644
index 00000000..dcc94494
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalar-helm-charts.mdx
@@ -0,0 +1,81 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Getting Started with Scalar Helm Charts
+
+This document explains how to get started with Scalar Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Tools
+
+We will use the following tools for testing.
+
+1. minikube (If you use other Kubernetes distributions, minikube is not necessary.)
+1. kubectl
+1. Helm
+1. cfssl / cfssljson
+
+## Step 1. Install tools
+
+First, you need to install the following tools used in this guide.
+
+1. Install the `minikube` command according to the [minikube documentation](https://minikube.sigs.k8s.io/docs/start/)
+
+1. Install the `kubectl` command according to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
+
+1. Install the `helm` command according to the [Helm documentation](https://helm.sh/docs/intro/install/)
+
+1. Install the `cfssl` and `cfssljson` according to the [CFSSL documentation](https://github.com/cloudflare/cfssl)
+
+:::note
+
+You need to install the `cfssl` and `cfssljson` command when following these getting started guides:
+
+* [ScalarDB Cluster with TLS](getting-started-scalardb-cluster-tls.mdx)
+* [ScalarDL Ledger and Auditor with TLS (Auditor mode)](getting-started-scalardl-auditor-tls.mdx)
+* [ScalarDL Ledger (Ledger only)](getting-started-scalardl-ledger.mdx)
+* [ScalarDL Ledger and Auditor (Auditor mode)](getting-started-scalardl-auditor.mdx)
+
+:::
+
+## Step 2. Start minikube with docker driver (Optional / If you use minikube)
+
+1. Start minikube.
+ ```console
+ minikube start
+ ```
+
+1. Check the status of the minikube and pods.
+ ```console
+ kubectl get pod -A
+ ```
+ [Command execution result]
+ ```console
+ NAMESPACE NAME READY STATUS RESTARTS AGE
+ kube-system coredns-64897985d-lbsfr 1/1 Running 1 (20h ago) 21h
+ kube-system etcd-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-apiserver-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-controller-manager-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-proxy-gsl6j 1/1 Running 1 (20h ago) 21h
+ kube-system kube-scheduler-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system storage-provisioner 1/1 Running 2 (19s ago) 21h
+ ```
+ If the minikube starts properly, you can see some pods are **Running** in the kube-system namespace.
+
+## Step 3. Deploy Scalar products by using Scalar Helm Charts
+
+After the Kubernetes cluster starts, you can try each Scalar Helm Charts on it. Please refer to the following documents for more details.
+
+* [ScalarDB Cluster with TLS](getting-started-scalardb-cluster-tls.mdx)
+* [ScalarDB Cluster with TLS by Using cert-manager](getting-started-scalardb-cluster-tls-cert-manager.mdx)
+* [ScalarDB Analytics with PostgreSQL](getting-started-scalardb-analytics-postgresql.mdx)
+* [ScalarDL Ledger and Auditor with TLS (Auditor mode)](getting-started-scalardl-auditor-tls.mdx)
+* [ScalarDL Ledger and Auditor with TLS by Using cert-manager (Auditor mode)](getting-started-scalardl-auditor-tls-cert-manager.mdx)
+* [ScalarDL Ledger (Ledger only)](getting-started-scalardl-ledger.mdx)
+* [ScalarDL Ledger and Auditor (Auditor mode)](getting-started-scalardl-auditor.mdx)
+* [Monitoring using Prometheus Operator](getting-started-monitoring.mdx)
+ * [Logging using Loki Stack](getting-started-logging.mdx)
+ * [Scalar Manager](getting-started-scalar-manager.mdx)
+* [[Deprecated] ScalarDB Server](getting-started-scalardb.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalar-manager.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalar-manager.mdx
new file mode 100644
index 00000000..a09fb0b9
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalar-manager.mdx
@@ -0,0 +1,181 @@
+---
+tags:
+ - Enterprise Option
+---
+
+# Getting Started with Helm Charts (Scalar Manager)
+
+Scalar Manager is a centralized management and monitoring solution for ScalarDB and ScalarDL within Kubernetes cluster environments that allows you to:
+
+* Check the availability of ScalarDB or ScalarDL.
+* Schedule or execute pausing jobs that create transactionally consistent periods in the databases used by ScalarDB or ScalarDL.
+* Check the time-series metrics and logs of ScalarDB or ScalarDL through Grafana dashboards.
+
+For more details, refer to [Scalar Manager Overview](../scalar-manager/overview.mdx).
+
+This guide will show you how to deploy and access Scalar Manager on a Kubernetes cluster.
+
+## Assumption
+
+This guide assumes that you are aware of how to deploy ScalarDB or ScalarDL with the [monitoring](getting-started-monitoring.mdx) and [logging](getting-started-logging.mdx) tools to a Kubernetes cluster.
+
+## Requirement
+
+* You must deploy `kube-prometheus-stack` according to the instructions in [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx).
+* You must deploy `loki-stack` according to the instructions in [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx).
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++--------------------------------------------------------------------------------------------------+
+| +----------------------+ |
+| | scalar-manager | |
+| | | |
+| | +------------------+ | --------------------------(Manage)--------------------------+ |
+| | | Scalar Manager | | | |
+| | +------------------+ | | |
+| +--+-------------------+ | |
+| | | |
+| +------------------------------------+ | |
+| | loki-stack | V |
+| | | +-----------------+ |
+| | +--------------+ +--------------+ | <----------------(Log)--------------- | Scalar Products | |
+| | | Loki | | Promtail | | | | |
+| | +--------------+ +--------------+ | | +-----------+ | |
+| +------------------------------------+ | | ScalarDB | | |
+| | | +-----------+ | |
+| +------------------------------------------------------+ | | |
+| | kube-prometheus-stack | | +-----------+ | |
+| | | | | ScalarDL | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ |
+| | +-------+------+ +------+-------+ +------+-------+ | |
+| | | | | | |
+| | +----------------+-----------------+ | |
+| | | | |
+| +--------------------------+---------------------------+ |
+| | | |
+| | | Kubernetes |
++----+-----------------------+---------------------------------------------------------------------+
+ | |
+ expose to localhost (127.0.0.1) or use load balancer etc to access
+ | |
+ (Access Dashboard through HTTP)
+ | |
++----+----+ +----+----+
+| Browser | <-(Embed)-- | Browser |
++---------+ +---------+
+```
+
+## Step 1. Upgrade the `kube-prometheus-stack` to allow Grafana to be embedded
+
+1. Add or revise this value to the custom values file (e.g. scalar-prometheus-custom-values.yaml) of the `kube-prometheus-stack`
+ ```yaml
+ kubeStateMetrics:
+ enabled: true
+ nodeExporter:
+ enabled: true
+ kubelet:
+ enabled: true
+ grafana:
+ grafana.ini:
+ users:
+ default_theme: light
+ security:
+ allow_embedding: true
+ auth.anonymous:
+ enabled: true
+ org_name: "Main Org."
+ org_role: Editor
+ ```
+
+1. Upgrade the Helm installation
+ ```console
+ helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 2. Prepare a custom values file for Scalar Manager
+
+1. Create an empty .yaml file named `scalar-manager-custom-values.yaml` for `scalar-manager`.
+
+1. Set the service type to access Scalar Manager. The default value is `ClusterIP`, but if we access using the `minikube tunnel` command or some load balancer, we can set it as `LoadBalancer`.
+ ```yaml
+ service:
+ type: LoadBalancer
+ port: 8000
+ ```
+
+## Step 3. Deploy `scalar-manager`
+
+1. Deploy the `scalar-manager` Helm Chart.
+ ```console
+ helm install scalar-manager scalar-labs/scalar-manager -f scalar-manager-custom-values.yaml
+ ```
+
+## Step 4. Access Scalar Manager
+
+### If you use minikube
+
+1. To expose Scalar Manager's service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command.
+ ```console
+ minikube tunnel
+ ```
+
+1. Open the browser with URL `http://localhost:8000`
+
+### If you use other Kubernetes than minikube
+
+If you're using a Kubernetes cluster other than minikube, you'll need to access the `LoadBalancer` service according to the manner of each Kubernetes cluster. For example, you'll need to use a load balancer provided by your cloud services provider or use the `kubectl port-forward` command.
+
+:::note
+
+Scalar Manager will try to detect the external IP of Grafana and then embed Grafana based on the IP. Therefore, you must configure the Grafana service type as `LoadBalancer`, and the external IP must be accessible from your browser.
+
+:::
+
+## Step 5. Delete Scalar Manager
+1. Uninstall `scalar-manager`
+ ```console
+ helm uninstall scalar-manager
+ ```
+
+## Additional details
+
+This section provides additional details related to configurations and resource discovery.
+
+### Configurations
+
+You can see configurations for Scalar Manager in [Configure a custom values file for Scalar Manager](./configure-custom-values-scalar-manager.mdx)
+
+### Resource discovery
+
+Scalar Manager discovers the following Kubernetes resources in a cluster by using specific label selectors:
+
+- Dependencies
+ - Prometheus service
+ - Loki service
+ - Grafana service
+- Targets
+ - ScalarDB Cluster deployments
+ - ScalarDL Ledger deployments
+ - ScalarDL Auditor deployments
+
+The following sections explain how Scalar Manager discovers these resources.
+
+#### Dependencies
+
+Scalar Manager searches for the default labels and values set in the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) and [loki-stack](https://github.com/grafana/helm-charts/tree/main/charts/loki-stack) Helm Charts. For more information on the default labels and values that Scalar Manager uses to discover dependencies, see [Properties that you can set in `api.applicationProperties`](./configure-custom-values-scalar-manager.mdx#properties-that-you-can-set-in-apiapplicationproperties).
+
+Also, if you customized any values when installing `kube-prometheus-stack` or `loki-stack`, you will need to update the label selectors in the Scalar Manager custom value `api.applicationProperties`.
+
+#### Targets
+
+Scalar Manager searches for ScalarDB Cluster, ScalarDL Ledger, and ScalarDL Auditor deployments by using the following labels and values:
+
+- **ScalarDB Cluster:** `app.kubernetes.io/app=scalardb-cluster`
+- **ScalarDL Ledger:** `app.kubernetes.io/app=ledger`
+- **ScalarDL Auditor:** `app.kubernetes.io/app=auditor`
+
+Scalar Helm Charts use fixed labels and values for ScalarDB Cluster, ScalarDL Ledger, and ScalarDL Auditor deployments so that if you install ScalarDB and ScalarDL by using [Scalar Helm Charts](https://github.com/scalar-labs/helm-charts), Scalar Manager will automatically discover these deployments.
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-analytics-postgresql.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-analytics-postgresql.mdx
new file mode 100644
index 00000000..36fa1a0a
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-analytics-postgresql.mdx
@@ -0,0 +1,510 @@
+---
+tags:
+ - Community
+---
+
+# Getting Started with Helm Charts (ScalarDB Analytics with PostgreSQL)
+
+This guide explains how to get started with ScalarDB Analytics with PostgreSQL by using a Helm Chart in a Kubernetes cluster as a test environment. In addition, the contents of this guide assume that you already have a Mac or Linux environment set up for testing. Although **minikube** is mentioned, the steps described should work in any Kubernetes cluster.
+
+## What you will create
+
+You will deploy the following components in a Kubernetes cluster:
+
+```
++-------------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes cluster] |
+| |
+| [Pod] [Pod] [Pod] |
+| |
+| +------------------------------------+ |
+| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ |
+| | +------------------------------------+ | +---> | MySQL ("customer" schema) | <---+ |
+| | | | +-----------------------------+ | |
+| +-------------+ +---------+ | +------------------------------------+ | | | |
+| | OLAP client | ---> | Service | ---+---> | ScalarDB Analytics with PostgreSQL | ---+---+ +---+ |
+| +-------------+ +---------+ | +------------------------------------+ | | | | |
+| | | | +-----------------------------+ | | |
+| | +------------------------------------+ | +---> | PostgreSQL ("order" schema) | <---+ | |
+| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ | |
+| +------------------------------------+ | |
+| | |
+| +-------------+ | |
+| | OLTP client | ---(Load sample data with a test OLTP workload)-----------------------------------------------------------------------+ |
+| +-------------+ |
+| |
++-------------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you're using a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start MySQL and PostgreSQL pods
+
+ScalarDB, including ScalarDB Analytics with PostgreSQL, can use several types of database systems as a backend database. In this guide, you will use MySQL and PostgreSQL.
+
+You can deploy MySQL and PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Update the helm repository.
+
+ ```console
+ helm repo update bitnami
+ ```
+
+1. Deploy MySQL.
+
+ ```console
+ helm install mysql-scalardb bitnami/mysql \
+ --set auth.rootPassword=mysql \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Deploy PostgreSQL.
+
+ ```console
+ helm install postgresql-scalardb bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the MySQL and PostgreSQL pods are running.
+
+ ```console
+ kubectl get pod
+ ```
+
+ You should see the following output:
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ mysql-scalardb-0 1/1 Running 0 3m17s
+ postgresql-scalardb-0 1/1 Running 0 3m12s
+ ```
+
+## Step 3. Create a working directory
+
+Since you'll be creating some configuration files locally, create a working directory for those files.
+
+```console
+mkdir -p ~/scalardb-analytics-postgresql-test/
+```
+
+## Step 4. Set the versions of ScalarDB, ScalarDB Analytics with PostgreSQL, and the chart
+
+Set the following three environment variables. If you want to use another version of ScalarDB and ScalarDB Analytics with PostgreSQL, be sure to set them to the versions that you want to use.
+
+:::note
+
+You must use the same minor versions (for example, 3.10.x) of ScalarDB Analytics with PostgreSQL as ScalarDB, but you don't need to make the patch versions match. For example, you can use ScalarDB 3.10.1 and ScalarDB Analytics with PostgreSQL 3.10.3 together.
+
+:::
+
+```console
+SCALARDB_VERSION=3.10.1
+```
+
+```console
+SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION=3.10.3
+```
+
+```console
+CHART_VERSION=$(helm search repo scalar-labs/scalardb-analytics-postgresql -l | grep -e ${SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION} | awk '{print $2}' | sort --version-sort -r | head -n 1)
+```
+
+## Step 5. Run OLTP transactions to load sample data to MySQL and PostgreSQL
+
+Before deploying ScalarDB Analytics with PostgreSQL, run the OLTP transactions to create sample data.
+
+1. Start an OLTP client pod in the Kubernetes cluster.
+
+ ```console
+ kubectl run oltp-client --image eclipse-temurin:8-jdk-jammy --env SCALARDB_VERSION=${SCALARDB_VERSION} -- sleep inf
+ ```
+
+1. Check if the OLTP client pod is running.
+
+ ```console
+ kubectl get pod oltp-client
+ ```
+
+ You should see the following output:
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ oltp-client 1/1 Running 0 17s
+ ```
+
+1. Run bash in the OLTP client pod.
+
+ ```console
+ kubectl exec -it oltp-client -- bash
+ ```
+
+ After this step, run each command in the OLTP client pod.
+
+1. Install the git and curl commands in the OLTP client pod.
+
+ ```console
+ apt update && apt install -y curl git
+ ```
+
+1. Clone the ScalarDB samples repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardb-samples.git
+ ```
+
+1. Go to the directory `scalardb-samples/multi-storage-transaction-sample/`.
+
+ ```console
+ cd scalardb-samples/multi-storage-transaction-sample/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ You should see the following output:
+
+ ```console
+ # pwd
+ /scalardb-samples/multi-storage-transaction-sample
+ ```
+
+1. Create a configuration file (`database.properties`) to access MySQL and PostgreSQL in the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > database.properties
+ scalar.db.storage=multi-storage
+ scalar.db.multi_storage.storages=storage0,storage1
+
+ # Storage 0
+ scalar.db.multi_storage.storages.storage0.storage=jdbc
+ scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/
+ scalar.db.multi_storage.storages.storage0.username=root
+ scalar.db.multi_storage.storages.storage0.password=mysql
+
+ # Storage 1
+ scalar.db.multi_storage.storages.storage1.storage=jdbc
+ scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.multi_storage.storages.storage1.username=postgres
+ scalar.db.multi_storage.storages.storage1.password=postgres
+
+ scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1
+ scalar.db.multi_storage.default_storage=storage1
+ EOF
+ ```
+
+1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v${SCALARDB_VERSION}/scalardb-schema-loader-${SCALARDB_VERSION}.jar
+ ```
+
+1. Run Schema Loader to create sample tables.
+
+ ```console
+ java -jar scalardb-schema-loader-${SCALARDB_VERSION}.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+1. Load initial data for the sample workload.
+
+ ```console
+ ./gradlew run --args="LoadInitialData"
+ ```
+
+1. Run the sample workload of OLTP transactions. Running these commands will create several `order` entries as sample data.
+
+ ```console
+ ./gradlew run --args="PlaceOrder 1 1:3,2:2"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 1 5:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 2 3:1,4:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 2 2:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 1:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 2:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 3:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 5:1"
+ ```
+
+
+1. Exit from OLTP client.
+
+ ```console
+ exit
+ ```
+
+## Step 6. Deploy ScalarDB Analytics with PostgreSQL
+
+After creating sample data via ScalarDB in the backend databases, deploy ScalarDB Analytics with PostgreSQL.
+
+1. Create a custom values file for ScalarDB Analytics with PostgreSQL (`scalardb-analytics-postgresql-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml
+ scalardbAnalyticsPostgreSQL:
+ databaseProperties: |
+ scalar.db.storage=multi-storage
+ scalar.db.multi_storage.storages=storage0,storage1
+
+ # Storage 0
+ scalar.db.multi_storage.storages.storage0.storage=jdbc
+ scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/
+ scalar.db.multi_storage.storages.storage0.username=root
+ scalar.db.multi_storage.storages.storage0.password=mysql
+
+ # Storage 1
+ scalar.db.multi_storage.storages.storage1.storage=jdbc
+ scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.multi_storage.storages.storage1.username=postgres
+ scalar.db.multi_storage.storages.storage1.password=postgres
+
+ scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1
+ scalar.db.multi_storage.default_storage=storage1
+ schemaImporter:
+ namespaces:
+ - customer
+ - order
+ EOF
+ ```
+
+1. Create a secret resource to set a superuser password for PostgreSQL.
+
+ ```console
+ kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=scalardb-analytics
+ ```
+
+1. Deploy ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ helm install scalardb-analytics-postgresql scalar-labs/scalardb-analytics-postgresql -n default -f ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml --version ${CHART_VERSION}
+ ```
+
+## Step 7. Run an OLAP client pod
+
+To run some queries via ScalarDB Analytics with PostgreSQL, run an OLAP client pod.
+
+1. Start an OLAP client pod in the Kubernetes cluster.
+
+ ```console
+ kubectl run olap-client --image postgres:latest -- sleep inf
+ ```
+
+1. Check if the OLAP client pod is running.
+
+ ```console
+ kubectl get pod olap-client
+ ```
+
+ You should see the following output:
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ olap-client 1/1 Running 0 10s
+ ```
+
+## Step 8. Run sample queries via ScalarDB Analytics with PostgreSQL
+
+After running the OLAP client pod, you can run some queries via ScalarDB Analytics with PostgreSQL.
+
+1. Run bash in the OLAP client pod.
+
+ ```console
+ kubectl exec -it olap-client -- bash
+ ```
+
+ After this step, run each command in the OLAP client pod.
+
+1. Run the psql command to access ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ psql -h scalardb-analytics-postgresql -p 5432 -U postgres -d scalardb
+ ```
+
+ The password is `scalardb-analytics`.
+
+1. Read sample data in the `customer.customers` table.
+
+ ```sql
+ SELECT * FROM customer.customers;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ customer_id | name | credit_limit | credit_total
+ -------------+---------------+--------------+--------------
+ 1 | Yamada Taro | 10000 | 10000
+ 2 | Yamada Hanako | 10000 | 9500
+ 3 | Suzuki Ichiro | 10000 | 8500
+ (3 rows)
+ ```
+
+1. Read sample data in the `order.orders` table.
+
+ ```sql
+ SELECT * FROM "order".orders;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".orders;
+ customer_id | timestamp | order_id
+ -------------+---------------+--------------------------------------
+ 1 | 1700124015601 | 5ae2a41b-990d-4a16-9700-39355e29adf8
+ 1 | 1700124021273 | f3f23d93-3862-48be-8a57-8368b7c8689e
+ 2 | 1700124028182 | 696a895a-8998-4c3b-b112-4d5763bfcfd8
+ 2 | 1700124036158 | 9215d63a-a9a2-4471-a990-45897f091ca5
+ 3 | 1700124043744 | 9be70cd4-4f93-4753-9d89-68e250b2ac51
+ 3 | 1700124051162 | 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8
+ 3 | 1700124058096 | 658b6682-2819-41f2-91ee-2802a1f02857
+ 3 | 1700124071240 | 4e2f94f4-53ec-4570-af98-7c648d8ed80f
+ (8 rows)
+ ```
+
+1. Read sample data in the `order.statements` table.
+
+ ```sql
+ SELECT * FROM "order".statements;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".statements;
+ order_id | item_id | count
+ --------------------------------------+---------+-------
+ 5ae2a41b-990d-4a16-9700-39355e29adf8 | 2 | 2
+ 5ae2a41b-990d-4a16-9700-39355e29adf8 | 1 | 3
+ f3f23d93-3862-48be-8a57-8368b7c8689e | 5 | 1
+ 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 4 | 1
+ 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 3 | 1
+ 9215d63a-a9a2-4471-a990-45897f091ca5 | 2 | 1
+ 9be70cd4-4f93-4753-9d89-68e250b2ac51 | 1 | 1
+ 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8 | 2 | 1
+ 658b6682-2819-41f2-91ee-2802a1f02857 | 3 | 1
+ 4e2f94f4-53ec-4570-af98-7c648d8ed80f | 5 | 1
+ (10 rows)
+ ```
+
+1. Read sample data in the `order.items` table.
+
+ ```sql
+ SELECT * FROM "order".items;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".items;
+ item_id | name | price
+ ---------+--------+-------
+ 5 | Melon | 3000
+ 2 | Orange | 2000
+ 4 | Mango | 5000
+ 1 | Apple | 1000
+ 3 | Grape | 2500
+ (5 rows)
+ ```
+
+1. Run a `JOIN` query. For example, you can see the remaining credit information for each user as follows.
+
+ ```sql
+ SELECT * FROM (
+ SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items
+ FROM "order".orders o
+ JOIN customer.customers c ON o.customer_id = c.customer_id
+ JOIN "order".statements s ON o.order_id = s.order_id
+ JOIN "order".items i ON s.item_id = i.item_id
+ ) AS remaining_info GROUP BY name, remaining, items;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM (
+ scalardb(# SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items
+ scalardb(# FROM "order".orders o
+ scalardb(# JOIN customer.customers c ON o.customer_id = c.customer_id
+ scalardb(# JOIN "order".statements s ON o.order_id = s.order_id
+ scalardb(# JOIN "order".items i ON s.item_id = i.item_id
+ scalardb(# ) AS remaining_info GROUP BY name, remaining, items;
+ name | remaining | items
+ ---------------+-----------+----------------------------
+ Suzuki Ichiro | 1500 | {Grape,Orange,Apple,Melon}
+ Yamada Hanako | 500 | {Orange,Grape,Mango}
+ Yamada Taro | 0 | {Orange,Melon,Apple}
+ (3 rows)
+ ```
+
+1. Exit from the psql command.
+
+ ```console
+ \q
+ ```
+
+1. Exit from the OLAP client pod.
+
+ ```console
+ exit
+ ```
+
+## Step 9. Delete all resources
+
+After completing the ScalarDB Analytics with PostgreSQL tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall MySQL, PostgreSQL, and ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ helm uninstall mysql-scalardb postgresql-scalardb scalardb-analytics-postgresql
+ ```
+
+1. Remove the client pods.
+
+ ```console
+ kubectl delete pod oltp-client olap-client --grace-period 0
+ ```
+
+1. Remove the secret resource.
+
+ ```console
+ kubectl delete secrets scalardb-analytics-postgresql-superuser-password
+ ```
+
+1. Remove the working directory and sample files.
+
+ ```console
+ cd ~
+ ```
+
+ ```console
+ rm -rf ~/scalardb-analytics-postgresql-test/
+ ```
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls-cert-manager.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls-cert-manager.mdx
new file mode 100644
index 00000000..f3fd69e7
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls-cert-manager.mdx
@@ -0,0 +1,596 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Getting Started with Helm Charts (ScalarDB Cluster with TLS by Using cert-manager)
+
+This tutorial explains how to get started with ScalarDB Cluster with TLS configurations by using Helm Charts and cert-manager on a Kubernetes cluster in a test environment. Before starting, you should already have a Mac or Linux environment for testing. In addition, although this tutorial mentions using **minikube**, the steps described should work in any Kubernetes cluster.
+
+## Requirements
+
+* You need to have a license key (trial license or commercial license) for ScalarDB Cluster. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact).
+* You need to use ScalarDB Cluster 3.12 or later, which supports TLS.
+
+## What you'll create
+
+In this tutorial, you'll deploy the following components on a Kubernetes cluster in the following way:
+
+```
++----------------------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +------------------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDB Cluster node | ---+ |
+| [Pod] | +-------+ | | +------------------------+ | |
+| | | | | |
+| +-----------+ +---------+ | +-------+ | +--------------------+ | +------------------------+ | +---------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Cluster node | ---+---> | PostgreSQL | |
+| | (SQL CLI) | | (Envoy) | | +-------+ | | (ScalarDB Cluster) | | +------------------------+ | | (For Ledger) | |
+| +-----------+ +---------+ | | +--------------------+ | | +---------------+ |
+| | +-------+ | | +------------------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDB Cluster node | ---+ |
+| +-------+ +------------------------+ |
+| |
+| +----------------------------------------------------------------------------------+ +---------------------+ |
+| | cert-manager (create private key and certificate for Envoy and ScalarDB Cluster) | | Issuer (Private CA) | |
+| +----------------------------------------------------------------------------------+ +---------------------+ |
+| |
++----------------------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+cert-manager automatically creates the following private key and certificate files for TLS connections.
+
+```
+ +----------------------+
+ +---> | For Scalar Envoy |
+ | +----------------------+
+ | | tls.key |
+ | | tls.crt |
++-------------------------+ | +----------------------+
+| Issuer (Self-signed CA) | ---(Sign certificates)---+
++-------------------------+ | +----------------------+
+| tls.key | +---> | For ScalarDB Cluster |
+| tls.crt | +----------------------+
+| ca.crt | | tls.key |
++-------------------------+ | tls.crt |
+ +----------------------+
+```
+
+Scalar Helm Charts automatically mount each private key and certificate file for Envoy and ScalarDB Cluster as follows to enable TLS in each connection. You'll manually mount a root CA certificate file on the client.
+
+```
++-------------------------------------+ +------------------------------------------------+ +--------------------------------+
+| Client | ---(CRUD/SQL requests)---> | Envoy for ScalarDB Cluster | ---> | ScalarDB Cluster nodes |
++-------------------------------------+ +------------------------------------------------+ +--------------------------------+
+| ca.crt (to verify tls.crt of Envoy) | | tls.key | | tls.key |
++-------------------------------------+ | tls.crt | | tls.crt |
+ | ca.crt (to verify tls.crt of ScalarDB Cluster) | | ca.crt (to check health) |
+ +------------------------------------------------+ +--------------------------------+
+```
+
+The following connections exist amongst the ScalarDB Cluster–related components:
+
+* **`Client - Envoy for ScalarDB Cluster`:** When you execute a CRUD API or SQL API function, the client accesses Envoy for ScalarDB Cluster.
+* **`Envoy for ScalarDB Cluster - ScalarDB Cluster`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDB Cluster.
+* **`ScalarDB Cluster node - ScalarDB Cluster node`:** A ScalarDB Cluster node accesses other ScalarDB Cluster nodes. In other words, the cluster's internal communications exist amongst all ScalarDB Cluster nodes.
+
+## Step 1. Start a Kubernetes cluster and install tools
+
+You need to prepare a Kubernetes cluster and install some tools (`kubectl`, `helm`, `cfssl`, and `cfssljson`). For more details on how to install them, see [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx).
+
+## Step 2. Start the PostgreSQL containers
+
+ScalarDB Cluster must use some type of database system as a backend database. In this tutorial, you'll use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for ScalarDB Cluster.
+
+ ```console
+ helm install postgresql-scalardb-cluster bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Check if the PostgreSQL containers are running.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-cluster-0 1/1 Running 0 34s
+ ```
+
+## Step 3. Create a working directory
+
+You'll create some configuration files locally. Be sure to create a working directory for those files.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ${HOME}/scalardb-cluster-test/
+ ```
+
+
+## Step 4. Deploy cert-manager and issuer resource
+
+This tutorial uses cert-manager to issue and manage your private keys and certificates. You can deploy cert-manager on the Kubernetes cluster as follows:
+
+1. Add the Jetstack helm repository.
+
+ ```console
+ helm repo add jetstack https://charts.jetstack.io
+ ```
+
+1. Deploy cert-manager.
+
+ ```console
+ helm install cert-manager jetstack/cert-manager \
+ --create-namespace \
+ --set installCRDs=true \
+ -n cert-manager
+ ```
+
+1. Check if the cert-manager containers are running.
+
+ ```console
+ kubectl get pod -n cert-manager
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ cert-manager-6dc66985d4-6lvtt 1/1 Running 0 26s
+ cert-manager-cainjector-c7d4dbdd9-xlrpn 1/1 Running 0 26s
+ cert-manager-webhook-847d7676c9-ckcz2 1/1 Running 0 26s
+ ```
+
+1. Change the working directory to `${HOME}/scalardb-cluster-test/`.
+
+ ```console
+ cd ${HOME}/scalardb-cluster-test/
+ ```
+
+1. Create a custom values file for the private CA (`private-ca-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/private-ca-custom-values.yaml
+ apiVersion: cert-manager.io/v1
+ kind: Issuer
+ metadata:
+ name: self-signed-issuer
+ spec:
+ selfSigned: {}
+ ---
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+ metadata:
+ name: self-signed-ca-cert
+ spec:
+ isCA: true
+ commonName: self-signed-ca
+ secretName: self-signed-ca-cert-secret
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ name: self-signed-issuer
+ kind: Issuer
+ group: cert-manager.io
+ ---
+ apiVersion: cert-manager.io/v1
+ kind: Issuer
+ metadata:
+ name: self-signed-ca
+ spec:
+ ca:
+ secretName: self-signed-ca-cert-secret
+ EOF
+ ```
+
+1. Deploy a self-signed CA.
+
+ ```console
+ kubectl apply -f ./private-ca-custom-values.yaml
+ ```
+
+1. Check if the issuer resources are `True`.
+
+ ```console
+ kubectl get issuer
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY AGE
+ self-signed-ca True 6s
+ self-signed-issuer True 6s
+ ```
+
+## Step 5. Deploy ScalarDB Cluster on the Kubernetes cluster by using Helm Charts
+
+1. Add the Scalar Helm Charts repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Set your license key and certificate as environment variables. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact). For details about the value of `SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM`, see [How to Configure a Product License Key](https://scalardb.scalar-labs.com/docs/latest/scalar-licensing/).
+
+ ```console
+ SCALAR_DB_CLUSTER_LICENSE_KEY=''
+ SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM=''
+ ```
+
+1. Create a custom values file for ScalarDB Cluster (`scalardb-cluster-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/scalardb-cluster-custom-values.yaml
+ envoy:
+
+ enabled: true
+
+ tls:
+ downstream:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - envoy.scalar.example.com
+ upstream:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+
+ scalardbCluster:
+
+ image:
+ repository: "ghcr.io/scalar-labs/scalardb-cluster-node-byol-premium"
+
+ scalardbClusterNodeProperties: |
+      ### Necessary configurations for deployment on Kubernetes
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+
+ ### Storage configurations
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb-cluster.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DB_CLUSTER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_CLUSTER_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+
+ ### SQL configurations
+ scalar.db.sql.enabled=true
+
+ ### Auth configurations
+ scalar.db.cluster.auth.enabled=true
+ scalar.db.cross_partition_scan.enabled=true
+
+ ### TLS configurations
+ scalar.db.cluster.tls.enabled=true
+ scalar.db.cluster.tls.ca_root_cert_path=/tls/scalardb-cluster/certs/ca.crt
+ scalar.db.cluster.node.tls.cert_chain_path=/tls/scalardb-cluster/certs/tls.crt
+ scalar.db.cluster.node.tls.private_key_path=/tls/scalardb-cluster/certs/tls.key
+ scalar.db.cluster.tls.override_authority=cluster.scalardb.example.com
+
+ ### License key configurations
+ scalar.db.cluster.node.licensing.license_key=${env:SCALAR_DB_CLUSTER_LICENSE_KEY}
+ scalar.db.cluster.node.licensing.license_check_cert_pem=${env:SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - cluster.scalardb.example.com
+
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `scalardb-credentials-secret` that includes credentials and license keys.
+
+ ```console
+ kubectl create secret generic scalardb-credentials-secret \
+ --from-literal=SCALAR_DB_CLUSTER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DB_CLUSTER_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DB_CLUSTER_LICENSE_KEY="${SCALAR_DB_CLUSTER_LICENSE_KEY}" \
+ --from-file=SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Set the chart version of ScalarDB Cluster.
+
+ ```console
+ SCALAR_DB_CLUSTER_VERSION=3.12.2
+ SCALAR_DB_CLUSTER_CHART_VERSION=$(helm search repo scalar-labs/scalardb-cluster -l | grep -F "${SCALAR_DB_CLUSTER_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDB Cluster.
+
+ ```console
+ helm install scalardb-cluster scalar-labs/scalardb-cluster -f ${HOME}/scalardb-cluster-test/scalardb-cluster-custom-values.yaml --version ${SCALAR_DB_CLUSTER_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDB Cluster pods are deployed.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-cluster-0 1/1 Running 0 4m30s
+ scalardb-cluster-envoy-7cc948dfb-4rb8l 1/1 Running 0 18s
+ scalardb-cluster-envoy-7cc948dfb-hwt96 1/1 Running 0 18s
+ scalardb-cluster-envoy-7cc948dfb-rzbrx 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-445kj 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-4z54q 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-vcv96 1/1 Running 0 18s
+ ```
+
+ If the ScalarDB Cluster pods are deployed properly, the `STATUS` column for those pods will be displayed as `Running`.
+
+1. Check if the ScalarDB Cluster services are deployed.
+
+ ```console
+ kubectl get svc -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 7h34m
+ postgresql-scalardb-cluster ClusterIP 10.96.92.27 5432/TCP 4m52s
+ postgresql-scalardb-cluster-hl ClusterIP None 5432/TCP 4m52s
+ scalardb-cluster-envoy ClusterIP 10.96.250.175 60053/TCP 40s
+ scalardb-cluster-envoy-metrics ClusterIP 10.96.40.197 9001/TCP 40s
+ scalardb-cluster-headless ClusterIP None 60053/TCP 40s
+ scalardb-cluster-metrics ClusterIP 10.96.199.135 9080/TCP 40s
+ ```
+
+ If the ScalarDB Cluster services are deployed properly, you can see private IP addresses in the `CLUSTER-IP` column.
+
+:::note
+
+The `CLUSTER-IP` values for `postgresql-scalardb-cluster-hl` and `scalardb-cluster-headless` are `None` since they have no IP addresses.
+
+:::
+
+## Step 6. Start a client container
+
+You'll use the CA certificate file in a client container. Therefore, you'll need to create a secret resource and mount it to the client container.
+
+1. Create a secret resource named `client-ca-cert`.
+
+ ```console
+ kubectl create secret generic client-ca-cert --from-file=ca.crt=<(kubectl get secret self-signed-ca-cert-secret -o "jsonpath={.data['ca\.crt']}" | base64 -d) -n default
+ ```
+
+1. Create a manifest file for a client pod (`scalardb-cluster-client-pod.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardb-cluster-client"
+ spec:
+ containers:
+ - name: scalardb-cluster-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ env:
+ - name: SCALAR_DB_CLUSTER_VERSION
+ value: SCALAR_DB_CLUSTER_CLIENT_POD_SCALAR_DB_CLUSTER_VERSION
+ volumeMounts:
+ - name: "client-ca-cert"
+ mountPath: "/certs/"
+ readOnly: true
+ volumes:
+ - name: "client-ca-cert"
+ secret:
+ secretName: "client-ca-cert"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Set the ScalarDB Cluster version in the manifest file.
+
+ ```console
+ sed -i s/SCALAR_DB_CLUSTER_CLIENT_POD_SCALAR_DB_CLUSTER_VERSION/${SCALAR_DB_CLUSTER_VERSION}/ ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml
+ ```
+
+1. Deploy the client pod.
+
+ ```console
+ kubectl apply -f ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml -n default
+ ```
+
+1. Check if the client container is running.
+
+ ```console
+ kubectl get pod scalardb-cluster-client -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardb-cluster-client 1/1 Running 0 26s
+ ```
+
+## Step 7. Run the ScalarDB Cluster SQL CLI in the client container
+
+1. Run bash in the client container.
+
+ ```console
+ kubectl exec -it scalardb-cluster-client -n default -- bash
+ ```
+
+ The commands in the following steps must be run in the client container.
+
+1. Download the ScalarDB Cluster SQL CLI from [Releases](https://github.com/scalar-labs/scalardb/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v${SCALAR_DB_CLUSTER_VERSION}/scalardb-cluster-sql-cli-${SCALAR_DB_CLUSTER_VERSION}-all.jar
+ ```
+
+1. Create a `database.properties` file and add configurations.
+
+ ```console
+ cat << 'EOF' > /database.properties
+ # ScalarDB Cluster configurations
+ scalar.db.sql.connection_mode=cluster
+ scalar.db.sql.cluster_mode.contact_points=indirect:scalardb-cluster-envoy.default.svc.cluster.local
+
+ # Auth configurations
+ scalar.db.cluster.auth.enabled=true
+ scalar.db.sql.cluster_mode.username=admin
+ scalar.db.sql.cluster_mode.password=admin
+
+ # TLS configurations
+ scalar.db.cluster.tls.enabled=true
+ scalar.db.cluster.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.db.cluster.tls.override_authority=envoy.scalar.example.com
+ EOF
+ ```
+
+1. Run the ScalarDB Cluster SQL CLI.
+
+ ```console
+ java -jar /scalardb-cluster-sql-cli-${SCALAR_DB_CLUSTER_VERSION}-all.jar --config /database.properties
+ ```
+
+1. Create a sample namespace named `ns`.
+
+ ```sql
+ CREATE NAMESPACE ns;
+ ```
+
+1. Create a sample table named `tbl` under the namespace `ns`.
+
+ ```sql
+ CREATE TABLE ns.tbl (a INT, b INT, c INT, PRIMARY KEY(a, b));
+ ```
+
+1. Insert sample records.
+
+ ```sql
+ INSERT INTO ns.tbl VALUES (1,2,3), (4,5,6), (7,8,9);
+ ```
+
+1. Select the sample records that you inserted.
+
+ ```sql
+ SELECT * FROM ns.tbl;
+ ```
+
+ [Command execution result]
+
+ ```sql
+ 0: scalardb> SELECT * FROM ns.tbl;
+ +---+---+---+
+ | a | b | c |
+ +---+---+---+
+ | 7 | 8 | 9 |
+ | 1 | 2 | 3 |
+ | 4 | 5 | 6 |
+ +---+---+---+
+ 3 rows selected (0.059 seconds)
+ ```
+
+1. Press `Ctrl + D` to exit from ScalarDB Cluster SQL CLI.
+
+ ```console
+ ^D
+ ```
+
+1. Exit from the client container.
+
+ ```console
+ exit
+ ```
+
+## Step 8. Delete all resources
+
+After completing the ScalarDB Cluster tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDB Cluster and PostgreSQL.
+
+ ```console
+ helm uninstall -n default scalardb-cluster postgresql-scalardb-cluster
+ ```
+
+1. Remove the self-signed CA.
+
+ ```
+ kubectl delete -f ./private-ca-custom-values.yaml
+ ```
+
+1. Uninstall cert-manager.
+
+ ```console
+ helm uninstall -n cert-manager cert-manager
+ ```
+
+1. Remove the client container.
+
+ ```
+ kubectl delete pod scalardb-cluster-client --grace-period 0 -n default
+ ```
+
+1. Remove the secret resources.
+
+ ```
+ kubectl delete secrets scalardb-credentials-secret self-signed-ca-cert-secret scalardb-cluster-envoy-tls-cert scalardb-cluster-tls-cert client-ca-cert
+ ```
+
+1. Remove the namespace `cert-manager`.
+
+ ```
+ kubectl delete ns cert-manager
+ ```
+
+1. Remove the working directory and the sample configuration files.
+
+ ```console
+ cd ${HOME}
+ ```
+
+ ```console
+ rm -rf ${HOME}/scalardb-cluster-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following tutorials:
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls.mdx
new file mode 100644
index 00000000..9c14843c
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb-cluster-tls.mdx
@@ -0,0 +1,633 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Getting Started with Helm Charts (ScalarDB Cluster with TLS)
+
+This tutorial explains how to get started with ScalarDB Cluster with TLS configurations by using Helm Charts on a Kubernetes cluster in a test environment. Before starting, you should already have a Mac or Linux environment for testing. In addition, although this tutorial mentions using **minikube**, the steps described should work in any Kubernetes cluster.
+
+## Requirements
+
+* You need to have a license key (trial license or commercial license) for ScalarDB Cluster. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact).
+* You need to use ScalarDB Cluster 3.12 or later, which supports TLS.
+
+## What you'll create
+
+In this tutorial, you'll deploy the following components on a Kubernetes cluster in the following way:
+
+```
++----------------------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +------------------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDB Cluster node | ---+ |
+| [Pod] | +-------+ | | +------------------------+ | |
+| | | | | |
+| +-----------+ +---------+ | +-------+ | +--------------------+ | +------------------------+ | +---------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Cluster node | ---+---> | PostgreSQL | |
+|  | (SQL CLI) |      | (Envoy) |    |  +-------+  |    | (ScalarDB Cluster) |    |  +------------------------+  |    | (Backend DB)  |  |
+| +-----------+ +---------+ | | +--------------------+ | | +---------------+ |
+| | +-------+ | | +------------------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDB Cluster node | ---+ |
+| +-------+ +------------------------+ |
+| |
++----------------------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+You'll also create the following private key and certificate files for TLS connections.
+
+```
+ +-------------------------------+
+ +---> | For Scalar Envoy |
+ | +-------------------------------+
+ | | envoy-key.pem |
+ | | envoy.pem |
++----------------------+ | +-------------------------------+
+| Self-signed CA | ---(Sign certificates)---+
++----------------------+ | +-------------------------------+
+| ca-key.pem | +---> | For ScalarDB Cluster |
+| ca.pem | +-------------------------------+
++----------------------+ | scalardb-cluster-key.pem |
+ | scalardb-cluster.pem |
+ +-------------------------------+
+```
+
+You'll set each private key and certificate file as follows to enable TLS in each connection.
+
+```
++--------------------------------+ +-----------------------------------------+ +-----------------------------------------+
+| Client | ---(CRUD/SQL requests)---> | Envoy for ScalarDB Cluster | ---> | ScalarDB Cluster nodes |
++--------------------------------+ +-----------------------------------------+ +-----------------------------------------+
+| ca.pem (to verify envoy.pem) | | envoy-key.pem | | scalardb-cluster-key.pem |
++--------------------------------+ | envoy.pem | | scalardb-cluster.pem |
+ | ca.pem (to verify scalardb-cluster.pem) | | ca.pem (used for health check) |
+ +-----------------------------------------+ +-----------------------------------------+
+```
+
+The following connections exist amongst the ScalarDB Cluster–related components:
+
+* **`Client - Envoy for ScalarDB Cluster`:** When you execute a CRUD API or SQL API function, the client accesses Envoy for ScalarDB Cluster.
+* **`Envoy for ScalarDB Cluster - ScalarDB Cluster`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDB Cluster.
+* **`ScalarDB Cluster node - ScalarDB Cluster node`:** A ScalarDB Cluster node accesses other ScalarDB Cluster nodes. In other words, the cluster's internal communications exist amongst all ScalarDB Cluster nodes.
+
+## Step 1. Start a Kubernetes cluster and install tools
+
+You need to prepare a Kubernetes cluster and install some tools (`kubectl`, `helm`, `cfssl`, and `cfssljson`). For more details on how to install them, see [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx).
+
+## Step 2. Start the PostgreSQL containers
+
+ScalarDB Cluster must use some type of database system as a backend database. In this tutorial, you'll use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for ScalarDB Cluster.
+
+ ```console
+ helm install postgresql-scalardb-cluster bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Check if the PostgreSQL containers are running.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-cluster-0 1/1 Running 0 34s
+ ```
+
+## Step 3. Create a working directory
+
+You'll create some configuration files and private key and certificate files locally. Be sure to create a working directory for those files.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ${HOME}/scalardb-cluster-test/certs/
+ ```
+
+## Step 4. Create private key and certificate files
+
+You'll create private key and certificate files.
+
+1. Change the working directory to `${HOME}/scalardb-cluster-test/certs/`.
+
+ ```console
+ cd ${HOME}/scalardb-cluster-test/certs/
+ ```
+
+1. Create a JSON file that includes CA information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/certs/ca.json
+ {
+ "CN": "scalar-test-ca",
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "Scalar Test CA"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create the CA private key and certificate files.
+
+ ```console
+ cfssl gencert -initca ca.json | cfssljson -bare ca
+ ```
+
+1. Create a JSON file that includes CA configurations.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/certs/ca-config.json
+ {
+ "signing": {
+ "default": {
+ "expiry": "87600h"
+ },
+ "profiles": {
+ "scalar-test-ca": {
+ "expiry": "87600h",
+ "usages": [
+ "signing",
+ "key encipherment",
+ "server auth"
+ ]
+ }
+ }
+ }
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Envoy information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/certs/envoy.json
+ {
+ "CN": "scalar-envoy",
+ "hosts": [
+ "envoy.scalar.example.com",
+ "localhost"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "Scalar Envoy Test"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes ScalarDB Cluster information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/certs/scalardb-cluster.json
+ {
+ "CN": "scalardb-cluster",
+ "hosts": [
+ "cluster.scalardb.example.com",
+ "localhost"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "ScalarDB Cluster Test"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create private key and certificate files for Envoy.
+
+ ```console
+ cfssl gencert -ca ca.pem -ca-key ca-key.pem -config ca-config.json -profile scalar-test-ca envoy.json | cfssljson -bare envoy
+ ```
+
+1. Create private key and certificate files for ScalarDB Cluster.
+
+ ```console
+ cfssl gencert -ca ca.pem -ca-key ca-key.pem -config ca-config.json -profile scalar-test-ca scalardb-cluster.json | cfssljson -bare scalardb-cluster
+ ```
+
+1. Confirm that the private key and certificate files were created.
+
+ ```console
+ ls -1
+ ```
+
+ [Command execution result]
+
+ ```console
+ ca-config.json
+ ca-key.pem
+ ca.csr
+ ca.json
+ ca.pem
+ envoy-key.pem
+ envoy.csr
+ envoy.json
+ envoy.pem
+ scalardb-cluster-key.pem
+ scalardb-cluster.csr
+ scalardb-cluster.json
+ scalardb-cluster.pem
+ ```
+
+## Step 5. Deploy ScalarDB Cluster on the Kubernetes cluster by using Helm Charts
+
+1. Add the Scalar Helm Charts repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Set your license key and certificate as environment variables. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact). For details about the value of `SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM`, see [How to Configure a Product License Key](https://scalardb.scalar-labs.com/docs/latest/scalar-licensing/).
+
+ ```console
+   SCALAR_DB_CLUSTER_LICENSE_KEY='<your license key>'
+   SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM='<certificate for checking the license key>'
+ ```
+
+1. Create a custom values file for ScalarDB Cluster (`scalardb-cluster-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/scalardb-cluster-custom-values.yaml
+ envoy:
+
+ enabled: true
+
+ tls:
+ downstream:
+ enabled: true
+ certChainSecret: "envoy-tls-cert"
+ privateKeySecret: "envoy-tls-key"
+ upstream:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+ caRootCertSecret: "scalardb-cluster-tls-ca"
+
+ scalardbCluster:
+
+ image:
+ repository: "ghcr.io/scalar-labs/scalardb-cluster-node-byol-premium"
+
+ scalardbClusterNodeProperties: |
+      ### Necessary configurations for deployment on Kubernetes
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+
+ ### Storage configurations
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb-cluster.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DB_CLUSTER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_CLUSTER_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+
+ ### SQL configurations
+ scalar.db.sql.enabled=true
+
+ ### Auth configurations
+ scalar.db.cluster.auth.enabled=true
+ scalar.db.cross_partition_scan.enabled=true
+
+ ### TLS configurations
+ scalar.db.cluster.tls.enabled=true
+ scalar.db.cluster.tls.ca_root_cert_path=/tls/scalardb-cluster/certs/ca.crt
+ scalar.db.cluster.node.tls.cert_chain_path=/tls/scalardb-cluster/certs/tls.crt
+ scalar.db.cluster.node.tls.private_key_path=/tls/scalardb-cluster/certs/tls.key
+ scalar.db.cluster.tls.override_authority=cluster.scalardb.example.com
+
+ ### License key configurations
+ scalar.db.cluster.node.licensing.license_key=${env:SCALAR_DB_CLUSTER_LICENSE_KEY}
+ scalar.db.cluster.node.licensing.license_check_cert_pem=${env:SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "cluster.scalardb.example.com"
+ caRootCertSecret: "scalardb-cluster-tls-ca"
+ certChainSecret: "scalardb-cluster-tls-cert"
+ privateKeySecret: "scalardb-cluster-tls-key"
+
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `scalardb-credentials-secret` that includes credentials and license keys.
+
+ ```console
+ kubectl create secret generic scalardb-credentials-secret \
+ --from-literal=SCALAR_DB_CLUSTER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DB_CLUSTER_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DB_CLUSTER_LICENSE_KEY="${SCALAR_DB_CLUSTER_LICENSE_KEY}" \
+ --from-file=SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DB_CLUSTER_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Create secret resources that include the private key and certificate files for Envoy.
+
+ ```console
+ kubectl create secret generic envoy-tls-cert --from-file=tls.crt=${HOME}/scalardb-cluster-test/certs/envoy.pem -n default
+ kubectl create secret generic envoy-tls-key --from-file=tls.key=${HOME}/scalardb-cluster-test/certs/envoy-key.pem -n default
+ ```
+
+1. Create secret resources that include the key, certificate, and CA certificate files for ScalarDB Cluster.
+
+ ```console
+ kubectl create secret generic scalardb-cluster-tls-ca --from-file=ca.crt=${HOME}/scalardb-cluster-test/certs/ca.pem -n default
+ kubectl create secret generic scalardb-cluster-tls-cert --from-file=tls.crt=${HOME}/scalardb-cluster-test/certs/scalardb-cluster.pem -n default
+ kubectl create secret generic scalardb-cluster-tls-key --from-file=tls.key=${HOME}/scalardb-cluster-test/certs/scalardb-cluster-key.pem -n default
+ ```
+
+1. Set the chart version of ScalarDB Cluster.
+
+ ```console
+ SCALAR_DB_CLUSTER_VERSION=3.12.2
+ SCALAR_DB_CLUSTER_CHART_VERSION=$(helm search repo scalar-labs/scalardb-cluster -l | grep -F "${SCALAR_DB_CLUSTER_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDB Cluster.
+
+ ```console
+ helm install scalardb-cluster scalar-labs/scalardb-cluster -f ${HOME}/scalardb-cluster-test/scalardb-cluster-custom-values.yaml --version ${SCALAR_DB_CLUSTER_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDB Cluster pods are deployed.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-cluster-0 1/1 Running 0 4m30s
+ scalardb-cluster-envoy-7cc948dfb-4rb8l 1/1 Running 0 18s
+ scalardb-cluster-envoy-7cc948dfb-hwt96 1/1 Running 0 18s
+ scalardb-cluster-envoy-7cc948dfb-rzbrx 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-445kj 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-4z54q 1/1 Running 0 18s
+ scalardb-cluster-node-7c6959c79d-vcv96 1/1 Running 0 18s
+ ```
+ If the ScalarDB Cluster pods are deployed properly, the `STATUS` column for those pods will be displayed as `Running`.
+
+1. Check if the ScalarDB Cluster services are deployed.
+
+ ```console
+ kubectl get svc -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 7h34m
+ postgresql-scalardb-cluster ClusterIP 10.96.92.27 5432/TCP 4m52s
+ postgresql-scalardb-cluster-hl ClusterIP None 5432/TCP 4m52s
+ scalardb-cluster-envoy ClusterIP 10.96.250.175 60053/TCP 40s
+ scalardb-cluster-envoy-metrics ClusterIP 10.96.40.197 9001/TCP 40s
+ scalardb-cluster-headless ClusterIP None 60053/TCP 40s
+ scalardb-cluster-metrics ClusterIP 10.96.199.135 9080/TCP 40s
+ ```
+
+ If the ScalarDB Cluster services are deployed properly, you can see private IP addresses in the `CLUSTER-IP` column.
+
+:::note
+
+The `CLUSTER-IP` values for `postgresql-scalardb-cluster-hl` and `scalardb-cluster-headless` are `None` since they have no IP addresses.
+
+:::
+
+## Step 6. Start a client container
+
+You'll use the CA certificate file in a client container. Therefore, you'll need to create a secret resource and mount it to the client container.
+
+1. Create a secret resource named `client-ca-cert`.
+
+ ```console
+ kubectl create secret generic client-ca-cert --from-file=ca.crt=${HOME}/scalardb-cluster-test/certs/ca.pem -n default
+ ```
+
+1. Create a manifest file for a client pod (`scalardb-cluster-client-pod.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardb-cluster-client"
+ spec:
+ containers:
+ - name: scalardb-cluster-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ env:
+ - name: SCALAR_DB_CLUSTER_VERSION
+ value: SCALAR_DB_CLUSTER_CLIENT_POD_SCALAR_DB_CLUSTER_VERSION
+ volumeMounts:
+ - name: "client-ca-cert"
+ mountPath: "/certs/"
+ readOnly: true
+ volumes:
+ - name: "client-ca-cert"
+ secret:
+ secretName: "client-ca-cert"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Set the ScalarDB Cluster version in the manifest file.
+
+ ```console
+ sed -i s/SCALAR_DB_CLUSTER_CLIENT_POD_SCALAR_DB_CLUSTER_VERSION/${SCALAR_DB_CLUSTER_VERSION}/ ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml
+ ```
+
+1. Deploy the client pod.
+
+ ```console
+ kubectl apply -f ${HOME}/scalardb-cluster-test/scalardb-cluster-client-pod.yaml -n default
+ ```
+
+1. Check if the client container is running.
+
+ ```console
+ kubectl get pod scalardb-cluster-client -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardb-cluster-client 1/1 Running 0 26s
+ ```
+
+## Step 7. Run the ScalarDB Cluster SQL CLI in the client container
+
+1. Run bash in the client container.
+
+ ```console
+ kubectl exec -it scalardb-cluster-client -n default -- bash
+ ```
+ The commands in the following steps must be run in the client container.
+
+1. Download the ScalarDB Cluster SQL CLI from [Releases](https://github.com/scalar-labs/scalardb/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v${SCALAR_DB_CLUSTER_VERSION}/scalardb-cluster-sql-cli-${SCALAR_DB_CLUSTER_VERSION}-all.jar
+ ```
+
+1. Create a `database.properties` file and add configurations.
+
+ ```console
+ cat << 'EOF' > /database.properties
+ # ScalarDB Cluster configurations
+ scalar.db.sql.connection_mode=cluster
+ scalar.db.sql.cluster_mode.contact_points=indirect:scalardb-cluster-envoy.default.svc.cluster.local
+
+ # Auth configurations
+ scalar.db.cluster.auth.enabled=true
+ scalar.db.sql.cluster_mode.username=admin
+ scalar.db.sql.cluster_mode.password=admin
+
+ # TLS configurations
+ scalar.db.cluster.tls.enabled=true
+ scalar.db.cluster.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.db.cluster.tls.override_authority=envoy.scalar.example.com
+ EOF
+ ```
+
+1. Run the ScalarDB Cluster SQL CLI.
+
+ ```console
+ java -jar /scalardb-cluster-sql-cli-${SCALAR_DB_CLUSTER_VERSION}-all.jar --config /database.properties
+ ```
+
+1. Create a sample namespace named `ns`.
+
+ ```sql
+ CREATE NAMESPACE ns;
+ ```
+
+1. Create a sample table named `tbl` under the namespace `ns`.
+
+ ```sql
+ CREATE TABLE ns.tbl (a INT, b INT, c INT, PRIMARY KEY(a, b));
+ ```
+
+1. Insert sample records.
+
+ ```sql
+ INSERT INTO ns.tbl VALUES (1,2,3), (4,5,6), (7,8,9);
+ ```
+
+1. Select the sample records that you inserted.
+
+ ```sql
+ SELECT * FROM ns.tbl;
+ ```
+
+ [Command execution result]
+
+ ```sql
+ 0: scalardb> SELECT * FROM ns.tbl;
+ +---+---+---+
+ | a | b | c |
+ +---+---+---+
+ | 7 | 8 | 9 |
+ | 1 | 2 | 3 |
+ | 4 | 5 | 6 |
+ +---+---+---+
+ 3 rows selected (0.059 seconds)
+ ```
+
+1. Press `Ctrl + D` to exit from ScalarDB Cluster SQL CLI.
+
+ ```console
+ ^D
+ ```
+
+1. Exit from the client container.
+
+ ```console
+ exit
+ ```
+
+## Step 8. Delete all resources
+
+After completing the ScalarDB Cluster tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDB Cluster and PostgreSQL.
+
+ ```console
+ helm uninstall -n default scalardb-cluster postgresql-scalardb-cluster
+ ```
+
+1. Remove the client container.
+
+   ```console
+ kubectl delete pod scalardb-cluster-client --grace-period 0 -n default
+ ```
+
+1. Remove the secret resources.
+
+   ```console
+ kubectl delete secrets scalardb-credentials-secret scalardb-cluster-tls-key scalardb-cluster-tls-cert scalardb-cluster-tls-ca envoy-tls-key envoy-tls-cert client-ca-cert
+ ```
+
+1. Remove the working directory and sample files (configuration file, private key, and certificate).
+
+ ```console
+ cd ${HOME}
+ ```
+
+ ```console
+ rm -rf ${HOME}/scalardb-cluster-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following tutorials:
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardb.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb.mdx
new file mode 100644
index 00000000..d9402c64
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardb.mdx
@@ -0,0 +1,382 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+ - Deprecated
+---
+
+# [Deprecated] Getting Started with Helm Charts (ScalarDB Server)
+
+:::note
+
+ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart) instead.
+
+:::
+
+This document explains how to get started with ScalarDB Server using Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Requirements
+
+* You need to subscribe to ScalarDB in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get container images (`scalardb-server` and `scalardb-envoy`). Please refer to the following documents for more details.
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++--------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| |
+| [Pod] [Pod] [Pod] [Pod] |
+| |
+| +-------+ +-----------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ |
+| | +-------+ | | +-----------------+ | |
+| | | | | |
+| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Server | ---+---> | PostgreSQL | |
+| +--------+ | (Envoy) | | +-------+ | | (ScalarDB Server) | | +-----------------+ | +------------+ |
+| +---------+ | | +-------------------+ | | |
+| | +-------+ | | +-----------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ |
+| +-------+ +-----------------+ |
+| |
++--------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start a PostgreSQL container
+
+ScalarDB uses some kind of database system as a backend database. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL.
+ ```console
+ helm install postgresql-scalardb bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL container is running.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-0 1/1 Running 0 2m42s
+ ```
+
+## Step 3. Deploy ScalarDB Server on the Kubernetes cluster using Helm Charts
+
+1. Add the Scalar helm repository.
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDB container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+ * Azure Marketplace
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+       --docker-server=<your private container registry server> \
+       --docker-username=<your username> \
+       --docker-password=<your password>
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+1. Create a custom values file for ScalarDB Server (scalardb-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > scalardb-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ scalardb:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-server"
+ tag: "3.7.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }}
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > scalardb-custom-values.yaml
+ envoy:
+ image:
+        repository: "<your private container registry>/scalarinc/scalardb-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ scalardb:
+ image:
+        repository: "<your private container registry>/scalarinc/scalardb-server"
+ tag: "3.7.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }}
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+
+1. Create a Secret resource that includes a username and password for PostgreSQL.
+ ```console
+ kubectl create secret generic scalardb-credentials-secret \
+ --from-literal=SCALAR_DB_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DB_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy ScalarDB Server.
+ ```console
+ helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+
+1. Check if the ScalarDB Server pods are deployed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-0 1/1 Running 0 9m48s
+ scalardb-765598848b-75csp 1/1 Running 0 6s
+ scalardb-765598848b-w864f 1/1 Running 0 6s
+ scalardb-765598848b-x8rqj 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-kpz2p 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-n74tk 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-zbrwz 1/1 Running 0 6s
+ ```
+ If the ScalarDB Server Pods are deployed properly, you can see the STATUS are **Running**.
+
+1. Check if the ScalarDB Server services are deployed.
+ ```console
+ kubectl get svc
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-scalardb ClusterIP 10.109.118.122 5432/TCP 10m
+ postgresql-scalardb-hl ClusterIP None 5432/TCP 10m
+ scalardb-envoy ClusterIP 10.110.110.250 60051/TCP 41s
+ scalardb-envoy-metrics ClusterIP 10.107.98.227 9001/TCP 41s
+ scalardb-headless ClusterIP None 60051/TCP 41s
+ scalardb-metrics ClusterIP 10.108.188.10 8080/TCP 41s
+ ```
+   If the ScalarDB Server services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `postgresql-scalardb-hl` and `scalardb-headless` have no CLUSTER-IP because they are headless services.)
+
+## Step 4. Start a Client container
+
+1. Start a Client container on the Kubernetes cluster.
+ ```console
+ kubectl run scalardb-client --image eclipse-temurin:8 --command sleep inf
+ ```
+
+1. Check if the Client container is running.
+ ```console
+ kubectl get pod scalardb-client
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardb-client 1/1 Running 0 23s
+ ```
+
+## Step 5. Run ScalarDB sample applications in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDB, please refer to the [Getting Started with ScalarDB](https://scalardb.scalar-labs.com/docs/latest/getting-started-with-scalardb).
+
+1. Run bash in the Client container.
+ ```console
+ kubectl exec -it scalardb-client -- bash
+ ```
+ After this step, run each command in the Client container.
+
+1. Install the git and curl commands in the Client container.
+ ```console
+ apt update && apt install -y git curl
+ ```
+
+1. Clone ScalarDB git repository.
+ ```console
+ git clone https://github.com/scalar-labs/scalardb.git
+ ```
+
+1. Change the directory to `scalardb/`.
+ ```console
+ cd scalardb/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+ /scalardb
+ ```
+
+1. Change branch to arbitrary version.
+ ```console
+ git checkout -b v3.7.0 refs/tags/v3.7.0
+ ```
+ ```console
+ git branch
+ ```
+ [Command execution result]
+
+ ```console
+ master
+ * v3.7.0
+ ```
+
+ If you want to use another version, please specify the version (tag) you want to use.
+
+1. Change the directory to `docs/getting-started/`.
+ ```console
+ cd docs/getting-started/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+ /scalardb/docs/getting-started
+ ```
+
+1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases).
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v3.7.0/scalardb-schema-loader-3.7.0.jar
+ ```
+ You need to use the same version of ScalarDB and Schema Loader.
+
+1. Create a configuration file (scalardb.properties) to access ScalarDB Server on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' > scalardb.properties
+ scalar.db.contact_points=scalardb-envoy.default.svc.cluster.local
+ scalar.db.contact_port=60051
+ scalar.db.storage=grpc
+ scalar.db.transaction_manager=grpc
+ EOF
+ ```
+
+1. Create a JSON file (emoney-transaction.json) that defines DB Schema for the sample applications.
+ ```console
+ cat << 'EOF' > emoney-transaction.json
+ {
+ "emoney.account": {
+ "transaction": true,
+ "partition-key": [
+ "id"
+ ],
+ "clustering-key": [],
+ "columns": {
+ "id": "TEXT",
+ "balance": "INT"
+ }
+ }
+ }
+ EOF
+ ```
+
+1. Run Schema Loader (Create sample TABLE).
+ ```console
+ java -jar ./scalardb-schema-loader-3.7.0.jar --config ./scalardb.properties -f emoney-transaction.json --coordinator
+ ```
+
+1. Run the sample applications.
+ * Charge `1000` to `user1`:
+ ```console
+ ./gradlew run --args="-action charge -amount 1000 -to user1"
+ ```
+ * Charge `0` to `merchant1` (Just create an account for `merchant1`):
+ ```console
+ ./gradlew run --args="-action charge -amount 0 -to merchant1"
+ ```
+ * Pay `100` from `user1` to `merchant1`:
+ ```console
+ ./gradlew run --args="-action pay -amount 100 -from user1 -to merchant1"
+ ```
+ * Get the balance of `user1`:
+ ```console
+ ./gradlew run --args="-action getBalance -id user1"
+ ```
+ * Get the balance of `merchant1`:
+ ```console
+ ./gradlew run --args="-action getBalance -id merchant1"
+ ```
+
+1. (Optional) You can see the inserted and modified (INSERT/UPDATE) data through the sample applications using the following command. (This command needs to run on your localhost, not on the Client container.)
+ ```console
+ kubectl exec -it postgresql-scalardb-0 -- bash -c 'export PGPASSWORD=postgres && psql -U postgres -d postgres -c "SELECT * FROM emoney.account"'
+ ```
+ [Command execution result]
+ ```sql
+ id | balance | tx_id | tx_state | tx_version | tx_prepared_at | tx_committed_at | before_tx_id | before_tx_state | before_tx_version | before_tx_prepared_at | before_tx_committed_at | before_balance
+ -----------+---------+--------------------------------------+----------+------------+----------------+-----------------+--------------------------------------+-----------------+-------------------+-----------------------+------------------------+----------------
+ merchant1 | 100 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 3633df99-a8ed-4301-a8b9-db1344807d7b | 3 | 1 | 1667361902466 | 1667361902485 | 0
+ user1 | 900 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 5520cba4-625a-4886-b81f-6089bf846d18 | 3 | 1 | 1667361897283 | 1667361897317 | 1000
+ (2 rows)
+ ```
+ * Note:
+ * Usually, you need to access data (records) through ScalarDB. The above command is used to explain and confirm the working of the sample applications.
+
+## Step 6. Delete all resources
+
+After completing the ScalarDB Server tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDB Server and PostgreSQL.
+ ```console
+ helm uninstall scalardb postgresql-scalardb
+ ```
+
+1. Remove the Client container.
+   ```console
+ kubectl delete pod scalardb-client --force --grace-period 0
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls-cert-manager.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls-cert-manager.mdx
new file mode 100644
index 00000000..4689d15e
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls-cert-manager.mdx
@@ -0,0 +1,948 @@
+---
+---
+
+# Getting Started with Helm Charts (ScalarDL Ledger and Auditor with TLS by Using cert-manager / Auditor Mode)
+
+This tutorial explains how to get started with ScalarDL Ledger and ScalarDL Auditor with TLS configurations by using Helm Charts and cert-manager on a Kubernetes cluster as a test environment. Before starting, you should already have a Mac or Linux environment for testing. In addition, although this tutorial mentions using **minikube**, the steps described should work in any Kubernetes cluster.
+
+## Requirements
+
+* You need to have a license key (trial license or commercial license) for ScalarDL. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact).
+* You need to use ScalarDL 3.9 or later, which supports TLS.
+
+:::note
+
+To make Byzantine-fault detection with auditing work properly, ScalarDL Ledger and ScalarDL Auditor should be deployed and managed in different administrative domains. However, in this tutorial, we will deploy ScalarDL Ledger and ScalarDL Auditor in the same Kubernetes cluster to make the test easier.
+
+:::
+
+## What you'll create
+
+In this tutorial, you'll deploy the following components on a Kubernetes cluster in the following way:
+
+```
++-----------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +---------+ |
+| +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| | +-------+ | | +---------+ | |
+| | | | | |
+| +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Ledger | ---+---> | PostgreSQL | |
+| | | (Envoy) | | +-------+ | | (Ledger) | | +---------+ | | (For Ledger) | |
+| | +---------+ | | +-----------+ | | +---------------+ |
+| [Pod] | | +-------+ | | +---------+ | |
+| | +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | Client | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| | | +-------+ | | +---------+ | |
+| | | | | | |
+| | +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Auditor | ---+---> | PostgreSQL | |
+| | (Envoy) | | +-------+ | | (Auditor) | | +---------+ | | (For Auditor) | |
+| +---------+ | | +-----------+ | | +---------------+ |
+| | +-------+ | | +---------+ | |
+| +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| +-------+ +---------+ |
+| |
+| +--------------------------------------------------------------------------+ +---------------------+ |
+| | cert-manager (create private key and certificate for Envoy and ScalarDL) | | Issuer (Private CA) | |
+| +--------------------------------------------------------------------------+ +---------------------+ |
+| |
++-----------------------------------------------------------------------------------------------------------------------------+
+```
+
+cert-manager automatically creates the following private key and certificate files for TLS connections.
+
+```
+ +----------------------+
+ +---> | For Scalar Envoy |
+ | +----------------------+
+ | | tls.key |
+ | | tls.crt |
+ | +----------------------+
+ |
++-------------------------+ | +----------------------+
+| Issuer (Self-signed CA) | ---(Sign certificates)---+---> | For ScalarDL Ledger |
++-------------------------+ | +----------------------+
+| tls.key | | | tls.key |
+| tls.crt | | | tls.crt |
+| ca.crt | | +----------------------+
++-------------------------+ |
+ | +----------------------+
+ +---> | For ScalarDL Auditor |
+ +----------------------+
+ | tls.key |
+ | tls.crt |
+ +----------------------+
+```
+
+Scalar Helm Charts automatically mount each private key and certificate file for Envoy and ScalarDL as follows to enable TLS in each connection. You'll manually mount a root CA certificate file on the client.
+
+```
+ +------------------------------------------------+ +--------------------------------------+
+ +-------(Normal request)-----> | Envoy for ScalarDL Ledger | ---> | ScalarDL Ledger |
+ | +------------------------------------------------+ +--------------------------------------+
+ | +---(Recovery request)---> | tls.key | ---> | tls.key |
+ | | | tls.crt | | tls.crt |
+ | | | ca.crt (to verify tls.crt of ScalarDL Ledger) | | ca.crt (to check health) |
+ | | +------------------------------------------------+ +--------------------------------------+
++---------------------------------------+ | |
+| Client | ---+ |
++---------------------------------------+ | +------------------------------------------------------------------------------------------------------------------------------+
+| ca.crt (to verify tls.crt of Envoy) | | |
++---------------------------------------+ | |
+ | +------------------------------------------------+ +--------------------------------------+ |
+ +-------(Normal request)-----> | Envoy for ScalarDL Auditor | ---> | ScalarDL Auditor | ---+
+ +------------------------------------------------+ +--------------------------------------+
+ | tls.key | | tls.key |
+ | tls.crt | | tls.crt |
+ | ca.crt (to verify tls.crt of ScalarDL Auditor) | | ca.crt (to check health) |
+ +------------------------------------------------+ | ca.crt (to verify tls.crt of Envoy) |
+ +--------------------------------------+
+```
+
+The following connections exist amongst the ScalarDL-related components:
+
+* **`Client - Envoy for ScalarDL Ledger`:** When you execute a ScalarDL API function, the client accesses Envoy for ScalarDL Ledger.
+* **`Client - Envoy for ScalarDL Auditor`:** When you execute a ScalarDL API function, the client accesses Envoy for ScalarDL Auditor.
+* **`Envoy for ScalarDL Ledger - ScalarDL Ledger`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDL Ledger.
+* **`Envoy for ScalarDL Auditor - ScalarDL Auditor`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDL Auditor.
+* **`ScalarDL Auditor - Envoy for ScalarDL Ledger (ScalarDL Ledger)`:** When ScalarDL needs to run the recovery process to keep data consistent, ScalarDL Auditor runs the request against ScalarDL Ledger via Envoy.
+
+## Step 1. Start a Kubernetes cluster and install tools
+
+You need to prepare a Kubernetes cluster and install some tools (`kubectl`, `helm`, `cfssl`, and `cfssljson`). For more details on how to install them, see [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx).
+
+## Step 2. Start the PostgreSQL containers
+
+ScalarDL Ledger and ScalarDL Auditor must use some type of database system as a backend database. In this tutorial, you'll use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for Ledger.
+
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Deploy PostgreSQL for Auditor.
+
+ ```console
+ helm install postgresql-auditor bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Check if the PostgreSQL containers are running.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 11s
+ postgresql-ledger-0 1/1 Running 0 16s
+ ```
+
+## Step 3. Create a working directory
+
+You'll create some configuration files and private key and certificate files locally. Be sure to create a working directory for those files.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ${HOME}/scalardl-test/
+ ```
+
+## Step 4. Deploy cert-manager and issuer resource
+
+This tutorial uses cert-manager to issue and manage private keys and certificates. You can deploy cert-manager on the Kubernetes cluster as follows:
+
+1. Add the Jetstack helm repository.
+
+ ```console
+ helm repo add jetstack https://charts.jetstack.io
+ ```
+
+1. Deploy cert-manager.
+
+ ```console
+ helm install cert-manager jetstack/cert-manager \
+ --create-namespace \
+ --set installCRDs=true \
+ -n cert-manager
+ ```
+
+1. Check if the cert-manager containers are running.
+
+ ```console
+ kubectl get pod -n cert-manager
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ cert-manager-6dc66985d4-6lvtt 1/1 Running 0 26s
+ cert-manager-cainjector-c7d4dbdd9-xlrpn 1/1 Running 0 26s
+ cert-manager-webhook-847d7676c9-ckcz2 1/1 Running 0 26s
+ ```
+
+1. Change the working directory to `${HOME}/scalardl-test/`.
+
+ ```console
+ cd ${HOME}/scalardl-test/
+ ```
+
+1. Create a custom values file for private CA (`private-ca-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/private-ca-custom-values.yaml
+ apiVersion: cert-manager.io/v1
+ kind: Issuer
+ metadata:
+ name: self-signed-issuer
+ spec:
+ selfSigned: {}
+ ---
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+ metadata:
+ name: self-signed-ca-cert
+ spec:
+ isCA: true
+ commonName: self-signed-ca
+ secretName: self-signed-ca-cert-secret
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ name: self-signed-issuer
+ kind: Issuer
+ group: cert-manager.io
+ ---
+ apiVersion: cert-manager.io/v1
+ kind: Issuer
+ metadata:
+ name: self-signed-ca
+ spec:
+ ca:
+ secretName: self-signed-ca-cert-secret
+ EOF
+ ```
+
+1. Deploy self-signed CA.
+
+ ```console
+ kubectl apply -f ./private-ca-custom-values.yaml
+ ```
+
+1. Check if the issuer resources are `True`.
+
+ ```console
+ kubectl get issuer
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY AGE
+ self-signed-ca True 6s
+ self-signed-issuer True 6s
+ ```
+
+## Step 5. Create database schemas for ScalarDL Ledger and ScalarDL Auditor by using Helm Charts
+
+You'll deploy two ScalarDL Schema Loader pods on the Kubernetes cluster by using Helm Charts. The ScalarDL Schema Loader will create the database schemas for ScalarDL Ledger and Auditor in PostgreSQL.
+
+1. Add the Scalar Helm Charts repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a custom values file for ScalarDL Schema Loader for Ledger (`schema-loader-ledger-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_LEDGER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_LEDGER_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+ secretName: "schema-ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Schema Loader for Auditor (`schema-loader-auditor-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_AUDITOR_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_AUDITOR_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+ secretName: "schema-auditor-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `schema-ledger-credentials-secret` that includes a username and password for PostgreSQL for ScalarDL Ledger.
+
+ ```console
+ kubectl create secret generic schema-ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres \
+ -n default
+ ```
+
+1. Create a secret resource named `schema-auditor-credentials-secret` that includes a username and password for PostgreSQL for ScalarDL Auditor.
+
+ ```console
+ kubectl create secret generic schema-auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres \
+ -n default
+ ```
+
+1. Set the chart version of ScalarDL Schema Loader.
+
+ ```console
+ SCALAR_DL_VERSION=3.9.1
+ SCALAR_DL_SCHEMA_LOADER_CHART_VERSION=$(helm search repo scalar-labs/schema-loading -l | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDL Schema Loader for ScalarDL Ledger.
+
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ${HOME}/scalardl-test/schema-loader-ledger-custom-values.yaml --version ${SCALAR_DL_SCHEMA_LOADER_CHART_VERSION} -n default
+ ```
+
+1. Deploy ScalarDL Schema Loader for ScalarDL Auditor.
+
+ ```console
+ helm install schema-loader-auditor scalar-labs/schema-loading -f ${HOME}/scalardl-test/schema-loader-auditor-custom-values.yaml --version ${SCALAR_DL_SCHEMA_LOADER_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDL Schema Loader pods are deployed with the status `Completed`.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 2m56s
+ postgresql-ledger-0 1/1 Running 0 3m1s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 6s
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 10s
+ ```
+
+ If the status of the ScalarDL Schema Loader pods are **ContainerCreating** or **Running**, wait for the `STATUS` column for those pods to show as `Completed`.
+
+## Step 6. Deploy ScalarDL Ledger and ScalarDL Auditor on the Kubernetes cluster by using Helm Charts
+
+1. Set your license key and certificate as environment variables. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact). For details about the values to set for the license key and certificate, see [How to Configure a Product License Key](https://scalardl.scalar-labs.com/docs/latest/scalar-licensing/).
+
+ ```console
+ SCALAR_DL_LEDGER_LICENSE_KEY=''
+ SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM=''
+ SCALAR_DL_AUDITOR_LICENSE_KEY=''
+ SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM=''
+ ```
+
+1. Create a custom values file for ScalarDL Ledger (`scalardl-ledger-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+
+ tls:
+ downstream:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - envoy.scalar.example.com
+ upstream:
+ enabled: true
+ overrideAuthority: "ledger.scalardl.example.com"
+
+ ledger:
+
+ image:
+ repository: "ghcr.io/scalar-labs/scalardl-ledger-byol"
+
+ ledgerProperties: |
+ ### Storage configurations
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_LEDGER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_LEDGER_POSTGRES_PASSWORD}
+
+ ### Ledger configurations
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.authentication.method=hmac
+ scalar.dl.ledger.authentication.hmac.cipher_key=${env:SCALAR_DL_LEDGER_HMAC_CIPHER_KEY}
+ scalar.dl.ledger.servers.authentication.hmac.secret_key=${env:SCALAR_DL_LEDGER_HMAC_SECRET_KEY}
+
+ ### TLS configurations
+ scalar.dl.ledger.server.tls.enabled=true
+ scalar.dl.ledger.server.tls.cert_chain_path=/tls/scalardl-ledger/certs/tls.crt
+ scalar.dl.ledger.server.tls.private_key_path=/tls/scalardl-ledger/certs/tls.key
+
+ ### License key configurations
+ scalar.dl.licensing.license_key=${env:SCALAR_DL_LEDGER_LICENSE_KEY}
+ scalar.dl.licensing.license_check_cert_pem=${env:SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "ledger.scalardl.example.com"
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - ledger.scalardl.example.com
+
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Auditor (`scalardl-auditor-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+
+ tls:
+ downstream:
+ enabled: true
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - envoy.scalar.example.com
+ upstream:
+ enabled: true
+ overrideAuthority: "auditor.scalardl.example.com"
+
+
+ auditor:
+
+ image:
+ repository: "ghcr.io/scalar-labs/scalardl-auditor-byol"
+
+ auditorProperties: |
+ ### Storage configurations
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_AUDITOR_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_AUDITOR_POSTGRES_PASSWORD}
+
+ ### Auditor configurations
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.authentication.method=hmac
+ scalar.dl.auditor.authentication.hmac.cipher_key=${env:SCALAR_DL_AUDITOR_HMAC_CIPHER_KEY}
+ scalar.dl.auditor.servers.authentication.hmac.secret_key=${env:SCALAR_DL_AUDITOR_HMAC_SECRET_KEY}
+
+ ### TLS configurations
+ scalar.dl.auditor.server.tls.enabled=true
+ scalar.dl.auditor.server.tls.cert_chain_path=/tls/scalardl-auditor/certs/tls.crt
+ scalar.dl.auditor.server.tls.private_key_path=/tls/scalardl-auditor/certs/tls.key
+ scalar.dl.auditor.tls.enabled=true
+ scalar.dl.auditor.tls.ca_root_cert_path=/tls/scalardl-ledger/certs/ca.crt
+ scalar.dl.auditor.tls.override_authority=envoy.scalar.example.com
+
+ ### License key configurations
+ scalar.dl.licensing.license_key=${env:SCALAR_DL_AUDITOR_LICENSE_KEY}
+ scalar.dl.licensing.license_check_cert_pem=${env:SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "auditor.scalardl.example.com"
+ certManager:
+ enabled: true
+ issuerRef:
+ name: self-signed-ca
+ dnsNames:
+ - auditor.scalardl.example.com
+
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `ledger-credentials-secret` that includes credentials and a license key.
+
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DL_LEDGER_HMAC_CIPHER_KEY=ledger-hmac-cipher-key \
+ --from-literal=SCALAR_DL_LEDGER_HMAC_SECRET_KEY=scalardl-hmac-secret-key \
+ --from-literal=SCALAR_DL_LEDGER_LICENSE_KEY="${SCALAR_DL_LEDGER_LICENSE_KEY}" \
+ --from-file=SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Create a secret resource named `auditor-credentials-secret` that includes credentials and a license key.
+
+ ```console
+ kubectl create secret generic auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_HMAC_CIPHER_KEY=auditor-hmac-cipher-key \
+ --from-literal=SCALAR_DL_AUDITOR_HMAC_SECRET_KEY=scalardl-hmac-secret-key \
+ --from-literal=SCALAR_DL_AUDITOR_LICENSE_KEY="${SCALAR_DL_AUDITOR_LICENSE_KEY}" \
+ --from-file=SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Create a secret resource named `auditor-keys` to disable the `digital-signature` authentication method. In this tutorial, you'll use the `hmac` authentication method instead of `digital-signature`.
+
+ ```console
+ kubectl create secret generic auditor-keys \
+ --from-literal=tls.key=dummy-data-to-disable-digital-signature-method \
+ --from-literal=certificate=dummy-data-to-disable-digital-signature-method \
+ -n default
+ ```
+
+    Note: If you use `hmac` as the authentication method, you have to create a dummy secret named `auditor-keys` to disable `digital-signature` on the Helm Chart side.
+
+1. Set the chart version of ScalarDL Ledger and ScalarDL Auditor.
+
+ ```console
+ SCALAR_DL_LEDGER_CHART_VERSION=$(helm search repo scalar-labs/scalardl -l | grep -v -e "scalar-labs/scalardl-audit" | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ SCALAR_DL_AUDITOR_CHART_VERSION=$(helm search repo scalar-labs/scalardl-audit -l | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDL Ledger.
+
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ${HOME}/scalardl-test/scalardl-ledger-custom-values.yaml --version ${SCALAR_DL_LEDGER_CHART_VERSION} -n default
+ ```
+
+1. Deploy ScalarDL Auditor.
+
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ${HOME}/scalardl-test/scalardl-auditor-custom-values.yaml --version ${SCALAR_DL_AUDITOR_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDL Ledger and ScalarDL Auditor pods are deployed.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 14m
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-auditor-auditor-5b885ff4c8-fwkpf 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-g69cb 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-nsmnq 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-5mn6v 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-fpq8j 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-lsz2t 1/1 Running 0 18s
+ scalardl-ledger-envoy-547bbf7546-n7p5x 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-p8nwp 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-pskpb 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-5zsbj 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-vnmrw 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-wpjvs 1/1 Running 0 26s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 11m
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 11m
+ ```
+
+ If the ScalarDL Ledger and ScalarDL Auditor pods are deployed properly, the `STATUS` column for those pods will be displayed as `Running`.
+
+1. Check if the ScalarDL Ledger and ScalarDL Auditor services are deployed.
+
+ ```console
+ kubectl get svc -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-auditor ClusterIP 10.107.9.78 5432/TCP 15m
+ postgresql-auditor-hl ClusterIP None 5432/TCP 15m
+ postgresql-ledger ClusterIP 10.108.241.181 5432/TCP 15m
+ postgresql-ledger-hl ClusterIP None 5432/TCP 15m
+ scalardl-auditor-envoy ClusterIP 10.100.61.202 40051/TCP,40052/TCP 55s
+ scalardl-auditor-envoy-metrics ClusterIP 10.99.6.227 9001/TCP 55s
+ scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 55s
+ scalardl-auditor-metrics ClusterIP 10.108.1.147 8080/TCP 55s
+ scalardl-ledger-envoy ClusterIP 10.101.191.116 50051/TCP,50052/TCP 61s
+ scalardl-ledger-envoy-metrics ClusterIP 10.106.52.103 9001/TCP 61s
+ scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 61s
+ scalardl-ledger-metrics ClusterIP 10.99.122.106 8080/TCP 61s
+ ```
+
+ If the ScalarDL Ledger and ScalarDL Auditor services are deployed properly, you can see private IP addresses in the `CLUSTER-IP` column.
+
+:::note
+
+The `CLUSTER-IP` values for `scalardl-ledger-headless`, `scalardl-auditor-headless`, `postgresql-ledger-hl`, and `postgresql-auditor-hl` are `None` since they have no IP addresses.
+
+:::
+
+## Step 7. Start a client container
+
+You'll use the CA certificate file in a client container. Therefore, you'll need to create a secret resource and mount it to the client container.
+
+1. Create a secret resource named `client-ca-cert`.
+
+ ```console
+ kubectl create secret generic client-ca-cert --from-file=ca.crt=<(kubectl get secret self-signed-ca-cert-secret -o "jsonpath={.data['ca\.crt']}" | base64 -d) -n default
+ ```
+
+1. Create a manifest file for a client pod (`scalardl-client-pod.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-client-pod.yaml
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ env:
+ - name: SCALAR_DL_VERSION
+ value: SCALAR_DL_CLIENT_POD_SCALAR_DL_VERSION
+ volumeMounts:
+ - name: "client-ca-cert"
+ mountPath: "/certs/"
+ readOnly: true
+ volumes:
+ - name: "client-ca-cert"
+ secret:
+ secretName: "client-ca-cert"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Set the ScalarDL version in the manifest file.
+
+ ```console
+ sed -i s/SCALAR_DL_CLIENT_POD_SCALAR_DL_VERSION/${SCALAR_DL_VERSION}/ ${HOME}/scalardl-test/scalardl-client-pod.yaml
+ ```
+
+1. Deploy the client pod.
+
+ ```console
+ kubectl apply -f ${HOME}/scalardl-test/scalardl-client-pod.yaml -n default
+ ```
+
+1. Check if the client container is running.
+
+ ```console
+ kubectl get pod scalardl-client -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 4s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the client container
+
+The following explains the minimum steps needed to run sample contracts. For more details about ScalarDL Ledger and ScalarDL Auditor, see the following:
+
+* [Getting Started with ScalarDL](https://scalardl.scalar-labs.com/docs/latest/getting-started/)
+* [Getting Started with ScalarDL Auditor](https://scalardl.scalar-labs.com/docs/latest/getting-started-auditor/)
+
+1. Run bash in the client container.
+
+ ```console
+ kubectl exec -it scalardl-client -n default -- bash
+ ```
+
+ The commands in the following steps must be run in the client container.
+
+1. Install the git, curl, and unzip commands in the client container.
+
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone the ScalarDL Java Client SDK git repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the working directory to `scalardl-java-client-sdk/`.
+
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ [Command execution result]
+
+ ```console
+ /scalardl-java-client-sdk
+ ```
+
+1. Change the branch to the version you're using.
+
+ ```console
+ git checkout -b v${SCALAR_DL_VERSION} refs/tags/v${SCALAR_DL_VERSION}
+ ```
+
+1. Build the sample contracts.
+
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download the CLI tools for ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v${SCALAR_DL_VERSION}/scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip
+ ```
+
+1. Unzip the `scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip` file.
+
+ ```console
+ unzip ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip
+ ```
+
+1. Create a configuration file named `client.properties` to access ScalarDL Ledger and ScalarDL Auditor on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > client.properties
+ # Ledger configuration
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.client.tls.enabled=true
+ scalar.dl.client.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.dl.client.tls.override_authority=envoy.scalar.example.com
+
+ # Auditor configuration
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+ scalar.dl.client.auditor.tls.enabled=true
+ scalar.dl.client.auditor.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.dl.client.auditor.tls.override_authority=envoy.scalar.example.com
+
+ # Client configuration
+ scalar.dl.client.authentication_method=hmac
+ scalar.dl.client.entity.id=client
+ scalar.dl.client.entity.identity.hmac.secret_key=scalardl-hmac-client-secert-key
+ EOF
+ ```
+
+1. Register the client secret.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-secret --config ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Register the contract `ValidateLedger` to execute a validate request.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id validate-ledger --contract-binary-name com.scalar.dl.client.contract.ValidateLedger --contract-class-file ./build/classes/java/main/com/scalar/dl/client/contract/ValidateLedger.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl execute-contract --config ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+
+ This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl execute-contract --config ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+
+ [Command execution result]
+
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+
+ ### Reference
+
+ * If the asset data is not tampered with, running the `execute-contract` command to request contract execution will return `OK` as a result.
+ * If the asset data is tampered with (for example, if the `state` value in the database is tampered with), running the `execute-contract` command to request contract execution will return a value other than `OK` (for example, `INCONSISTENT_STATES`) as a result. See the following as an example for how ScalarDL detects data tampering.
+
+ [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+1. Execute a validation request for the asset.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl validate-ledger --config ./client.properties --asset-id "test_asset"
+ ```
+
+ [Command execution result]
+
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEYCIQDiiXqzw6K+Ml4uvn8rK43o5wHWESU3hoXnZPi6/OeKVwIhAM+tFBcapl6zg47Uq0Uc8nVNGWNHZLBDBGve3F0xkzTR"
+ },
+ "Auditor" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEUCIQDLsfUR2PmxSvfpL3YvHJUkz00RDpjCdctkroZKXE8d5QIgH73FQH2e11jfnynD00Pp9DrIG1vYizxDsvxUsMPo9IU="
+ }
+ }
+ ```
+
+ ### Reference
+
+ * If the asset data is not tampered with, running the `validate-ledger` command to request validation will return `OK` as the result.
+ * If the asset data is tampered with (for example, if the `state` value in the database is tampered with), running the `validate-ledger` command to request validation will return a value other than `OK` (for example, `INVALID_OUTPUT`) as a result. See the following as an example for how ScalarDL detects data tampering.
+
+ [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+1. Exit from the client container.
+
+ ```console
+ exit
+ ```
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger and ScalarDL Auditor tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Auditor, ScalarDL Schema Loader, and PostgreSQL.
+
+ ```console
+ helm uninstall -n default scalardl-ledger schema-loader-ledger postgresql-ledger scalardl-auditor schema-loader-auditor postgresql-auditor
+ ```
+
+1. Remove the self-signed CA.
+
+ ```
+ kubectl delete -f ./private-ca-custom-values.yaml
+ ```
+
+1. Uninstall cert-manager.
+
+ ```console
+ helm uninstall -n cert-manager cert-manager
+ ```
+
+1. Remove the client container.
+
+ ```
+ kubectl delete pod scalardl-client --grace-period 0 -n default
+ ```
+
+1. Remove the secret resources.
+
+ ```
+ kubectl delete secrets self-signed-ca-cert-secret schema-ledger-credentials-secret schema-auditor-credentials-secret scalardl-ledger-tls-cert scalardl-ledger-envoy-tls-cert scalardl-auditor-tls-cert scalardl-auditor-envoy-tls-cert ledger-credentials-secret auditor-credentials-secret client-ca-cert auditor-keys
+ ```
+
+1. Remove the namespace `cert-manager`.
+
+ ```
+ kubectl delete ns cert-manager
+ ```
+
+1. Remove the working directory and sample files (configuration files).
+
+ ```console
+ cd ${HOME}
+ ```
+
+ ```console
+ rm -rf ${HOME}/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following tutorials:
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls.mdx
new file mode 100644
index 00000000..d8d5adb4
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor-tls.mdx
@@ -0,0 +1,1030 @@
+---
+---
+
+# Getting Started with Helm Charts (ScalarDL Ledger and Auditor with TLS / Auditor Mode)
+
+This tutorial explains how to get started with ScalarDL Ledger and ScalarDL Auditor with TLS configurations by using Helm Charts on a Kubernetes cluster as a test environment. Before starting, you should already have a Mac or Linux environment for testing. In addition, although this tutorial mentions using **minikube**, the steps described should work in any Kubernetes cluster.
+
+## Requirements
+
+* You need to have a license key (trial license or commercial license) for ScalarDL. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact).
+* You need to use ScalarDL 3.9 or later, which supports TLS.
+
+:::note
+
+To make Byzantine fault detection with auditing work properly, ScalarDL Ledger and ScalarDL Auditor should be deployed and managed in different administrative domains. However, in this tutorial, we will deploy ScalarDL Ledger and ScalarDL Auditor in the same Kubernetes cluster to make the test easier.
+
+:::
+
+## What you'll create
+
+In this tutorial, you'll deploy the following components on a Kubernetes cluster in the following way:
+
+```
++-----------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +---------+ |
+| +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| | +-------+ | | +---------+ | |
+| | | | | |
+| +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Ledger | ---+---> | PostgreSQL | |
+| | | (Envoy) | | +-------+ | | (Ledger) | | +---------+ | | (For Ledger) | |
+| | +---------+ | | +-----------+ | | +---------------+ |
+| [Pod] | | +-------+ | | +---------+ | |
+| | +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | Client | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| | | +-------+ | | +---------+ | |
+| | | | | | |
+| | +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Auditor | ---+---> | PostgreSQL | |
+| | (Envoy) | | +-------+ | | (Auditor) | | +---------+ | | (For Auditor) | |
+| +---------+ | | +-----------+ | | +---------------+ |
+| | +-------+ | | +---------+ | |
+| +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| +-------+ +---------+ |
+| |
++-----------------------------------------------------------------------------------------------------------------------------+
+```
+
+You'll also create the following private key and certificate files for TLS connections.
+
+```
+ +----------------------+
+ +---> | For Scalar Envoy |
+ | +----------------------+
+ | | envoy-key.pem |
+ | | envoy.pem |
+ | +----------------------+
+ |
++----------------------+ | +----------------------+
+| Self-signed CA | ---(Sign certificates)---+---> | For ScalarDL Ledger |
++----------------------+ | +----------------------+
+| ca-key.pem | | | ledger-key.pem |
+| ca.pem | | | ledger.pem |
++----------------------+ | +----------------------+
+ |
+ | +----------------------+
+ +---> | For ScalarDL Auditor |
+ +----------------------+
+ | auditor-key.pem |
+ | auditor.pem |
+ +----------------------+
+```
+
+You'll set each private key and certificate file as follows to enable TLS in each connection.
+
+```
+ +--------------------------------+ +--------------------------------+
+ +-------(Normal request)-----> | Envoy for ScalarDL Ledger | ---> | ScalarDL Ledger |
+ | +--------------------------------+ +--------------------------------+
+ | +---(Recovery request)---> | envoy-key.pem | ---> | ledger-key.pem |
+ | | | envoy.pem | | ledger.pem |
+ | | | ca.pem (to verify ledger.pem) | | ca.pem (used for health check) |
+ | | +--------------------------------+ +--------------------------------+
++--------------------------------+ | |
+| Client | ---+ |
++--------------------------------+ | +--------------------------------------------------------------------------------------------------------+
+| ca.pem (to verify envoy.pem) | | |
++--------------------------------+ | |
+ | +--------------------------------+ +--------------------------------+ |
+ +-------(Normal request)-----> | Envoy for ScalarDL Auditor | ---> | ScalarDL Auditor | ---+
+ +--------------------------------+ +--------------------------------+
+ | envoy-key.pem | | auditor-key.pem |
+ | envoy.pem | | auditor.pem |
+ | ca.pem (to verify auditor.pem) | | ca.pem (used for health check) |
+ +--------------------------------+ | ca.pem (to verify ledger.pem) |
+ +--------------------------------+
+```
+
+The following connections exist amongst the ScalarDL-related components:
+
+* **`Client - Envoy for ScalarDL Ledger`:** When you execute a ScalarDL API function, the client accesses Envoy for ScalarDL Ledger.
+* **`Client - Envoy for ScalarDL Auditor`:** When you execute a ScalarDL API function, the client accesses Envoy for ScalarDL Auditor.
+* **`Envoy for ScalarDL Ledger - ScalarDL Ledger`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDL Ledger.
+* **`Envoy for ScalarDL Auditor - ScalarDL Auditor`:** Envoy works as an L7 (gRPC) load balancer in front of ScalarDL Auditor.
+* **`ScalarDL Auditor - Envoy for ScalarDL Ledger (ScalarDL Ledger)`:** When ScalarDL needs to run the recovery process to keep data consistent, ScalarDL Auditor runs the request against ScalarDL Ledger via Envoy.
+
+## Step 1. Start a Kubernetes cluster and install tools
+
+You need to prepare a Kubernetes cluster and install some tools (`kubectl`, `helm`, `cfssl`, and `cfssljson`). For more details on how to install them, see [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx).
+
+## Step 2. Start the PostgreSQL containers
+
+ScalarDL Ledger and ScalarDL Auditor must use some type of database system as a backend database. In this tutorial, you'll use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for Ledger.
+
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Deploy PostgreSQL for Auditor.
+
+ ```console
+ helm install postgresql-auditor bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false \
+ -n default
+ ```
+
+1. Check if the PostgreSQL containers are running.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 11s
+ postgresql-ledger-0 1/1 Running 0 16s
+ ```
+
+## Step 3. Create a working directory
+
+You'll create some configuration files and private key and certificate files locally. Be sure to create a working directory for those files.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ${HOME}/scalardl-test/certs/
+ ```
+
+## Step 4. Create private key and certificate files
+
+You'll create private key and certificate files.
+
+1. Change the working directory to `${HOME}/scalardl-test/certs/`.
+
+ ```console
+ cd ${HOME}/scalardl-test/certs/
+ ```
+
+1. Create a JSON file that includes CA information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/certs/ca.json
+ {
+ "CN": "scalar-test-ca",
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "Scalar Test CA"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create the CA private key and certificate files.
+
+ ```console
+ cfssl gencert -initca ca.json | cfssljson -bare ca
+ ```
+
+1. Create a JSON file that includes CA configurations.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/certs/ca-config.json
+ {
+ "signing": {
+ "default": {
+ "expiry": "87600h"
+ },
+ "profiles": {
+ "scalar-test-ca": {
+ "expiry": "87600h",
+ "usages": [
+ "signing",
+ "key encipherment",
+ "server auth"
+ ]
+ }
+ }
+ }
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Envoy information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/certs/envoy.json
+ {
+ "CN": "scalar-envoy",
+ "hosts": [
+ "envoy.scalar.example.com",
+ "localhost"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "Scalar Envoy Test"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes ScalarDL Ledger information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/certs/ledger.json
+ {
+ "CN": "scalardl-ledger",
+ "hosts": [
+ "ledger.scalardl.example.com",
+ "localhost"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "ScalarDL Ledger Test"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes ScalarDL Auditor information.
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/certs/auditor.json
+ {
+ "CN": "scalardl-auditor",
+ "hosts": [
+ "auditor.scalardl.example.com",
+ "localhost"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "C": "JP",
+ "ST": "Tokyo",
+ "L": "Shinjuku",
+ "O": "ScalarDL Auditor Test"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create private key and certificate files for Envoy.
+
+ ```console
+ cfssl gencert -ca ca.pem -ca-key ca-key.pem -config ca-config.json -profile scalar-test-ca envoy.json | cfssljson -bare envoy
+ ```
+
+1. Create private key and certificate files for ScalarDL Ledger.
+
+ ```console
+ cfssl gencert -ca ca.pem -ca-key ca-key.pem -config ca-config.json -profile scalar-test-ca ledger.json | cfssljson -bare ledger
+ ```
+
+1. Create private key and certificate files for ScalarDL Auditor.
+
+ ```console
+ cfssl gencert -ca ca.pem -ca-key ca-key.pem -config ca-config.json -profile scalar-test-ca auditor.json | cfssljson -bare auditor
+ ```
+
+1. Confirm that the private key and certificate files were created.
+
+ ```console
+ ls -1
+ ```
+
+ [Command execution result]
+
+ ```console
+ auditor-key.pem
+ auditor.csr
+ auditor.json
+ auditor.pem
+ ca-config.json
+ ca-key.pem
+ ca.csr
+ ca.json
+ ca.pem
+ envoy-key.pem
+ envoy.csr
+ envoy.json
+ envoy.pem
+ ledger-key.pem
+ ledger.csr
+ ledger.json
+ ledger.pem
+ ```
+
+## Step 5. Create database schemas for ScalarDL Ledger and ScalarDL Auditor by using Helm Charts
+
+You'll deploy two ScalarDL Schema Loader pods on the Kubernetes cluster by using Helm Charts. The ScalarDL Schema Loader will create the database schemas for ScalarDL Ledger and Auditor in PostgreSQL.
+
+1. Change the working directory to `${HOME}/scalardl-test/`.
+
+ ```console
+ cd ${HOME}/scalardl-test/
+ ```
+
+1. Add the Scalar Helm Charts repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a custom values file for ScalarDL Schema Loader for Ledger (`schema-loader-ledger-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_LEDGER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_LEDGER_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+ secretName: "schema-ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Schema Loader for Auditor (`schema-loader-auditor-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_AUDITOR_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_AUDITOR_POSTGRES_PASSWORD}
+ scalar.db.storage=jdbc
+ secretName: "schema-auditor-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `schema-ledger-credentials-secret` that includes a username and password for PostgreSQL for ScalarDL Ledger.
+
+ ```console
+ kubectl create secret generic schema-ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres \
+ -n default
+ ```
+
+1. Create a secret resource named `schema-auditor-credentials-secret` that includes a username and password for PostgreSQL for ScalarDL Auditor.
+
+ ```console
+ kubectl create secret generic schema-auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres \
+ -n default
+ ```
+
+1. Set the chart version of ScalarDL Schema Loader.
+
+ ```console
+ SCALAR_DL_VERSION=3.9.1
+ SCALAR_DL_SCHEMA_LOADER_CHART_VERSION=$(helm search repo scalar-labs/schema-loading -l | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDL Schema Loader for ScalarDL Ledger.
+
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ${HOME}/scalardl-test/schema-loader-ledger-custom-values.yaml --version ${SCALAR_DL_SCHEMA_LOADER_CHART_VERSION} -n default
+ ```
+
+1. Deploy ScalarDL Schema Loader for ScalarDL Auditor.
+
+ ```console
+ helm install schema-loader-auditor scalar-labs/schema-loading -f ${HOME}/scalardl-test/schema-loader-auditor-custom-values.yaml --version ${SCALAR_DL_SCHEMA_LOADER_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDL Schema Loader pods are deployed with the status `Completed`.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 2m56s
+ postgresql-ledger-0 1/1 Running 0 3m1s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 6s
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 10s
+ ```
+
+    If the status of the ScalarDL Schema Loader pods is **ContainerCreating** or **Running**, wait for the `STATUS` column for those pods to show as `Completed`.
+
+## Step 6. Deploy ScalarDL Ledger and ScalarDL Auditor on the Kubernetes cluster by using Helm Charts
+
+1. Set your license key and certificate as environment variables. If you don't have a license key, please [contact us](https://www.scalar-labs.com/contact). Also, you can see how to obtain the values for the license key and the license-check certificate in [our document](https://scalardb.scalar-labs.com/docs/latest/scalar-licensing/README/).
+
+ ```console
+ SCALAR_DL_LEDGER_LICENSE_KEY=''
+ SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM=''
+ SCALAR_DL_AUDITOR_LICENSE_KEY=''
+ SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM=''
+ ```
+
+1. Create a custom values file for ScalarDL Ledger (`scalardl-ledger-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+
+ tls:
+ downstream:
+ enabled: true
+ certChainSecret: "envoy-tls-cert"
+ privateKeySecret: "envoy-tls-key"
+ upstream:
+ enabled: true
+ overrideAuthority: "ledger.scalardl.example.com"
+ caRootCertSecret: "scalardl-ledger-tls-ca"
+
+ ledger:
+
+ image:
+ repository: "ghcr.io/scalar-labs/scalardl-ledger-byol"
+
+ ledgerProperties: |
+ ### Storage configurations
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_LEDGER_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_LEDGER_POSTGRES_PASSWORD}
+
+ ### Ledger configurations
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.authentication.method=hmac
+ scalar.dl.ledger.authentication.hmac.cipher_key=${env:SCALAR_DL_LEDGER_HMAC_CIPHER_KEY}
+ scalar.dl.ledger.servers.authentication.hmac.secret_key=${env:SCALAR_DL_LEDGER_HMAC_SECRET_KEY}
+
+ ### TLS configurations
+ scalar.dl.ledger.server.tls.enabled=true
+ scalar.dl.ledger.server.tls.cert_chain_path=/tls/scalardl-ledger/certs/tls.crt
+ scalar.dl.ledger.server.tls.private_key_path=/tls/scalardl-ledger/certs/tls.key
+
+ ### License key configurations
+ scalar.dl.licensing.license_key=${env:SCALAR_DL_LEDGER_LICENSE_KEY}
+ scalar.dl.licensing.license_check_cert_pem=${env:SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "ledger.scalardl.example.com"
+ caRootCertSecret: "scalardl-ledger-tls-ca"
+ certChainSecret: "scalardl-ledger-tls-cert"
+ privateKeySecret: "scalardl-ledger-tls-key"
+
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Auditor (`scalardl-auditor-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+
+ tls:
+ downstream:
+ enabled: true
+ certChainSecret: "envoy-tls-cert"
+ privateKeySecret: "envoy-tls-key"
+ upstream:
+ enabled: true
+ overrideAuthority: "auditor.scalardl.example.com"
+ caRootCertSecret: "scalardl-auditor-tls-ca"
+
+ auditor:
+ image:
+ repository: "ghcr.io/scalar-labs/scalardl-auditor-byol"
+
+ auditorProperties: |
+ ### Storage configurations
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username=${env:SCALAR_DL_AUDITOR_POSTGRES_USERNAME}
+ scalar.db.password=${env:SCALAR_DL_AUDITOR_POSTGRES_PASSWORD}
+
+ ### Auditor configurations
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.authentication.method=hmac
+ scalar.dl.auditor.authentication.hmac.cipher_key=${env:SCALAR_DL_AUDITOR_HMAC_CIPHER_KEY}
+ scalar.dl.auditor.servers.authentication.hmac.secret_key=${env:SCALAR_DL_AUDITOR_HMAC_SECRET_KEY}
+
+ ### TLS configurations
+ scalar.dl.auditor.server.tls.enabled=true
+ scalar.dl.auditor.server.tls.cert_chain_path=/tls/scalardl-auditor/certs/tls.crt
+ scalar.dl.auditor.server.tls.private_key_path=/tls/scalardl-auditor/certs/tls.key
+ scalar.dl.auditor.tls.enabled=true
+ scalar.dl.auditor.tls.ca_root_cert_path=/tls/scalardl-ledger/certs/ca.crt
+ scalar.dl.auditor.tls.override_authority=envoy.scalar.example.com
+
+ ### License key configurations
+ scalar.dl.licensing.license_key=${env:SCALAR_DL_AUDITOR_LICENSE_KEY}
+ scalar.dl.licensing.license_check_cert_pem=${env:SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM}
+
+ tls:
+ enabled: true
+ overrideAuthority: "auditor.scalardl.example.com"
+ caRootCertSecret: "scalardl-auditor-tls-ca"
+ certChainSecret: "scalardl-auditor-tls-cert"
+ privateKeySecret: "scalardl-auditor-tls-key"
+ caRootCertForLedgerSecret: "scalardl-auditor-tls-ca-for-ledger"
+
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource named `ledger-credentials-secret` that includes credentials and a license key.
+
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DL_LEDGER_HMAC_CIPHER_KEY=ledger-hmac-cipher-key \
+ --from-literal=SCALAR_DL_LEDGER_HMAC_SECRET_KEY=scalardl-hmac-secret-key \
+ --from-literal=SCALAR_DL_LEDGER_LICENSE_KEY="${SCALAR_DL_LEDGER_LICENSE_KEY}" \
+ --from-file=SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DL_LEDGER_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Create a secret resource named `auditor-credentials-secret` that includes credentials and a license key.
+
+ ```console
+ kubectl create secret generic auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_HMAC_CIPHER_KEY=auditor-hmac-cipher-key \
+ --from-literal=SCALAR_DL_AUDITOR_HMAC_SECRET_KEY=scalardl-hmac-secret-key \
+ --from-literal=SCALAR_DL_AUDITOR_LICENSE_KEY="${SCALAR_DL_AUDITOR_LICENSE_KEY}" \
+ --from-file=SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM=<(echo ${SCALAR_DL_AUDITOR_LICENSE_CHECK_CERT_PEM} | sed 's/\\n/\
+ /g') \
+ -n default
+ ```
+
+1. Create secret resources that include the private key and certificate files for Envoy.
+
+ ```console
+ kubectl create secret generic envoy-tls-cert --from-file=tls.crt=${HOME}/scalardl-test/certs/envoy.pem -n default
+ kubectl create secret generic envoy-tls-key --from-file=tls.key=${HOME}/scalardl-test/certs/envoy-key.pem -n default
+ ```
+
+1. Create secret resources that include the private key, certificate, and CA certificate files for ScalarDL Ledger.
+
+ ```console
+ kubectl create secret generic scalardl-ledger-tls-ca --from-file=ca.crt=${HOME}/scalardl-test/certs/ca.pem -n default
+ kubectl create secret generic scalardl-ledger-tls-cert --from-file=tls.crt=${HOME}/scalardl-test/certs/ledger.pem -n default
+ kubectl create secret generic scalardl-ledger-tls-key --from-file=tls.key=${HOME}/scalardl-test/certs/ledger-key.pem -n default
+ ```
+
+1. Create secret resources that include the private key, certificate, and CA certificate files for ScalarDL Auditor.
+
+ ```console
+ kubectl create secret generic scalardl-auditor-tls-ca --from-file=ca.crt=${HOME}/scalardl-test/certs/ca.pem -n default
+ kubectl create secret generic scalardl-auditor-tls-cert --from-file=tls.crt=${HOME}/scalardl-test/certs/auditor.pem -n default
+ kubectl create secret generic scalardl-auditor-tls-key --from-file=tls.key=${HOME}/scalardl-test/certs/auditor-key.pem -n default
+ kubectl create secret generic scalardl-auditor-tls-ca-for-ledger --from-file=ca.crt=${HOME}/scalardl-test/certs/ca.pem -n default
+ ```
+
+1. Create a secret resource named `auditor-keys` to disable the `digital-signature` authentication method. In this tutorial, you'll use the `hmac` authentication method instead of `digital-signature`.
+
+ ```console
+ kubectl create secret generic auditor-keys \
+ --from-literal=tls.key=dummy-data-to-disable-digital-signature-method \
+ --from-literal=certificate=dummy-data-to-disable-digital-signature-method \
+ -n default
+ ```
+    Note: If you use `hmac` as the authentication method, you have to create a dummy secret named `auditor-keys` to disable `digital-signature` on the helm chart side.
+
+1. Set the chart version of ScalarDL Ledger and ScalarDL Auditor.
+
+ ```console
+ SCALAR_DL_LEDGER_CHART_VERSION=$(helm search repo scalar-labs/scalardl -l | grep -v -e "scalar-labs/scalardl-audit" | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ SCALAR_DL_AUDITOR_CHART_VERSION=$(helm search repo scalar-labs/scalardl-audit -l | grep -F "${SCALAR_DL_VERSION}" | awk '{print $2}' | sort --version-sort -r | head -n 1)
+ ```
+
+1. Deploy ScalarDL Ledger.
+
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ${HOME}/scalardl-test/scalardl-ledger-custom-values.yaml --version ${SCALAR_DL_LEDGER_CHART_VERSION} -n default
+ ```
+
+1. Deploy ScalarDL Auditor.
+
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ${HOME}/scalardl-test/scalardl-auditor-custom-values.yaml --version ${SCALAR_DL_AUDITOR_CHART_VERSION} -n default
+ ```
+
+1. Check if the ScalarDL Ledger and ScalarDL Auditor pods are deployed.
+
+ ```console
+ kubectl get pod -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 14m
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-auditor-auditor-5b885ff4c8-fwkpf 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-g69cb 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-nsmnq 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-5mn6v 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-fpq8j 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-lsz2t 1/1 Running 0 18s
+ scalardl-ledger-envoy-547bbf7546-n7p5x 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-p8nwp 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-pskpb 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-5zsbj 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-vnmrw 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-wpjvs 1/1 Running 0 26s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 11m
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 11m
+ ```
+
+ If the ScalarDL Ledger and ScalarDL Auditor pods are deployed properly, the `STATUS` column for those pods will be displayed as `Running`.
+
+1. Check if the ScalarDL Ledger and ScalarDL Auditor services are deployed.
+
+ ```console
+ kubectl get svc -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-auditor ClusterIP 10.107.9.78 5432/TCP 15m
+ postgresql-auditor-hl ClusterIP None 5432/TCP 15m
+ postgresql-ledger ClusterIP 10.108.241.181 5432/TCP 15m
+ postgresql-ledger-hl ClusterIP None 5432/TCP 15m
+ scalardl-auditor-envoy ClusterIP 10.100.61.202 40051/TCP,40052/TCP 55s
+ scalardl-auditor-envoy-metrics ClusterIP 10.99.6.227 9001/TCP 55s
+ scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 55s
+ scalardl-auditor-metrics ClusterIP 10.108.1.147 8080/TCP 55s
+ scalardl-ledger-envoy ClusterIP 10.101.191.116 50051/TCP,50052/TCP 61s
+ scalardl-ledger-envoy-metrics ClusterIP 10.106.52.103 9001/TCP 61s
+ scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 61s
+ scalardl-ledger-metrics ClusterIP 10.99.122.106 8080/TCP 61s
+ ```
+
+ If the ScalarDL Ledger and ScalarDL Auditor services are deployed properly, you can see private IP addresses in the `CLUSTER-IP` column.
+
+:::note
+
+The `CLUSTER-IP` values for `scalardl-ledger-headless`, `scalardl-auditor-headless`, `postgresql-ledger-hl`, and `postgresql-auditor-hl` are `None` since they have no IP addresses.
+
+:::
+
+## Step 7. Start a client container
+
+You'll use the CA certificate file in a client container. Therefore, you'll need to create a secret resource and mount it to the client container.
+
+1. Create a secret resource named `client-ca-cert`.
+
+ ```console
+ kubectl create secret generic client-ca-cert --from-file=ca.crt=${HOME}/scalardl-test/certs/ca.pem -n default
+ ```
+
+1. Create a manifest file for a client pod (`scalardl-client-pod.yaml`).
+
+ ```console
+ cat << 'EOF' > ${HOME}/scalardl-test/scalardl-client-pod.yaml
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ env:
+ - name: SCALAR_DL_VERSION
+ value: SCALAR_DL_CLIENT_POD_SCALAR_DL_VERSION
+ volumeMounts:
+ - name: "client-ca-cert"
+ mountPath: "/certs/"
+ readOnly: true
+ volumes:
+ - name: "client-ca-cert"
+ secret:
+ secretName: "client-ca-cert"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Set the ScalarDL version in the manifest file.
+
+ ```console
+ sed -i s/SCALAR_DL_CLIENT_POD_SCALAR_DL_VERSION/${SCALAR_DL_VERSION}/ ${HOME}/scalardl-test/scalardl-client-pod.yaml
+ ```
+
+1. Deploy the client pod.
+
+ ```console
+ kubectl apply -f ${HOME}/scalardl-test/scalardl-client-pod.yaml -n default
+ ```
+
+1. Check if the client container is running.
+
+ ```console
+ kubectl get pod scalardl-client -n default
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 4s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the client container
+
+The following explains the minimum steps needed to run sample contracts. For more details about ScalarDL Ledger and ScalarDL Auditor, see the following:
+
+* [Getting Started with ScalarDL](https://scalardl.scalar-labs.com/docs/latest/getting-started/)
+* [Getting Started with ScalarDL Auditor](https://scalardl.scalar-labs.com/docs/latest/getting-started-auditor/)
+
+1. Run bash in the client container.
+
+ ```console
+ kubectl exec -it scalardl-client -n default -- bash
+ ```
+ The commands in the following steps must be run in the client container.
+
+1. Install the git, curl, and unzip commands in the client container.
+
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone the ScalarDL Java Client SDK git repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the working directory to `scalardl-java-client-sdk/`.
+
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ [Command execution result]
+
+ ```console
+ /scalardl-java-client-sdk
+ ```
+
+1. Change the branch to the version you're using.
+
+ ```console
+ git checkout -b v${SCALAR_DL_VERSION} refs/tags/v${SCALAR_DL_VERSION}
+ ```
+
+1. Build the sample contracts.
+
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download the CLI tools for ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v${SCALAR_DL_VERSION}/scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip
+ ```
+ You need to use the same version of CLI tools and ScalarDL Ledger.
+
+1. Unzip the `scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip` file.
+
+ ```console
+ unzip ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}.zip
+ ```
+
+1. Create a configuration file named `client.properties` to access ScalarDL Ledger and ScalarDL Auditor on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > client.properties
+ # Ledger configuration
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.client.tls.enabled=true
+ scalar.dl.client.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.dl.client.tls.override_authority=envoy.scalar.example.com
+
+ # Auditor configuration
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+ scalar.dl.client.auditor.tls.enabled=true
+ scalar.dl.client.auditor.tls.ca_root_cert_path=/certs/ca.crt
+ scalar.dl.client.auditor.tls.override_authority=envoy.scalar.example.com
+
+ # Client configuration
+ scalar.dl.client.authentication_method=hmac
+ scalar.dl.client.entity.id=client
+    scalar.dl.client.entity.identity.hmac.secret_key=scalardl-hmac-client-secret-key
+ EOF
+ ```
+
+1. Register the client secret.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-secret --config ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Register the contract `ValidateLedger` to execute a validate request.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl register-contract --config ./client.properties --contract-id validate-ledger --contract-binary-name com.scalar.dl.client.contract.ValidateLedger --contract-class-file ./build/classes/java/main/com/scalar/dl/client/contract/ValidateLedger.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl execute-contract --config ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+
+   This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl execute-contract --config ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+
+ [Command execution result]
+
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+
+ ### Reference
+
+ * If the asset data is not tampered with, running the `execute-contract` command to request contract execution will return `OK` as a result.
+ * If the asset data is tampered with (for example, if the `state` value in the database is tampered with), running the `execute-contract` command to request contract execution will return a value other than `OK` (for example, `INCONSISTENT_STATES`) as a result. See the following as an example for how ScalarDL detects data tampering.
+
+ [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+1. Execute a validation request for the asset.
+
+ ```console
+ ./scalardl-java-client-sdk-${SCALAR_DL_VERSION}/bin/scalardl validate-ledger --config ./client.properties --asset-id "test_asset"
+ ```
+
+ [Command execution result]
+
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEYCIQDiiXqzw6K+Ml4uvn8rK43o5wHWESU3hoXnZPi6/OeKVwIhAM+tFBcapl6zg47Uq0Uc8nVNGWNHZLBDBGve3F0xkzTR"
+ },
+ "Auditor" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEUCIQDLsfUR2PmxSvfpL3YvHJUkz00RDpjCdctkroZKXE8d5QIgH73FQH2e11jfnynD00Pp9DrIG1vYizxDsvxUsMPo9IU="
+ }
+ }
+ ```
+
+ ### Reference
+
+ * If the asset data is not tampered with, running the `validate-ledger` command to request validation will return `OK` as the result.
+ * If the asset data is tampered with (for example, if the `state` value in the database is tampered with), running the `validate-ledger` command to request validation will return a value other than `OK` (for example, `INVALID_OUTPUT`) as a result. See the following as an example for how ScalarDL detects data tampering.
+
+ [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+1. Exit from the client container.
+
+ ```console
+ exit
+ ```
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger and ScalarDL Auditor tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Auditor, ScalarDL Schema Loader, and PostgreSQL.
+
+ ```console
+ helm uninstall -n default scalardl-ledger schema-loader-ledger postgresql-ledger scalardl-auditor schema-loader-auditor postgresql-auditor
+ ```
+
+1. Remove the client container.
+
+   ```console
+ kubectl delete pod scalardl-client --grace-period 0 -n default
+ ```
+
+1. Remove the secret resources.
+
+   ```console
+ kubectl delete secrets envoy-tls-key envoy-tls-cert schema-ledger-credentials-secret schema-auditor-credentials-secret ledger-credentials-secret scalardl-ledger-tls-ca scalardl-ledger-tls-cert scalardl-ledger-tls-key auditor-credentials-secret auditor-keys scalardl-auditor-tls-ca scalardl-auditor-tls-cert scalardl-auditor-tls-key scalardl-auditor-tls-ca-for-ledger client-ca-cert
+ ```
+
+1. Remove the working directory and sample files (configuration file, private key, and certificate).
+
+ ```console
+ cd ${HOME}
+ ```
+
+ ```console
+ rm -rf ${HOME}/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following tutorials:
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor.mdx
new file mode 100644
index 00000000..9ce7ebcd
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-auditor.mdx
@@ -0,0 +1,906 @@
+---
+---
+
+# Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode)
+
+This document explains how to get started with ScalarDL Ledger and Auditor using Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Requirements
+
+You need to subscribe to ScalarDL Ledger and ScalarDL Auditor in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images.
+ * AWS Marketplace
+ * scalar-ledger
+ * scalar-ledger-envoy
+ * scalardl-schema-loader-ledger
+ * scalar-auditor
+ * scalar-auditor-envoy
+ * scalardl-schema-loader-auditor
+ * Azure Marketplace
+ * scalar-ledger
+ * scalar-auditor
+ * scalardl-envoy
+ * scalardl-schema-loader
+
+Please refer to the following documents for more details.
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+## Note
+
+To make Byzantine fault detection with auditing work properly, Ledger and Auditor should be deployed and managed in different administrative domains. However, in this guide, we will deploy Ledger and Auditor in the same Kubernetes cluster to make the test easier.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++-----------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +---------+ |
+| +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| | +-------+ | | +---------+ | |
+| | | | | |
+| +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Ledger | ---+---> | PostgreSQL | |
+| | | (Envoy) | | +-------+ | | (Ledger) | | +---------+ | | (For Ledger) | |
+| | +---------+ | | +-----------+ | | +---------------+ |
+| | | +-------+ | | +---------+ | |
+| | +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | Client | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| | | +-------+ | | +---------+ | |
+| | | | | | |
+| | +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Auditor | ---+---> | PostgreSQL | |
+| | (Envoy) | | +-------+ | | (Auditor) | | +---------+ | | (For Auditor) | |
+| +---------+ | | +-----------+ | | +---------------+ |
+| | +-------+ | | +---------+ | |
+| +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| +-------+ +---------+ |
+| |
++-----------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start PostgreSQL containers
+
+ScalarDL Ledger and Auditor use some kind of database system as a backend database. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for Ledger.
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Deploy PostgreSQL for Auditor.
+ ```console
+ helm install postgresql-auditor bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL containers are running.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 11s
+ postgresql-ledger-0 1/1 Running 0 16s
+ ```
+
+## Step 3. Create a working directory
+
+We will create some configuration files and key/certificate files locally. So, create a working directory for them.
+
+1. Create a working directory.
+ ```console
+ mkdir -p ~/scalardl-test/certs/
+ ```
+
+## Step 4. Create key/certificate files
+
+Note: In this guide, we will use self-signed certificates for the test. However, it is strongly recommended that these certificates NOT be used in production.
+
+1. Change the working directory to `~/scalardl-test/certs/` directory.
+ ```console
+ cd ~/scalardl-test/certs/
+ ```
+
+1. Create a JSON file that includes Ledger information.
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/ledger.json
+ {
+ "CN": "ledger",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "ledger",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Auditor information.
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/auditor.json
+ {
+ "CN": "auditor",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "auditor",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Client information.
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/client.json
+ {
+ "CN": "client",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "client",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create key/certificate files for the Ledger.
+ ```console
+ cfssl selfsign "" ./ledger.json | cfssljson -bare ledger
+ ```
+
+1. Create key/certificate files for the Auditor.
+ ```console
+ cfssl selfsign "" ./auditor.json | cfssljson -bare auditor
+ ```
+
+1. Create key/certificate files for the Client.
+ ```console
+ cfssl selfsign "" ./client.json | cfssljson -bare client
+ ```
+
+1. Confirm key/certificate files are created.
+ ```console
+ ls -1
+ ```
+ [Command execution result]
+ ```console
+ auditor-key.pem
+ auditor.csr
+ auditor.json
+ auditor.pem
+ client-key.pem
+ client.csr
+ client.json
+ client.pem
+ ledger-key.pem
+ ledger.csr
+ ledger.json
+ ledger.pem
+ ```
+
+## Step 5. Create DB schemas for ScalarDL Ledger and ScalarDL Auditor using Helm Charts
+
+We will deploy two ScalarDL Schema Loader pods on the Kubernetes cluster using Helm Charts.
+The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger and Auditor in PostgreSQL.
+
+1. Change the working directory to `~/scalardl-test/`.
+ ```console
+ cd ~/scalardl-test/
+ ```
+
+1. Add the Scalar helm repository.
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+ * Azure Marketplace
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+ --docker-server= \
+ --docker-username= \
+ --docker-password=
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+1. Create a custom values file for ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ image:
+ repository: "/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource that includes a username and password for PostgreSQL for Ledger.
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Create a secret resource that includes a username and password for PostgreSQL for Auditor.
+ ```console
+ kubectl create secret generic auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy the ScalarDL Schema Loader for Ledger.
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml
+ ```
+
+1. Deploy the ScalarDL Schema Loader for Auditor.
+ ```console
+ helm install schema-loader-auditor scalar-labs/schema-loading -f ./schema-loader-auditor-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Schema Loader pods are deployed and completed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 2m56s
+ postgresql-ledger-0 1/1 Running 0 3m1s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 6s
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 10s
+ ```
+   If the ScalarDL Schema Loader pods are **ContainerCreating** or **Running**, wait until the process is completed (the STATUS will become **Completed**).
+
+## Step 6. Deploy ScalarDL Ledger and Auditor on the Kubernetes cluster using Helm Charts
+
+1. Create a custom values file for ScalarDL Ledger (scalardl-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ ledger:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ ledger:
+ image:
+ repository: "/scalarinc/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+1. Create a custom values file for ScalarDL Auditor (scalardl-auditor-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ auditor:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ auditorProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.cert_path=/keys/certificate
+ scalar.dl.auditor.private_key_path=/keys/private-key
+ secretName: "auditor-credentials-secret"
+ extraVolumes:
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ extraVolumeMounts:
+ - name: "auditor-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ auditor:
+ image:
+ repository: "/scalarinc/scalar-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ auditorProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.cert_path=/keys/certificate
+ scalar.dl.auditor.private_key_path=/keys/private-key
+ secretName: "auditor-credentials-secret"
+ extraVolumes:
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ extraVolumeMounts:
+ - name: "auditor-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+1. Create secret resource `ledger-keys`.
+ ```console
+ kubectl create secret generic ledger-keys --from-file=certificate=./certs/ledger.pem --from-file=private-key=./certs/ledger-key.pem
+ ```
+
+1. Create secret resource `auditor-keys`.
+ ```console
+ kubectl create secret generic auditor-keys --from-file=certificate=./certs/auditor.pem --from-file=private-key=./certs/auditor-key.pem
+ ```
+
+1. Deploy the ScalarDL Ledger.
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+
+1. Deploy the ScalarDL Auditor.
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Ledger and Auditor pods are deployed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 14m
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-auditor-auditor-5b885ff4c8-fwkpf 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-g69cb 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-nsmnq 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-5mn6v 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-fpq8j 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-lsz2t 1/1 Running 0 18s
+ scalardl-ledger-envoy-547bbf7546-n7p5x 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-p8nwp 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-pskpb 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-5zsbj 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-vnmrw 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-wpjvs 1/1 Running 0 26s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 11m
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 11m
+ ```
+ If the ScalarDL Ledger and Auditor pods are deployed properly, you can see the STATUS are **Running**.
+
+1. Check if the ScalarDL Ledger and Auditor services are deployed.
+ ```console
+ kubectl get svc
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-auditor ClusterIP 10.107.9.78 5432/TCP 15m
+ postgresql-auditor-hl ClusterIP None 5432/TCP 15m
+ postgresql-ledger ClusterIP 10.108.241.181 5432/TCP 15m
+ postgresql-ledger-hl ClusterIP None 5432/TCP 15m
+ scalardl-auditor-envoy ClusterIP 10.100.61.202 40051/TCP,40052/TCP 55s
+ scalardl-auditor-envoy-metrics ClusterIP 10.99.6.227 9001/TCP 55s
+ scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 55s
+ scalardl-auditor-metrics ClusterIP 10.108.1.147 8080/TCP 55s
+ scalardl-ledger-envoy ClusterIP 10.101.191.116 50051/TCP,50052/TCP 61s
+ scalardl-ledger-envoy-metrics ClusterIP 10.106.52.103 9001/TCP 61s
+ scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 61s
+ scalardl-ledger-metrics ClusterIP 10.99.122.106 8080/TCP 61s
+ ```
+ If the ScalarDL Ledger and Auditor services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` and `scalardl-auditor-headless` have no CLUSTER-IP.)
+
+## Step 7. Start a Client container
+
+We will use certificate files in a Client container. So, we create a secret resource and mount it to a Client container.
+
+1. Create secret resource `client-keys`.
+   ```console
+ kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem
+ ```
+
+1. Start a Client container on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' | kubectl apply -f -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ volumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys/ledger"
+ readOnly: true
+ - name: "auditor-keys"
+ mountPath: "/keys/auditor"
+ readOnly: true
+ - name: "client-keys"
+ mountPath: "/keys/client"
+ readOnly: true
+ volumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ - name: "client-keys"
+ secret:
+ secretName: "client-keys"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Check if the Client container is running.
+ ```console
+ kubectl get pod scalardl-client
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 4s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDL Ledger and Auditor, please refer to the following documents.
+ * [Getting Started with ScalarDL](https://scalardl.scalar-labs.com/docs/latest/getting-started)
+ * [Getting Started with ScalarDL Auditor](https://scalardl.scalar-labs.com/docs/latest/getting-started-auditor)
+
+When you use Auditor, you need to register the certificate for the Ledger and Auditor before starting the client application. Ledger needs to register its certificate to Auditor, and Auditor needs to register its certificate to Ledger.
+
+1. Run bash in the Client container.
+ ```console
+ kubectl exec -it scalardl-client -- bash
+ ```
+ After this step, run each command in the Client container.
+
+1. Install the git, curl and unzip commands in the Client container.
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone ScalarDL Java Client SDK git repository.
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the directory to `scalardl-java-client-sdk/`.
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+   /scalardl-java-client-sdk
+ ```
+
+1. Change branch to arbitrary version.
+ ```console
+ git checkout -b v3.6.0 refs/tags/v3.6.0
+ ```
+ ```console
+ git branch
+ ```
+ [Command execution result]
+ ```console
+ master
+ * v3.6.0
+ ```
+ If you want to use another version, please specify the version (tag) you want to use. You need to use the same version of ScalarDL Ledger and ScalarDL Java Client SDK.
+
+1. Build the sample contracts.
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download CLI tools of ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip
+ ```
+ You need to use the same version of CLI tools and ScalarDL Ledger.
+
+1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file.
+ ```console
+ unzip ./scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+1. Create a configuration file (ledger.as.client.properties) to register the certificate of Ledger to Auditor.
+ ```console
+ cat << 'EOF' > ledger.as.client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=ledger
+ scalar.dl.client.cert_path=/keys/ledger/certificate
+ scalar.dl.client.private_key_path=/keys/ledger/private-key
+ EOF
+ ```
+
+1. Create a configuration file (auditor.as.client.properties) to register the certificate of Auditor to Ledger.
+ ```console
+ cat << 'EOF' > auditor.as.client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=auditor
+ scalar.dl.client.cert_path=/keys/auditor/certificate
+ scalar.dl.client.private_key_path=/keys/auditor/private-key
+ EOF
+ ```
+
+1. Create a configuration file (client.properties) to access ScalarDL Ledger on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' > client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=client
+ scalar.dl.client.cert_path=/keys/client/certificate
+ scalar.dl.client.private_key_path=/keys/client/private-key
+ EOF
+ ```
+
+1. Register the certificate file of Ledger.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./ledger.as.client.properties
+ ```
+
+1. Register the certificate file of Auditor.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./auditor.as.client.properties
+ ```
+
+1. Register the certificate file of client.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Register the contract `ValidateLedger` to execute a validate request.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id validate-ledger --contract-binary-name com.scalar.dl.client.contract.ValidateLedger --contract-class-file ./build/classes/java/main/com/scalar/dl/client/contract/ValidateLedger.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+ This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+ [Command execution result]
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+ * Reference information
+ * If the asset data is not tampered with, the contract execution request (execute-contract command) returns `OK` as a result.
+ * If the asset data is tampered with (e.g. the `state` value in the DB is tampered with), the contract execution request (execute-contract command) returns a value other than `OK` (e.g. `INCONSISTENT_STATES`) as a result, like the following.
+ [Command execution result (If the asset data is tampered with)]
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+ * In this way, the ScalarDL can detect data tampering.
+
+1. Execute a validation request for the asset.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset"
+ ```
+ [Command execution result]
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEYCIQDiiXqzw6K+Ml4uvn8rK43o5wHWESU3hoXnZPi6/OeKVwIhAM+tFBcapl6zg47Uq0Uc8nVNGWNHZLBDBGve3F0xkzTR"
+ },
+ "Auditor" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEUCIQDLsfUR2PmxSvfpL3YvHJUkz00RDpjCdctkroZKXE8d5QIgH73FQH2e11jfnynD00Pp9DrIG1vYizxDsvxUsMPo9IU="
+ }
+ }
+ ```
+ * Reference information
+ * If the asset data is not tampered with, the validation request (validate-ledger command) returns `OK` as a result.
+ * If the asset data is tampered with (e.g. the `state` value in the DB is tampered with), the validation request (validate-ledger command) returns a value other than `OK` (e.g. `INVALID_OUTPUT`) as a result, like the following.
+ [Command execution result (If the asset data is tampered with)]
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+ * In this way, the ScalarDL Ledger can detect data tampering.
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger and ScalarDL Auditor tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Auditor, ScalarDL Schema Loader, and PostgreSQL.
+ ```console
+ helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger scalardl-auditor schema-loader-auditor postgresql-auditor
+ ```
+
+1. Remove the Client container.
+ ```
+ kubectl delete pod scalardl-client --force --grace-period 0
+ ```
+
+1. Remove the working directory and sample files (configuration file, key, and certificate).
+ ```console
+ cd ~
+ ```
+ ```console
+ rm -rf ~/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-ledger.mdx b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-ledger.mdx
new file mode 100644
index 00000000..3adac8d5
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/getting-started-scalardl-ledger.mdx
@@ -0,0 +1,611 @@
+---
+---
+
+# Getting Started with Helm Charts (ScalarDL Ledger / Ledger only)
+
+This document explains how to get started with ScalarDL Ledger using Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Requirement
+
+You need to subscribe to ScalarDL Ledger in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images.
+ * AWS Marketplace
+ * scalar-ledger
+ * scalar-ledger-envoy
+ * scalardl-schema-loader-ledger
+ * Azure Marketplace
+ * scalar-ledger
+ * scalardl-envoy
+ * scalardl-schema-loader
+
+Please refer to the following documents for more details.
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster as follows.
+
+```
++--------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| |
+| [Pod] [Pod] [Pod] [Pod] |
+| |
+| +-------+ +-----------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ |
+| | +-------+ | | +-----------------+ | |
+| | | | | |
+| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDL Ledger | ---+---> | PostgreSQL | |
+| +--------+ | (Envoy) | | +-------+ | | (ScalarDL Ledger) | | +-----------------+ | +------------+ |
+| +---------+ | | +-------------------+ | | |
+| | +-------+ | | +-----------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ |
+| +-------+ +-----------------+ |
+| |
++--------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start a PostgreSQL container
+
+ScalarDL Ledger uses some kind of database system as a backend database. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL.
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL container is running.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 11s
+ ```
+
+## Step 3. Create a working directory
+
+We will create some configuration files and key/certificate files locally. So, create a working directory for them.
+
+1. Create a working directory.
+ ```console
+ mkdir -p ~/scalardl-test/certs/
+ ```
+
+## Step 4. Create key/certificate files
+
+Note: In this guide, we will use self-signed certificates for the test. However, it is strongly recommended that these certificates NOT be used in production.
+
+1. Change the working directory to `~/scalardl-test/certs/` directory.
+ ```console
+ cd ~/scalardl-test/certs/
+ ```
+
+1. Create a JSON file that includes Ledger information.
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/ledger.json
+ {
+ "CN": "ledger",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "ledger",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Client information.
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/client.json
+ {
+ "CN": "client",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "client",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create key/certificate files for the Ledger.
+ ```console
+ cfssl selfsign "" ./ledger.json | cfssljson -bare ledger
+ ```
+
+1. Create key/certificate files for the Client.
+ ```console
+ cfssl selfsign "" ./client.json | cfssljson -bare client
+ ```
+
+1. Confirm key/certificate files are created.
+ ```console
+ ls -1
+ ```
+ [Command execution result]
+ ```console
+ client-key.pem
+ client.csr
+ client.json
+ client.pem
+ ledger-key.pem
+ ledger.csr
+ ledger.json
+ ledger.pem
+ ```
+
+## Step 5. Create DB schemas for ScalarDL Ledger using Helm Charts
+
+We will deploy a ScalarDL Schema Loader on the Kubernetes cluster using Helm Charts.
+The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger in PostgreSQL.
+
+1. Change the working directory to `~/scalardl-test/`.
+ ```console
+ cd ~/scalardl-test/
+ ```
+
+1. Add the Scalar helm repository.
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+ * Azure Marketplace
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+ --docker-server= \
+ --docker-username= \
+ --docker-password=
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](../scalar-kubernetes/AwsMarketplaceGuide.mdx)
+ * [How to install Scalar products through Azure Marketplace](../scalar-kubernetes/AzureMarketplaceGuide.mdx)
+
+1. Create a custom values file for ScalarDL Schema Loader (schema-loader-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+
+1. Create a secret resource that includes a username and password for PostgreSQL.
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy the ScalarDL Schema Loader.
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Schema Loader pod is deployed and completed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 11m
+ schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3s
+ ```
+ If the ScalarDL Schema Loader pod is **ContainerCreating** or **Running**, wait for the process will be completed (The STATUS will be **Completed**).
+
+## Step 6. Deploy ScalarDL Ledger on the Kubernetes cluster using Helm Charts
+
+1. Create a custom values file for ScalarDL Ledger (scalardl-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ ledger:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ ledger:
+ image:
+ repository: "/scalarinc/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+
+1. Create secret resource `ledger-keys`.
+ ```console
+ kubectl create secret generic ledger-keys --from-file=private-key=./certs/ledger-key.pem
+ ```
+
+1. Deploy the ScalarDL Ledger.
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Ledger pods are deployed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-ledger-envoy-547bbf7546-6cn88 1/1 Running 0 52s
+ scalardl-ledger-envoy-547bbf7546-rpg5p 1/1 Running 0 52s
+ scalardl-ledger-envoy-547bbf7546-x2vlg 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-29bzm 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-9fklw 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-9tw5x 1/1 Running 0 52s
+ schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3m38s
+ ```
+ If the ScalarDL Ledger pods are deployed properly, you can see the STATUS are **Running**.
+
+1. Check if the ScalarDL Ledger services are deployed.
+ ```console
+ kubectl get svc
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-ledger ClusterIP 10.109.253.150 5432/TCP 15m
+ postgresql-ledger-hl ClusterIP None 5432/TCP 15m
+ scalardl-ledger-envoy ClusterIP 10.106.141.153 50051/TCP,50052/TCP 83s
+ scalardl-ledger-envoy-metrics ClusterIP 10.108.36.136 9001/TCP 83s
+ scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 83s
+ scalardl-ledger-metrics ClusterIP 10.98.4.217 8080/TCP 83s
+ ```
+ If the ScalarDL Ledger services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` has no CLUSTER-IP.)
+
+## Step 7. Start a Client container
+
+We will use certificate files in a Client container. So, we create a secret resource and mount it to a Client container.
+
+1. Create secret resource `client-keys`.
+ ```
+ kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem
+ ```
+
+1. Start a Client container on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' | kubectl apply -f -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ volumeMounts:
+ - name: "client-keys"
+ mountPath: "/keys"
+ readOnly: true
+ volumes:
+ - name: "client-keys"
+ secret:
+ secretName: "client-keys"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Check if the Client container is running.
+ ```console
+ kubectl get pod scalardl-client
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 11s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDL and the contract, please refer to the [Getting Started with ScalarDL](https://scalardl.scalar-labs.com/docs/latest/getting-started).
+
+1. Run bash in the Client container.
+ ```console
+ kubectl exec -it scalardl-client -- bash
+ ```
+ After this step, run each command in the Client container.
+
+1. Install the git, curl and unzip commands in the Client container.
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone ScalarDL Java Client SDK git repository.
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the directory to `scalardl-java-client-sdk/`.
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+
+ /scalardl-java-client-sdk
+ ```
+
+1. Change branch to arbitrary version.
+ ```console
+ git checkout -b v3.6.0 refs/tags/v3.6.0
+ ```
+ ```console
+ git branch
+ ```
+ [Command execution result]
+ ```console
+ master
+ * v3.6.0
+ ```
+ If you want to use another version, please specify the version (tag) you want to use. You need to use the same version of ScalarDL Ledger and ScalarDL Java Client SDK.
+
+1. Build the sample contracts.
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download CLI tools of ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip
+ ```
+ You need to use the same version of CLI tools and ScalarDL Ledger.
+
+1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file.
+ ```console
+ unzip ./scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+1. Create a configuration file (client.properties) to access ScalarDL Ledger on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' > client.properties
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.client.cert_holder_id=client
+ scalar.dl.client.cert_path=/keys/certificate
+ scalar.dl.client.private_key_path=/keys/private-key
+ EOF
+ ```
+
+1. Register the certificate file of the client.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+ This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+ [Command execution result]
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+
+1. Execute a validation request for the asset.
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset"
+ ```
+ [Command execution result]
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3",
+ "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=",
+ "signature" : "MEQCIG6Xa4WOWGMIIbA3PnCje4aAapYfCMerF54xRW0gaUuzAiBCA1nCAPoFWgxArB34/u9b+KeoxQBMALI/pOzMNoLExg=="
+ },
+ "Auditor" : null
+ }
+ ```
+ * Reference information
+ * If the asset data is not tampered with, the validation request (validate-ledger command) returns `OK` as a result.
+ * If the asset data is tampered with (e.g. the `state` value in the DB is tampered with), the validation request (validate-ledger command) returns a value other than `OK` (e.g. `INVALID_OUTPUT`) as a result, like the following.
+ [Command execution result (If the asset data is tampered with)]
+ ```console
+ {
+ "status_code" : "INVALID_OUTPUT",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3",
+ "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=",
+ "signature" : "MEQCIGtJerW7N93c/bvIBy/7NXxoQwGFznHMmV6RzsgHQg0dAiBu+eBxkfmMQKJY2d9fLNvCH+4b+9rl7gZ3OXJ2NYeVsA=="
+ },
+ "Auditor" : null
+ }
+ ```
+ * In this way, the ScalarDL Ledger can detect data tampering.
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Schema Loader, and PostgreSQL.
+ ```console
+ helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger
+ ```
+
+1. Remove the Client container.
+ ```
+ kubectl delete pod scalardl-client --force --grace-period 0
+ ```
+
+1. Remove the working directory and sample files (configuration file, key, and certificate).
+ ```console
+ cd ~
+ ```
+ ```console
+ rm -rf ~/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+* [Getting Started with Helm Charts (Scalar Manager)](getting-started-scalar-manager.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.mdx b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.mdx
new file mode 100644
index 00000000..cdf5f165
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.mdx
@@ -0,0 +1,33 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# How to deploy Scalar Admin for Kubernetes
+
+This document explains how to deploy Scalar Admin for Kubernetes by using Scalar Helm Charts. For details on the custom values file for Scalar Admin for Kubernetes, see [Configure a custom values file for Scalar Admin for Kubernetes](configure-custom-values-scalar-admin-for-kubernetes.mdx).
+
+## Deploy Scalar Admin for Kubernetes
+
+To deploy Scalar Admin for Kubernetes, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalar-admin-for-kubernetes -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Upgrade a Scalar Admin for Kubernetes job
+
+To upgrade a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalar-admin-for-kubernetes -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete a Scalar Admin for Kubernetes job
+
+To delete a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-manager.mdx b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-manager.mdx
new file mode 100644
index 00000000..5795f265
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-manager.mdx
@@ -0,0 +1,62 @@
+---
+tags:
+ - Enterprise Option
+---
+
+# How to deploy Scalar Manager
+
+This document explains how to deploy Scalar Manager using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for Scalar Manager.
+
+* [Configure a custom values file for Scalar Manager](configure-custom-values-scalar-manager.mdx)
+
+## Deploy kube-prometheus-stack and loki-stack
+
+When you use Scalar Manager, you must deploy kube-prometheus-stack and loki-stack. Please refer to the following documents for more details on how to deploy them.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](getting-started-monitoring.mdx)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](getting-started-logging.mdx)
+
+When you deploy kube-prometheus-stack, you must set the following configuration in the custom values file for kube-prometheus-stack.
+
+```yaml
+kubeStateMetrics:
+ enabled: true
+nodeExporter:
+ enabled: true
+kubelet:
+ enabled: true
+grafana:
+ grafana.ini:
+ users:
+ default_theme: light
+ security:
+ allow_embedding: true
+ auth.anonymous:
+ enabled: true
+ org_name: "Main Org."
+ org_role: Editor
+```
+
+If you already have a deployment of kube-prometheus-stack, please upgrade the configuration using the following command.
+
+```console
+helm upgrade <RELEASE_NAME> prometheus-community/kube-prometheus-stack -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Deploy Scalar Manager
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalar-manager -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Upgrade the deployment of Scalar Manager
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalar-manager -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of Scalar Manager
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-products.mdx b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-products.mdx
new file mode 100644
index 00000000..e2eb2781
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalar-products.mdx
@@ -0,0 +1,71 @@
+---
+tags:
+ - Enterprise Standard
+ - Enterprise Premium
+---
+
+# Deploy Scalar products using Scalar Helm Charts
+
+This document explains how to deploy Scalar products using Scalar Helm Charts. If you want to test Scalar products on your local environment using a minikube cluster, please refer to the following getting started guide.
+
+* [Getting Started with Scalar Helm Charts](getting-started-scalar-helm-charts.mdx)
+
+## Prerequisites
+
+### Install the helm command
+
+You must install the helm command to use Scalar Helm Charts. Please install the helm command according to the [Helm document](https://helm.sh/docs/intro/install/).
+
+### Add the Scalar Helm Charts repository
+
+```console
+helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+```
+```console
+helm repo update scalar-labs
+```
+
+### Prepare a Kubernetes cluster
+
+You must prepare a Kubernetes cluster for the deployment of Scalar products. If you use EKS (Amazon Elastic Kubernetes Service) or AKS (Azure Kubernetes Service) in the production environment, please refer to the following documents for more details.
+
+- [Guidelines for creating an Amazon EKS cluster for Scalar products](../scalar-kubernetes/CreateEKSClusterForScalarProducts.mdx)
+- [Guidelines for creating an AKS cluster for Scalar products](../scalar-kubernetes/CreateAKSClusterForScalarProducts.mdx)
+
+You must prepare a supported version of Kubernetes. For versions that Scalar Helm Charts supports, see [Kubernetes](https://scalardb.scalar-labs.com/docs/latest/requirements/#kubernetes).
+
+### Prepare a database (ScalarDB, ScalarDL Ledger, ScalarDL Auditor)
+
+You must prepare a database as a backend storage of ScalarDB/ScalarDL. You can see the supported databases by ScalarDB/ScalarDL in the following document.
+
+* [ScalarDB Supported Databases](https://scalardb.scalar-labs.com/docs/latest/requirements#databases)
+
+### Prepare a custom values file
+
+You must prepare your custom values file based on your environment. Please refer to the following documents for more details on how to create a custom values file.
+
+* [Configure a custom values file for Scalar Helm Charts](configure-custom-values-file.mdx)
+
+### Get the container images
+
+If you're using commercially licensed Scalar products, you must get the container images of those products. For details, see [How to get the container images of Scalar products](../scalar-kubernetes/HowToGetContainerImages.mdx).
+
+If you're using any of the following products from the public container repository, you can get the container images from the public container repository with the default configuration of Scalar Helm Chart:
+
+* Scalar Envoy (deploy with ScalarDB Cluster, ScalarDL Ledger, or ScalarDL Auditor)
+* ScalarDL Schema Loader
+* Scalar Admin for Kubernetes
+* ScalarDB Analytics with PostgreSQL
+
+## Deploy Scalar products
+
+Please refer to the following documents for more details on how to deploy each product.
+
+* [ScalarDB Cluster](how-to-deploy-scalardb-cluster.mdx)
+* [ScalarDB Analytics with PostgreSQL](how-to-deploy-scalardb-analytics-postgresql.mdx)
+* [ScalarDL Ledger](how-to-deploy-scalardl-ledger.mdx)
+* [ScalarDL Auditor](how-to-deploy-scalardl-auditor.mdx)
+* [Scalar Admin for Kubernetes](how-to-deploy-scalar-admin-for-kubernetes.mdx)
+* [Scalar Manager](how-to-deploy-scalar-manager.mdx)
+* [[Deprecated] ScalarDB Server](how-to-deploy-scalardb.mdx)
+* [[Deprecated] ScalarDB GraphQL](how-to-deploy-scalardb-graphql.mdx)
diff --git a/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-analytics-postgresql.mdx b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-analytics-postgresql.mdx
new file mode 100644
index 00000000..90db2ce6
--- /dev/null
+++ b/versioned_docs/version-3.13/helm-charts/how-to-deploy-scalardb-analytics-postgresql.mdx
@@ -0,0 +1,40 @@
+---
+tags:
+ - Community
+---
+
+# How to deploy ScalarDB Analytics with PostgreSQL
+
+This document explains how to deploy ScalarDB Analytics with PostgreSQL by using Scalar Helm Charts. For details on the custom values file for ScalarDB Analytics with PostgreSQL, see [Configure a custom values file for ScalarDB Analytics with PostgreSQL](configure-custom-values-scalardb-analytics-postgresql.mdx).
+
+## Prepare a secret resource
+
+You must create a secret resource `scalardb-analytics-postgresql-superuser-password` with the key `superuser-password` that includes a superuser password for PostgreSQL before you deploy ScalarDB Analytics with PostgreSQL. Scalar Helm Chart mounts this secret resource and sets the `POSTGRES_PASSWORD` environment variable to the value of the `superuser-password` key.
+
+```console
+kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=<POSTGRESQL_SUPERUSER_PASSWORD> -n <NAMESPACE>
+```
+
+## Deploy ScalarDB Analytics with PostgreSQL
+
+To deploy ScalarDB Analytics with PostgreSQL, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardb-analytics-postgresql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Upgrade a ScalarDB Analytics with PostgreSQL deployment
+
+To upgrade a ScalarDB Analytics with PostgreSQL deployment, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardb-analytics-postgresql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>