Commit

feat: add dockertest instance bootstrapper (#54)
* feat: add dockertest instance bootstrapper

* fix: go mod tidy
mabdh authored Sep 23, 2022
1 parent 10d42d9 commit 069f2c6
Showing 13 changed files with 1,307 additions and 30 deletions.
4 changes: 3 additions & 1 deletion .gitignore
@@ -17,4 +17,6 @@
.idea
.vscode
expt/
temp.env
temp.env
.DS_Store
temp.env
4 changes: 2 additions & 2 deletions db/db_test.go
@@ -11,8 +11,8 @@ import (

"github.com/jmoiron/sqlx"
"github.com/odpf/salt/db"
"github.com/ory/dockertest"
"github.com/ory/dockertest/docker"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
)

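The import bump above moves the test onto `ory/dockertest/v3`. For context, a minimal sketch of the raw v3 bootstrap pattern that the new `dockertest` package wraps (image tag, credentials, and driver are assumptions, not taken from the diff):

```go
package db_test

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres driver
	"github.com/ory/dockertest/v3"
)

func bootstrapPostgres() {
	// connect to the local Docker daemon
	pool, err := dockertest.NewPool("")
	if err != nil {
		log.Fatal(err)
	}

	// start a throwaway Postgres container (tag is an assumption)
	resource, err := pool.Run("postgres", "13", []string{
		"POSTGRES_USER=test",
		"POSTGRES_PASSWORD=test",
		"POSTGRES_DB=test",
	})
	if err != nil {
		log.Fatal(err)
	}

	// wait until the database accepts connections
	dsn := fmt.Sprintf("postgres://test:test@localhost:%s/test?sslmode=disable",
		resource.GetPort("5432/tcp"))
	if err := pool.Retry(func() error {
		conn, err := sql.Open("postgres", dsn)
		if err != nil {
			return err
		}
		defer conn.Close()
		return conn.Ping()
	}); err != nil {
		log.Fatal(err)
	}

	// remove the container when done
	if err := pool.Purge(resource); err != nil {
		log.Fatal(err)
	}
}
```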
76 changes: 76 additions & 0 deletions dockertest/README.md
@@ -0,0 +1,76 @@
# dockertest

This package provides an abstraction over several dockerized data stores, using `ory/dockertest` to bootstrap a specific dockerized instance for tests.

Example: Postgres

```go
// create postgres instance
pgDocker, err := dockertest.CreatePostgres(
	dockertest.PostgresWithDetail(
		pgUser, pgPass, pgDBName,
	),
)
if err != nil {
	return err
}

// get connection string
connString := pgDocker.GetExternalConnString()

// purge docker
if err := pgDocker.GetPool().Purge(pgDocker.GetResouce()); err != nil {
	return fmt.Errorf("could not purge resource: %w", err)
}
```
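The returned connection string can then be handed to any Postgres client. A minimal sketch using `jmoiron/sqlx` (driver name and DSN format are assumptions, and the instance may still need a short retry loop before it accepts connections):

```go
import (
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver
)

// openDB is an illustrative helper, not part of this package.
func openDB(connString string) (*sqlx.DB, error) {
	dbx, err := sqlx.Connect("postgres", connString)
	if err != nil {
		return nil, err
	}
	return dbx, nil
}
```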

Example: SpiceDB

- bootstrap SpiceDB backed by Postgres and wire them together internally via a bridge network

```go
// create custom pool
pool, err := dockertest.NewPool("")
if err != nil {
	return nil, err
}

// create a bridge network for testing
network, err := pool.Client.CreateNetwork(docker.CreateNetworkOptions{
	Name: fmt.Sprintf("bridge-%s", uuid.New().String()),
})
if err != nil {
	return nil, err
}

// create postgres instance
pgDocker, err := dockertest.CreatePostgres(
	dockertest.PostgresWithDockerPool(pool),
	dockertest.PostgresWithDockerNetwork(network),
	dockertest.PostgresWithDetail(
		pgUser, pgPass, pgDBName,
	),
)
if err != nil {
	return nil, err
}

// get connection string
connString := pgDocker.GetInternalConnString()

// create spice db instance
spiceDocker, err := dockertest.CreateSpiceDB(connString,
	dockertest.SpiceDBWithDockerPool(pool),
	dockertest.SpiceDBWithDockerNetwork(network),
)
if err != nil {
	return nil, err
}

if err := dockertest.MigrateSpiceDB(connString,
	dockertest.MigrateSpiceDBWithDockerPool(pool),
	dockertest.MigrateSpiceDBWithDockerNetwork(network),
); err != nil {
	return err
}

// purge docker resources
if err := pool.Purge(spiceDocker.GetResouce()); err != nil {
	return fmt.Errorf("could not purge resource: %w", err)
}
if err := pool.Purge(pgDocker.GetResouce()); err != nil {
	return fmt.Errorf("could not purge resource: %w", err)
}
```
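In a test suite these bootstrap and purge calls typically sit in `TestMain`, so the containers outlive all tests and are always cleaned up. A minimal sketch using the Postgres helper above (the import path and credential values are assumptions, not taken from this commit):

```go
package mypackage_test

import (
	"log"
	"os"
	"testing"

	"github.com/odpf/salt/dockertest" // assumed import path for this package
)

func TestMain(m *testing.M) {
	pgDocker, err := dockertest.CreatePostgres(
		dockertest.PostgresWithDetail("test", "test", "testdb"),
	)
	if err != nil {
		log.Fatal(err)
	}

	code := m.Run()

	// purge the container even when tests fail
	if err := pgDocker.GetPool().Purge(pgDocker.GetResouce()); err != nil {
		log.Fatal(err)
	}

	os.Exit(code)
}
```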
121 changes: 121 additions & 0 deletions dockertest/configs/cortex/single_process_cortex.yaml
@@ -0,0 +1,121 @@
# Configuration for running Cortex in single-process mode.
# This configuration should not be used in production.
# It is only for getting started and development.

# Disable the requirement that every request to Cortex has a
# X-Scope-OrgID header. `fake` will be substituted in instead.
auth_enabled: false

server:
  http_listen_port: 9009

  # Configure the server to allow messages up to 100MB.
  grpc_server_max_recv_msg_size: 104857600
  grpc_server_max_send_msg_size: 104857600
  grpc_server_max_concurrent_streams: 1000

distributor:
  shard_by_all_labels: true
  pool:
    health_check_ingesters: true

ingester_client:
  grpc_client_config:
    # Configure the client to allow messages up to 100MB.
    max_recv_msg_size: 104857600
    max_send_msg_size: 104857600
    grpc_compression: gzip

ingester:
  # We want our ingesters to flush chunks at the same time to optimise
  # deduplication opportunities.
  spread_flushes: true
  chunk_age_jitter: 0

  walconfig:
    wal_enabled: true
    recover_from_wal: true
    wal_dir: /tmp/cortex/wal

  lifecycler:
    # The address to advertise for this ingester. Will be autodiscovered by
    # looking up address on eth0 or en0; can be specified if this fails.
    # address: 127.0.0.1

    # We want to start immediately and flush on shutdown.
    join_after: 0
    min_ready_duration: 0s
    final_sleep: 0s
    num_tokens: 512
    tokens_file_path: /tmp/cortex/wal/tokens

    # Use an in memory ring store, so we don't need to launch a Consul.
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1

# Use local storage - BoltDB for the index, and the filesystem
# for the chunks.
schema:
  configs:
  - from: 2019-07-29
    store: boltdb
    object_store: filesystem
    schema: v10
    index:
      prefix: index_
      period: 1w

storage:
  boltdb:
    directory: /tmp/cortex/index

  filesystem:
    directory: /tmp/cortex/chunks

  delete_store:
    store: boltdb

purger:
  object_store_type: filesystem

frontend_worker:
  # Configure the frontend worker in the querier to match worker count
  # to max_concurrent on the queriers.
  match_max_concurrent: true

# Configure the ruler to scan the /tmp/cortex/rules directory for prometheus
# rules: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules
ruler:
  enable_api: true
  enable_sharding: false
  # alertmanager_url: http://cortex-am:9009/api/prom/alertmanager/
  rule_path: /tmp/cortex/rules
  storage:
    type: s3
    s3:
      # endpoint: http://minio1:9000
      bucketnames: cortex
      secret_access_key: minio123
      access_key_id: minio
      s3forcepathstyle: true

alertmanager:
  enable_api: true
  sharding_enabled: false
  data_dir: data/
  external_url: /api/prom/alertmanager
  storage:
    type: s3
    s3:
      # endpoint: http://minio1:9000
      bucketnames: cortex
      secret_access_key: minio123
      access_key_id: minio
      s3forcepathstyle: true

alertmanager_storage:
  backend: local
  local:
    path: tmp/cortex/alertmanager
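This config reads as a fixture for bootstrapping a single-process Cortex container in tests. A minimal sketch of how such a file could be mounted with `ory/dockertest/v3` (image tag, mount paths, and readiness check are assumptions, not taken from this commit; imports of `fmt`, `net/http`, and `github.com/ory/dockertest/v3` assumed):

```go
// startCortex is an illustrative helper, not part of this package.
func startCortex(pool *dockertest.Pool, configPath string) (*dockertest.Resource, error) {
	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "cortexproject/cortex",
		Tag:        "v1.13.0", // assumed tag
		Cmd:        []string{"-config.file=/etc/cortex/single_process_cortex.yaml"},
		Mounts: []string{
			// bind-mount the config shown above into the container
			configPath + ":/etc/cortex/single_process_cortex.yaml",
		},
	})
	if err != nil {
		return nil, err
	}

	// the config sets http_listen_port to 9009; wait until Cortex reports ready
	if err := pool.Retry(func() error {
		resp, err := http.Get(fmt.Sprintf("http://localhost:%s/ready", resource.GetPort("9009/tcp")))
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("cortex not ready: %d", resp.StatusCode)
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return resource, nil
}
```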

0 comments on commit 069f2c6
