diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml
index 347ca9526..faf17c835 100644
--- a/.github/workflows/integration_test.yml
+++ b/.github/workflows/integration_test.yml
@@ -51,7 +51,7 @@ jobs:
echo ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} | docker login --username appflowyinc --password-stdin
docker push appflowyinc/appflowy_cloud:${GITHUB_SHA}
docker push appflowyinc/appflowy_history:${GITHUB_SHA}
- APPFLOWY_HISTORY_VERSION=${GITHUB_SHA}
+ APPFLOWY_HISTORY_VERSION=${GITHUB_SHA}
APPFLOWY_CLOUD_VERSION=0.1.1
test:
@@ -93,6 +93,7 @@ jobs:
sed -i "s|LOCAL_AI_AWS_SECRET_ACCESS_KEY=.*|LOCAL_AI_AWS_SECRET_ACCESS_KEY=${{ secrets.LOCAL_AI_AWS_SECRET_ACCESS_KEY }}|" .env
sed -i 's|APPFLOWY_INDEXER_REDIS_URL=.*|APPFLOWY_INDEXER_REDIS_URL=redis://localhost:6379|' .env
sed -i 's|APPFLOWY_INDEXER_DATABASE_URL=.*|APPFLOWY_INDEXER_DATABASE_URL=postgres://postgres:password@localhost:5432/postgres|' .env
+ sed -i 's|APPFLOWY_WEB_URL=.*|APPFLOWY_WEB_URL=http://localhost:3000|' .env
shell: bash
- name: Update Nginx Configuration
@@ -125,4 +126,4 @@ jobs:
- name: Remove Docker Images from Docker Hub
run: |
TOKEN=$(curl -s -H "Content-Type: application/json" -X POST -d '{"username": "appflowyinc", "password": "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}"}' https://hub.docker.com/v2/users/login/ | jq -r .token)
- curl -s -X DELETE -H "Authorization: JWT ${TOKEN}" https://hub.docker.com/v2/repositories/appflowyinc/${{ matrix.test_service }}/tags/${GITHUB_SHA}/
\ No newline at end of file
+ curl -s -X DELETE -H "Authorization: JWT ${TOKEN}" https://hub.docker.com/v2/repositories/appflowyinc/${{ matrix.test_service }}/tags/${GITHUB_SHA}/
diff --git a/.sqlx/query-343cdf36e68c8333ecc6b778789d8de543c15f2aa0318dac2d10c5f1ef0f0232.json b/.sqlx/query-0c3ae560880e82218d13c5992540386ea1566e45e31acd5fb51886aabcd98479.json
similarity index 57%
rename from .sqlx/query-343cdf36e68c8333ecc6b778789d8de543c15f2aa0318dac2d10c5f1ef0f0232.json
rename to .sqlx/query-0c3ae560880e82218d13c5992540386ea1566e45e31acd5fb51886aabcd98479.json
index c86517cab..bfc48c6b2 100644
--- a/.sqlx/query-343cdf36e68c8333ecc6b778789d8de543c15f2aa0318dac2d10c5f1ef0f0232.json
+++ b/.sqlx/query-0c3ae560880e82218d13c5992540386ea1566e45e31acd5fb51886aabcd98479.json
@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
- "query": "\n WITH request_id_workspace_member_count AS (\n SELECT\n request_id,\n COUNT(*) AS member_count\n FROM af_access_request\n JOIN af_workspace_member USING (workspace_id)\n WHERE request_id = $1\n GROUP BY request_id\n )\n SELECT\n request_id,\n view_id,\n (\n workspace_id,\n af_workspace.database_storage_id,\n af_workspace.owner_uid,\n owner_profile.name,\n af_workspace.created_at,\n af_workspace.workspace_type,\n af_workspace.deleted_at,\n af_workspace.workspace_name,\n af_workspace.icon,\n request_id_workspace_member_count.member_count\n ) AS \"workspace!: AFWorkspaceWithMemberCountRow\",\n (\n af_user.uuid,\n af_user.name,\n af_user.email,\n af_user.metadata ->> 'icon_url'\n ) AS \"requester!: AFAccessRequesterColumn\",\n status AS \"status: AFAccessRequestStatusColumn\",\n af_access_request.created_at AS created_at\n FROM af_access_request\n JOIN af_user USING (uid)\n JOIN af_workspace USING (workspace_id)\n JOIN af_user AS owner_profile ON af_workspace.owner_uid = owner_profile.uid\n JOIN request_id_workspace_member_count USING (request_id)\n WHERE request_id = $1\n ",
+ "query": "\n WITH request_id_workspace_member_count AS (\n SELECT\n request_id,\n COUNT(*) AS member_count\n FROM af_access_request\n JOIN af_workspace_member USING (workspace_id)\n WHERE request_id = $1\n GROUP BY request_id\n )\n SELECT\n request_id,\n view_id,\n (\n workspace_id,\n af_workspace.database_storage_id,\n af_workspace.owner_uid,\n owner_profile.name,\n owner_profile.email,\n af_workspace.created_at,\n af_workspace.workspace_type,\n af_workspace.deleted_at,\n af_workspace.workspace_name,\n af_workspace.icon,\n request_id_workspace_member_count.member_count\n ) AS \"workspace!: AFWorkspaceWithMemberCountRow\",\n (\n af_user.uuid,\n af_user.name,\n af_user.email,\n af_user.metadata ->> 'icon_url'\n ) AS \"requester!: AFAccessRequesterColumn\",\n status AS \"status: AFAccessRequestStatusColumn\",\n af_access_request.created_at AS created_at\n FROM af_access_request\n JOIN af_user USING (uid)\n JOIN af_workspace USING (workspace_id)\n JOIN af_user AS owner_profile ON af_workspace.owner_uid = owner_profile.uid\n JOIN request_id_workspace_member_count USING (request_id)\n WHERE request_id = $1\n ",
"describe": {
"columns": [
{
@@ -48,5 +48,5 @@
false
]
},
- "hash": "343cdf36e68c8333ecc6b778789d8de543c15f2aa0318dac2d10c5f1ef0f0232"
+ "hash": "0c3ae560880e82218d13c5992540386ea1566e45e31acd5fb51886aabcd98479"
}
diff --git a/.sqlx/query-29a6f76da0baf71c215b69078cce66d55f43d63f5c1c9e6786a4e80b52b4c5df.json b/.sqlx/query-cf7b8baaba35e74671911e13f1efcdfa3a642d2b7276c2a81f877a6217a0d473.json
similarity index 59%
rename from .sqlx/query-29a6f76da0baf71c215b69078cce66d55f43d63f5c1c9e6786a4e80b52b4c5df.json
rename to .sqlx/query-cf7b8baaba35e74671911e13f1efcdfa3a642d2b7276c2a81f877a6217a0d473.json
index c2ce5ae79..4fa9b15eb 100644
--- a/.sqlx/query-29a6f76da0baf71c215b69078cce66d55f43d63f5c1c9e6786a4e80b52b4c5df.json
+++ b/.sqlx/query-cf7b8baaba35e74671911e13f1efcdfa3a642d2b7276c2a81f877a6217a0d473.json
@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
- "query": "\n INSERT INTO public.af_workspace (owner_uid, workspace_name)\n VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)\n RETURNING\n workspace_id,\n database_storage_id,\n owner_uid,\n (SELECT name FROM public.af_user WHERE uid = owner_uid) AS owner_name,\n created_at,\n workspace_type,\n deleted_at,\n workspace_name,\n icon\n ;\n ",
+ "query": "\n WITH new_workspace AS (\n INSERT INTO public.af_workspace (owner_uid, workspace_name)\n VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)\n RETURNING *\n )\n SELECT\n workspace_id,\n database_storage_id,\n owner_uid,\n owner_profile.name AS owner_name,\n owner_profile.email AS owner_email,\n new_workspace.created_at,\n workspace_type,\n new_workspace.deleted_at,\n workspace_name,\n icon\n FROM new_workspace\n JOIN public.af_user AS owner_profile ON new_workspace.owner_uid = owner_profile.uid;\n ",
"describe": {
"columns": [
{
@@ -25,26 +25,31 @@
},
{
"ordinal": 4,
+ "name": "owner_email",
+ "type_info": "Text"
+ },
+ {
+ "ordinal": 5,
"name": "created_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 5,
+ "ordinal": 6,
"name": "workspace_type",
"type_info": "Int4"
},
{
- "ordinal": 6,
+ "ordinal": 7,
"name": "deleted_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 7,
+ "ordinal": 8,
"name": "workspace_name",
"type_info": "Text"
},
{
- "ordinal": 8,
+ "ordinal": 9,
"name": "icon",
"type_info": "Text"
}
@@ -59,7 +64,8 @@
false,
false,
false,
- null,
+ false,
+ false,
true,
false,
true,
@@ -67,5 +73,5 @@
false
]
},
- "hash": "29a6f76da0baf71c215b69078cce66d55f43d63f5c1c9e6786a4e80b52b4c5df"
+ "hash": "cf7b8baaba35e74671911e13f1efcdfa3a642d2b7276c2a81f877a6217a0d473"
}
diff --git a/.sqlx/query-2ebeb1af741d6866849af544be78ab44a44f9800265e49adf156b8b40b2d0f46.json b/.sqlx/query-dbebcabe81603dca27ad9fc5a5df0f1e56a62016246c5a522423102a9e9b6dae.json
similarity index 64%
rename from .sqlx/query-2ebeb1af741d6866849af544be78ab44a44f9800265e49adf156b8b40b2d0f46.json
rename to .sqlx/query-dbebcabe81603dca27ad9fc5a5df0f1e56a62016246c5a522423102a9e9b6dae.json
index b94bd6306..5ab3dbf69 100644
--- a/.sqlx/query-2ebeb1af741d6866849af544be78ab44a44f9800265e49adf156b8b40b2d0f46.json
+++ b/.sqlx/query-dbebcabe81603dca27ad9fc5a5df0f1e56a62016246c5a522423102a9e9b6dae.json
@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
- "query": "\n SELECT\n w.workspace_id,\n w.database_storage_id,\n w.owner_uid,\n (SELECT name FROM public.af_user WHERE uid = w.owner_uid) AS owner_name,\n w.created_at,\n w.workspace_type,\n w.deleted_at,\n w.workspace_name,\n w.icon\n FROM af_workspace w\n JOIN af_workspace_member wm ON w.workspace_id = wm.workspace_id\n WHERE wm.uid = (\n SELECT uid FROM public.af_user WHERE uuid = $1\n );\n ",
+ "query": "\n SELECT\n w.workspace_id,\n w.database_storage_id,\n w.owner_uid,\n u.name AS owner_name,\n u.email AS owner_email,\n w.created_at,\n w.workspace_type,\n w.deleted_at,\n w.workspace_name,\n w.icon\n FROM af_workspace w\n JOIN af_workspace_member wm ON w.workspace_id = wm.workspace_id\n JOIN public.af_user u ON w.owner_uid = u.uid\n WHERE wm.uid = (\n SELECT uid FROM public.af_user WHERE uuid = $1\n );\n ",
"describe": {
"columns": [
{
@@ -25,26 +25,31 @@
},
{
"ordinal": 4,
+ "name": "owner_email",
+ "type_info": "Text"
+ },
+ {
+ "ordinal": 5,
"name": "created_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 5,
+ "ordinal": 6,
"name": "workspace_type",
"type_info": "Int4"
},
{
- "ordinal": 6,
+ "ordinal": 7,
"name": "deleted_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 7,
+ "ordinal": 8,
"name": "workspace_name",
"type_info": "Text"
},
{
- "ordinal": 8,
+ "ordinal": 9,
"name": "icon",
"type_info": "Text"
}
@@ -58,7 +63,8 @@
false,
false,
false,
- null,
+ false,
+ false,
true,
false,
true,
@@ -66,5 +72,5 @@
false
]
},
- "hash": "2ebeb1af741d6866849af544be78ab44a44f9800265e49adf156b8b40b2d0f46"
+ "hash": "dbebcabe81603dca27ad9fc5a5df0f1e56a62016246c5a522423102a9e9b6dae"
}
diff --git a/.sqlx/query-04eb046efaa45999587db62cb32fa314f61997652c070870b44d23753ad48b5c.json b/.sqlx/query-f448ae1b28ef69f884040016072b12694e530b64a105e03a040c65b779c9d91e.json
similarity index 68%
rename from .sqlx/query-04eb046efaa45999587db62cb32fa314f61997652c070870b44d23753ad48b5c.json
rename to .sqlx/query-f448ae1b28ef69f884040016072b12694e530b64a105e03a040c65b779c9d91e.json
index 93cd3969c..5b7e78448 100644
--- a/.sqlx/query-04eb046efaa45999587db62cb32fa314f61997652c070870b44d23753ad48b5c.json
+++ b/.sqlx/query-f448ae1b28ef69f884040016072b12694e530b64a105e03a040c65b779c9d91e.json
@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
- "query": "\n SELECT\n workspace_id,\n database_storage_id,\n owner_uid,\n (SELECT name FROM public.af_user WHERE uid = owner_uid) AS owner_name,\n created_at,\n workspace_type,\n deleted_at,\n workspace_name,\n icon\n FROM public.af_workspace WHERE workspace_id = $1\n ",
+ "query": "\n SELECT\n workspace_id,\n database_storage_id,\n owner_uid,\n owner_profile.name as owner_name,\n owner_profile.email as owner_email,\n af_workspace.created_at,\n workspace_type,\n af_workspace.deleted_at,\n workspace_name,\n icon\n FROM public.af_workspace\n JOIN public.af_user owner_profile ON af_workspace.owner_uid = owner_profile.uid\n WHERE workspace_id = $1\n ",
"describe": {
"columns": [
{
@@ -25,26 +25,31 @@
},
{
"ordinal": 4,
+ "name": "owner_email",
+ "type_info": "Text"
+ },
+ {
+ "ordinal": 5,
"name": "created_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 5,
+ "ordinal": 6,
"name": "workspace_type",
"type_info": "Int4"
},
{
- "ordinal": 6,
+ "ordinal": 7,
"name": "deleted_at",
"type_info": "Timestamptz"
},
{
- "ordinal": 7,
+ "ordinal": 8,
"name": "workspace_name",
"type_info": "Text"
},
{
- "ordinal": 8,
+ "ordinal": 9,
"name": "icon",
"type_info": "Text"
}
@@ -58,7 +63,8 @@
false,
false,
false,
- null,
+ false,
+ false,
true,
false,
true,
@@ -66,5 +72,5 @@
false
]
},
- "hash": "04eb046efaa45999587db62cb32fa314f61997652c070870b44d23753ad48b5c"
+ "hash": "f448ae1b28ef69f884040016072b12694e530b64a105e03a040c65b779c9d91e"
}
diff --git a/Cargo.lock b/Cargo.lock
index 49caecec1..6f001c063 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -723,6 +723,7 @@ dependencies = [
"prometheus-client",
"prost",
"rand 0.8.5",
+ "rayon",
"redis 0.25.4",
"secrecy",
"semver",
@@ -2799,6 +2800,7 @@ dependencies = [
"bytes",
"chrono",
"collab-entity",
+ "prost",
"serde",
"serde_json",
"serde_repr",
diff --git a/Cargo.toml b/Cargo.toml
index ac37e817a..cd10988b1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -147,7 +147,7 @@ lettre = { version = "0.11.7", features = ["tokio1", "tokio1-native-tls"] }
handlebars = "5.1.2"
pin-project = "1.1.5"
byteorder = "1.5.0"
-rayon = "1.10.0"
+rayon.workspace = true
[dev-dependencies]
@@ -247,6 +247,7 @@ actix-web = { version = "4.5.1", default-features = false, features = [
actix-http = { version = "3.6.0", default-features = false }
tokio = { version = "1.36.0", features = ["sync"] }
tokio-stream = "0.1.14"
+rayon = "1.10.0"
futures-util = "0.3.30"
bincode = "1.3.3"
client-websocket = { path = "libs/client-websocket" }
diff --git a/assets/mailer_templates/build_production/access_request.html b/assets/mailer_templates/build_production/access_request.html
new file mode 100644
index 000000000..960352d3e
--- /dev/null
+++ b/assets/mailer_templates/build_production/access_request.html
@@ -0,0 +1,123 @@
+
+
+
+
+
+
+
+
+
+
+ Request to join the workspace
+
+
+
+
+ Approve a user's request to join the workspace.
+ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏
+
+
+
+
+
+
+
+
+
+
+ {{ username }}
+ has requested access to
+ {{ workspace_name }}
+
+
+
+
+
+
+
+
+
+
+ {{ workspace_name }}
+
+ {{ workspace_member_count }} members
+
+
+
+
+
+
+ By clicking "Approve request" above, the user will be added to the
+ workspace.
+
+
+
+
+
+
+
+
+
+
+
+
+ Bring projects, knowledge, and teams together with the power of AI.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/assets/mailer_templates/build_production/access_request_approved_notification.html b/assets/mailer_templates/build_production/access_request_approved_notification.html
new file mode 100644
index 000000000..4b6f8e6ea
--- /dev/null
+++ b/assets/mailer_templates/build_production/access_request_approved_notification.html
@@ -0,0 +1,131 @@
+
+
+
+
+
+
+
+
+
+
+ Your access request has been approved
+
+
+
+
+ Workspace access request approved notification
+ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏ ͏
+
+
+
+
+
+
+
+ Your request to access
+ {{ workspace_name }}
+ has been approved
+
+
+
+
+
+
+
+
+
+
+ {{ workspace_name }}
+
+ {{ workspace_member_count }} members
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Bring projects, knowledge, and teams together with the power of AI.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/deploy.env b/deploy.env
index e161ffb3e..743592e51 100644
--- a/deploy.env
+++ b/deploy.env
@@ -78,8 +78,12 @@ GOTRUE_EXTERNAL_APPLE_SECRET=
GOTRUE_EXTERNAL_APPLE_REDIRECT_URI=http://your-host/gotrue/callback
# File Storage
-# This is where storage like images, files, etc. will be stored
-# By default, Minio is used as the default file storage which uses host's file system
+# Create the bucket if not exists on AppFlowy Cloud start up.
+# Set this to false if the bucket has been created externally.
+APPFLOWY_S3_CREATE_BUCKET=true
+# This is where storage like images, files, etc. will be stored.
+# By default, Minio is used as the default file storage which uses host's file system.
+# Keep this as true if you are using other S3 compatible storage provider other than AWS.
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=http://minio:9000 # change this if you are using a different address for minio
APPFLOWY_S3_ACCESS_KEY=minioadmin
@@ -141,4 +145,4 @@ APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100
# AppFlowy Web
-APPFLOWY_WEB_URL=
+APPFLOWY_WEB_URL=http://localhost:3000
diff --git a/dev.env b/dev.env
index bbec40c50..8a79184b5 100644
--- a/dev.env
+++ b/dev.env
@@ -76,6 +76,7 @@ GOTRUE_EXTERNAL_APPLE_SECRET=
GOTRUE_EXTERNAL_APPLE_REDIRECT_URI=http://localhost:9999/callback
# File Storage
+APPFLOWY_S3_CREATE_BUCKET=true
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=http://localhost:9000 # change this if you are using a different address for minio
APPFLOWY_S3_ACCESS_KEY=minioadmin
@@ -127,4 +128,4 @@ APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100
# AppFlowy Web
-APPFLOWY_WEB_URL=
+APPFLOWY_WEB_URL=http://localhost:3000
diff --git a/docker-compose-ci.yml b/docker-compose-ci.yml
index 6c52b93c3..1e5702142 100644
--- a/docker-compose-ci.yml
+++ b/docker-compose-ci.yml
@@ -111,6 +111,7 @@ services:
- APPFLOWY_DATABASE_MAX_CONNECTIONS=20
- APPFLOWY_AI_SERVER_HOST=${APPFLOWY_AI_SERVER_HOST}
- APPFLOWY_AI_SERVER_PORT=${APPFLOWY_AI_SERVER_PORT}
+ - APPFLOWY_WEB_URL=${APPFLOWY_WEB_URL}
build:
context: .
dockerfile: Dockerfile
diff --git a/docker-compose.yml b/docker-compose.yml
index 5ebf11f90..a25cc112b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -98,6 +98,7 @@ services:
- APPFLOWY_GOTRUE_EXT_URL=${API_EXTERNAL_URL}
- APPFLOWY_GOTRUE_ADMIN_EMAIL=${GOTRUE_ADMIN_EMAIL}
- APPFLOWY_GOTRUE_ADMIN_PASSWORD=${GOTRUE_ADMIN_PASSWORD}
+ - APPFLOWY_S3_CREATE_BUCKET=${APPFLOWY_S3_CREATE_BUCKET}
- APPFLOWY_S3_USE_MINIO=${APPFLOWY_S3_USE_MINIO}
- APPFLOWY_S3_MINIO_URL=${APPFLOWY_S3_MINIO_URL}
- APPFLOWY_S3_ACCESS_KEY=${APPFLOWY_S3_ACCESS_KEY}
diff --git a/email_template/config.js b/email_template/config.js
index 3e6ed1aa7..08ff1a252 100644
--- a/email_template/config.js
+++ b/email_template/config.js
@@ -14,23 +14,25 @@
module.exports = {
build: {
templates: {
- source: 'src/templates',
+ source: "src/templates",
destination: {
- path: 'build_local',
+ path: "build_local",
},
assets: {
- source: 'src/images',
- destination: 'images',
+ source: "src/images",
+ destination: "images",
},
},
},
locals: {
- cdnBaseUrl: '',
+ cdnBaseUrl: "",
userIconUrl: "https://cdn-icons-png.flaticon.com/512/1077/1077012.png",
userName: "John Doe",
acceptUrl: "https://appflowy.io",
+ approveUrl: "https://appflowy.io",
+ launchWorkspaceUrl: "https://appflowy.io",
workspaceName: "AppFlowy",
workspaceMembersCount: "100",
workspaceIconURL: "https://cdn-icons-png.flaticon.com/512/1078/1078013.png",
},
-}
+};
diff --git a/email_template/config.production.js b/email_template/config.production.js
index 0b37b3177..a2a859935 100644
--- a/email_template/config.production.js
+++ b/email_template/config.production.js
@@ -15,15 +15,18 @@ module.exports = {
build: {
templates: {
destination: {
- path: '../assets/mailer_templates/build_production',
+ path: "../assets/mailer_templates/build_production",
},
},
},
locals: {
- cdnBaseUrl: 'https://raw.githubusercontent.com/AppFlowy-IO/AppFlowy-Cloud/main/assets/mailer_templates/build_production/',
+ cdnBaseUrl:
+ "https://raw.githubusercontent.com/AppFlowy-IO/AppFlowy-Cloud/main/assets/mailer_templates/build_production/",
userIconUrl: "{{ user_icon_url }}",
userName: "{{ username }}",
acceptUrl: "{{ accept_url }}",
+ approveUrl: "{{ approve_url }}",
+ launchWorkspaceUrl: "{{ launch_workspace_url }}",
workspaceName: "{{ workspace_name }}",
workspaceMembersCount: "{{ workspace_member_count }}",
workspaceIconURL: "{{ workspace_icon_url }}",
@@ -32,4 +35,4 @@ module.exports = {
removeUnusedCSS: true,
shorthandCSS: true,
prettify: true,
-}
+};
diff --git a/email_template/src/templates/access_request.html b/email_template/src/templates/access_request.html
new file mode 100644
index 000000000..f8aa56123
--- /dev/null
+++ b/email_template/src/templates/access_request.html
@@ -0,0 +1,129 @@
+---
+title: "Request to join the workspace"
+preheader: "Approve a user's request to join the workspace."
+bodyClass: bg-purple-50
+---
+
+
+
+
+
+
+
+
+
+
+ {{ userName }}
+ has requested access to
+ {{ workspaceName }}
+
+
+
+
+
+
+
+
+
+
+ {{ workspaceName }}
+
+ {{ workspaceMembersCount }} members
+
+
+
+
+
+ Approve request
+
+
+ By clicking "Approve request" above, the user will be added to the
+ workspace.
+
+
+
+
+
+
+
+
+
+
+
+
+ Bring projects, knowledge, and teams together with the power of AI.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/email_template/src/templates/access_request_approved_notification.html b/email_template/src/templates/access_request_approved_notification.html
new file mode 100644
index 000000000..120e04f80
--- /dev/null
+++ b/email_template/src/templates/access_request_approved_notification.html
@@ -0,0 +1,135 @@
+---
+title: "Your access request has been approved"
+preheader: "Workspace access request approved notification"
+bodyClass: bg-purple-50
+---
+
+
+
+
+
+
+
+ Your request to access
+ {{ workspaceName }}
+ has been approved
+
+
+
+
+
+
+
+
+
+
+ {{ workspaceName }}
+
+ {{ workspaceMembersCount }} members
+
+
+
+
+
+ View workspace
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Bring projects, knowledge, and teams together with the power of AI.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/libs/database-entity/Cargo.toml b/libs/database-entity/Cargo.toml
index 5206caf95..95bd75241 100644
--- a/libs/database-entity/Cargo.toml
+++ b/libs/database-entity/Cargo.toml
@@ -22,3 +22,4 @@ app-error = { workspace = true }
bincode = "1.3.3"
appflowy-ai-client = { workspace = true, features = ["dto"] }
bytes.workspace = true
+prost = "0.12"
diff --git a/libs/database-entity/src/dto.rs b/libs/database-entity/src/dto.rs
index f53625ddd..978db060c 100644
--- a/libs/database-entity/src/dto.rs
+++ b/libs/database-entity/src/dto.rs
@@ -1,8 +1,12 @@
+use crate::error::EntityError;
+use crate::error::EntityError::{DeserializationError, InvalidData};
use crate::util::{validate_not_empty_payload, validate_not_empty_str};
use appflowy_ai_client::dto::AIModel;
use bytes::Bytes;
use chrono::{DateTime, Utc};
+use collab_entity::proto;
use collab_entity::CollabType;
+use prost::Message;
use serde::{Deserialize, Serialize};
use serde_repr::{Deserialize_repr, Serialize_repr};
use std::cmp::Ordering;
@@ -62,7 +66,7 @@ impl CreateCollabParams {
pub struct CollabIndexParams {}
-#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
+#[derive(Debug, Clone, Validate, Serialize, Deserialize, PartialEq)]
pub struct CollabParams {
#[validate(custom = "validate_not_empty_str")]
pub object_id: String,
@@ -107,7 +111,50 @@ impl CollabParams {
},
}
}
+
+ pub fn to_proto(&self) -> proto::collab::CollabParams {
+ proto::collab::CollabParams {
+ object_id: self.object_id.clone(),
+ encoded_collab: self.encoded_collab_v1.to_vec(),
+ collab_type: self.collab_type.to_proto() as i32,
+ embeddings: self
+ .embeddings
+ .as_ref()
+ .map(|embeddings| embeddings.to_proto()),
+ }
+ }
+
+ pub fn to_protobuf_bytes(&self) -> Vec {
+ self.to_proto().encode_to_vec()
+ }
+
+ pub fn from_protobuf_bytes(bytes: &[u8]) -> Result {
+ match proto::collab::CollabParams::decode(bytes) {
+ Ok(proto) => Self::try_from(proto),
+ Err(err) => Err(DeserializationError(err.to_string())),
+ }
+ }
}
+
+impl TryFrom for CollabParams {
+ type Error = EntityError;
+
+ fn try_from(proto: proto::collab::CollabParams) -> Result {
+ let collab_type_proto = proto::collab::CollabType::try_from(proto.collab_type).unwrap();
+ let collab_type = CollabType::from_proto(&collab_type_proto);
+ let embeddings = proto
+ .embeddings
+ .map(AFCollabEmbeddings::from_proto)
+ .transpose()?;
+ Ok(Self {
+ object_id: proto.object_id,
+ encoded_collab_v1: Bytes::from(proto.encoded_collab),
+ collab_type,
+ embeddings,
+ })
+ }
+}
+
#[derive(Serialize, Deserialize)]
struct CollabParamsV0 {
object_id: String,
@@ -527,6 +574,8 @@ pub struct AFWorkspace {
pub database_storage_id: Uuid,
pub owner_uid: i64,
pub owner_name: String,
+ #[serde(default)]
+ pub owner_email: String,
pub workspace_type: i32,
pub workspace_name: String,
pub created_at: DateTime,
@@ -916,12 +965,72 @@ pub struct AFCollabEmbeddingParams {
pub embedding: Option>,
}
+impl AFCollabEmbeddingParams {
+ pub fn from_proto(proto: &proto::collab::CollabEmbeddingsParams) -> Result {
+ let collab_type_proto = proto::collab::CollabType::try_from(proto.collab_type).unwrap();
+ let collab_type = CollabType::from_proto(&collab_type_proto);
+ let content_type_proto =
+ proto::collab::EmbeddingContentType::try_from(proto.content_type).unwrap();
+ let content_type = EmbeddingContentType::from_proto(content_type_proto)?;
+ let embedding = if proto.embedding.is_empty() {
+ None
+ } else {
+ Some(proto.embedding.clone())
+ };
+ Ok(Self {
+ fragment_id: proto.fragment_id.clone(),
+ object_id: proto.object_id.clone(),
+ collab_type,
+ content_type,
+ content: proto.content.clone(),
+ embedding,
+ })
+ }
+
+ pub fn to_proto(&self) -> proto::collab::CollabEmbeddingsParams {
+ proto::collab::CollabEmbeddingsParams {
+ fragment_id: self.fragment_id.clone(),
+ object_id: self.object_id.clone(),
+ collab_type: self.collab_type.to_proto() as i32,
+ content_type: self.content_type.to_proto() as i32,
+ content: self.content.clone(),
+ embedding: self.embedding.clone().unwrap_or_default(),
+ }
+ }
+
+ pub fn to_protobuf_bytes(&self) -> Vec {
+ self.to_proto().encode_to_vec()
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AFCollabEmbeddings {
pub tokens_consumed: u32,
pub params: Vec,
}
+impl AFCollabEmbeddings {
+ pub fn from_proto(proto: proto::collab::CollabEmbeddings) -> Result {
+ let mut params = vec![];
+ for param in proto.embeddings {
+ params.push(AFCollabEmbeddingParams::from_proto(¶m)?);
+ }
+ Ok(Self {
+ tokens_consumed: proto.tokens_consumed,
+ params,
+ })
+ }
+
+ pub fn to_proto(&self) -> proto::collab::CollabEmbeddings {
+ let embeddings: Vec =
+ self.params.iter().map(|param| param.to_proto()).collect();
+ proto::collab::CollabEmbeddings {
+ tokens_consumed: self.tokens_consumed,
+ embeddings,
+ }
+ }
+}
+
/// Type of content stored by the embedding.
/// Currently only plain text of the document is supported.
/// In the future, we might support other kinds like i.e. PDF, images or image-extracted text.
@@ -932,6 +1041,24 @@ pub enum EmbeddingContentType {
PlainText = 0,
}
+impl EmbeddingContentType {
+ pub fn from_proto(proto: proto::collab::EmbeddingContentType) -> Result {
+ match proto {
+ proto::collab::EmbeddingContentType::PlainText => Ok(EmbeddingContentType::PlainText),
+ proto::collab::EmbeddingContentType::Unknown => Err(InvalidData(format!(
+ "{} is not a supported embedding type",
+ proto.as_str_name()
+ ))),
+ }
+ }
+
+ pub fn to_proto(&self) -> proto::collab::EmbeddingContentType {
+ match self {
+ EmbeddingContentType::PlainText => proto::collab::EmbeddingContentType::PlainText,
+ }
+ }
+}
+
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateChatMessageResponse {
pub answer: Option,
@@ -1285,8 +1412,13 @@ pub struct ApproveAccessRequestParams {
#[cfg(test)]
mod test {
- use crate::dto::{CollabParams, CollabParamsV0};
- use collab_entity::CollabType;
+ use crate::dto::{
+ AFCollabEmbeddingParams, AFCollabEmbeddings, CollabParams, CollabParamsV0, EmbeddingContentType,
+ };
+ use crate::error::EntityError;
+ use bytes::Bytes;
+ use collab_entity::{proto, CollabType};
+ use prost::Message;
use uuid::Uuid;
#[test]
@@ -1356,4 +1488,77 @@ mod test {
assert_eq!(collab_params.collab_type, v0.collab_type);
assert_eq!(collab_params.encoded_collab_v1, v0.encoded_collab_v1);
}
+
+ #[test]
+ fn deserialization_using_protobuf() {
+ let collab_params_with_embeddings = CollabParams {
+ object_id: "object_id".to_string(),
+ collab_type: CollabType::Document,
+ encoded_collab_v1: Bytes::default(),
+ embeddings: Some(AFCollabEmbeddings {
+ tokens_consumed: 100,
+ params: vec![AFCollabEmbeddingParams {
+ fragment_id: "fragment_id".to_string(),
+ object_id: "object_id".to_string(),
+ collab_type: CollabType::Document,
+ content_type: EmbeddingContentType::PlainText,
+ content: "content".to_string(),
+ embedding: Some(vec![1.0, 2.0, 3.0]),
+ }],
+ }),
+ };
+
+ let protobuf_encoded = collab_params_with_embeddings.to_protobuf_bytes();
+ let collab_params_decoded = CollabParams::from_protobuf_bytes(&protobuf_encoded).unwrap();
+ assert_eq!(collab_params_with_embeddings, collab_params_decoded);
+ }
+
+ #[test]
+ fn deserialize_collab_params_without_embeddings() {
+ let collab_params = CollabParams {
+ object_id: "object_id".to_string(),
+ collab_type: CollabType::Document,
+ encoded_collab_v1: Bytes::from(vec![1, 2, 3]),
+ embeddings: Some(AFCollabEmbeddings {
+ tokens_consumed: 100,
+ params: vec![AFCollabEmbeddingParams {
+ fragment_id: "fragment_id".to_string(),
+ object_id: "object_id".to_string(),
+ collab_type: CollabType::Document,
+ content_type: EmbeddingContentType::PlainText,
+ content: "content".to_string(),
+ embedding: None,
+ }],
+ }),
+ };
+
+ let protobuf_encoded = collab_params.to_protobuf_bytes();
+ let collab_params_decoded = CollabParams::from_protobuf_bytes(&protobuf_encoded).unwrap();
+ assert_eq!(collab_params, collab_params_decoded);
+ }
+
+ #[test]
+ fn deserialize_collab_params_with_unknown_embedding_type() {
+ let invalid_serialization = proto::collab::CollabParams {
+ object_id: "object_id".to_string(),
+ encoded_collab: vec![1, 2, 3],
+ collab_type: proto::collab::CollabType::Document as i32,
+ embeddings: Some(proto::collab::CollabEmbeddings {
+ tokens_consumed: 100,
+ embeddings: vec![proto::collab::CollabEmbeddingsParams {
+ fragment_id: "fragment_id".to_string(),
+ object_id: "object_id".to_string(),
+ collab_type: proto::collab::CollabType::Document as i32,
+ content_type: proto::collab::EmbeddingContentType::Unknown as i32,
+ content: "content".to_string(),
+ embedding: vec![1.0, 2.0, 3.0],
+ }],
+ }),
+ }
+ .encode_to_vec();
+
+ let result = CollabParams::from_protobuf_bytes(&invalid_serialization);
+ assert!(result.is_err());
+ assert!(matches!(result, Err(EntityError::InvalidData(_))));
+ }
}
diff --git a/libs/database-entity/src/error.rs b/libs/database-entity/src/error.rs
new file mode 100644
index 000000000..f3ae3b499
--- /dev/null
+++ b/libs/database-entity/src/error.rs
@@ -0,0 +1,9 @@
+#[derive(Debug, thiserror::Error)]
+pub enum EntityError {
+ #[error("Invalid data: {0}")]
+ InvalidData(String),
+ #[error("Deserialization error: {0}")]
+ DeserializationError(String),
+ #[error("Serialization error: {0}")]
+ SerializationError(String),
+}
diff --git a/libs/database-entity/src/lib.rs b/libs/database-entity/src/lib.rs
index 0d3d14ec8..1c4fa4ccd 100644
--- a/libs/database-entity/src/lib.rs
+++ b/libs/database-entity/src/lib.rs
@@ -1,3 +1,4 @@
pub mod dto;
+pub mod error;
pub mod file_dto;
mod util;
diff --git a/libs/database/src/access_request.rs b/libs/database/src/access_request.rs
index 4650fd1e6..9b77a7093 100644
--- a/libs/database/src/access_request.rs
+++ b/libs/database/src/access_request.rs
@@ -71,6 +71,7 @@ pub async fn select_access_request_by_request_id<'a, E: Executor<'a, Database =
af_workspace.database_storage_id,
af_workspace.owner_uid,
owner_profile.name,
+ owner_profile.email,
af_workspace.created_at,
af_workspace.workspace_type,
af_workspace.deleted_at,
diff --git a/libs/database/src/collab/collab_storage.rs b/libs/database/src/collab/collab_storage.rs
index d64ef27ac..b54b69cf1 100644
--- a/libs/database/src/collab/collab_storage.rs
+++ b/libs/database/src/collab/collab_storage.rs
@@ -138,6 +138,7 @@ pub trait CollabStorage: Send + Sync + 'static {
&self,
uid: &i64,
queries: Vec,
+ from_editing_collab: bool,
) -> HashMap;
/// Deletes a collaboration from the storage.
@@ -249,8 +250,12 @@ where
&self,
uid: &i64,
queries: Vec,
+ from_editing_collab: bool,
) -> HashMap {
- self.as_ref().batch_get_collab(uid, queries).await
+ self
+ .as_ref()
+ .batch_get_collab(uid, queries, from_editing_collab)
+ .await
}
async fn delete_collab(&self, workspace_id: &str, uid: &i64, object_id: &str) -> AppResult<()> {
diff --git a/libs/database/src/pg_row.rs b/libs/database/src/pg_row.rs
index 357bd2b2d..28e04e525 100644
--- a/libs/database/src/pg_row.rs
+++ b/libs/database/src/pg_row.rs
@@ -19,6 +19,7 @@ pub struct AFWorkspaceRow {
pub database_storage_id: Option,
pub owner_uid: Option,
pub owner_name: Option,
+ pub owner_email: Option,
pub created_at: Option>,
pub workspace_type: i32,
pub deleted_at: Option>,
@@ -46,6 +47,7 @@ impl TryFrom for AFWorkspace {
database_storage_id,
owner_uid,
owner_name: value.owner_name.unwrap_or_default(),
+ owner_email: value.owner_email.unwrap_or_default(),
workspace_type: value.workspace_type,
workspace_name,
created_at,
@@ -61,6 +63,7 @@ pub struct AFWorkspaceWithMemberCountRow {
pub database_storage_id: Option,
pub owner_uid: Option,
pub owner_name: Option,
+ pub owner_email: Option<String>,
pub created_at: Option>,
pub workspace_type: i32,
pub deleted_at: Option>,
@@ -89,6 +92,7 @@ impl TryFrom for AFWorkspace {
database_storage_id,
owner_uid,
owner_name: value.owner_name.unwrap_or_default(),
+ owner_email: value.owner_email.unwrap_or_default(),
workspace_type: value.workspace_type,
workspace_name,
created_at,
diff --git a/libs/database/src/workspace.rs b/libs/database/src/workspace.rs
index 2c90b108c..d6e15a7f7 100644
--- a/libs/database/src/workspace.rs
+++ b/libs/database/src/workspace.rs
@@ -39,19 +39,24 @@ pub async fn insert_user_workspace(
let workspace = sqlx::query_as!(
AFWorkspaceRow,
r#"
- INSERT INTO public.af_workspace (owner_uid, workspace_name)
- VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)
- RETURNING
+ WITH new_workspace AS (
+ INSERT INTO public.af_workspace (owner_uid, workspace_name)
+ VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)
+ RETURNING *
+ )
+ SELECT
workspace_id,
database_storage_id,
owner_uid,
- (SELECT name FROM public.af_user WHERE uid = owner_uid) AS owner_name,
- created_at,
+ owner_profile.name AS owner_name,
+ owner_profile.email AS owner_email,
+ new_workspace.created_at,
workspace_type,
- deleted_at,
+ new_workspace.deleted_at,
workspace_name,
icon
- ;
+ FROM new_workspace
+ JOIN public.af_user AS owner_profile ON new_workspace.owner_uid = owner_profile.uid;
"#,
user_uuid,
workspace_name,
@@ -670,13 +675,16 @@ pub async fn select_workspace<'a, E: Executor<'a, Database = Postgres>>(
workspace_id,
database_storage_id,
owner_uid,
- (SELECT name FROM public.af_user WHERE uid = owner_uid) AS owner_name,
- created_at,
+ owner_profile.name as owner_name,
+ owner_profile.email as owner_email,
+ af_workspace.created_at,
workspace_type,
- deleted_at,
+ af_workspace.deleted_at,
workspace_name,
icon
- FROM public.af_workspace WHERE workspace_id = $1
+ FROM public.af_workspace
+ JOIN public.af_user owner_profile ON af_workspace.owner_uid = owner_profile.uid
+ WHERE workspace_id = $1
"#,
workspace_id
)
@@ -718,7 +726,8 @@ pub async fn select_all_user_workspaces<'a, E: Executor<'a, Database = Postgres>
w.workspace_id,
w.database_storage_id,
w.owner_uid,
- (SELECT name FROM public.af_user WHERE uid = w.owner_uid) AS owner_name,
+ u.name AS owner_name,
+ u.email AS owner_email,
w.created_at,
w.workspace_type,
w.deleted_at,
@@ -726,6 +735,7 @@ pub async fn select_all_user_workspaces<'a, E: Executor<'a, Database = Postgres>
w.icon
FROM af_workspace w
JOIN af_workspace_member wm ON w.workspace_id = wm.workspace_id
+ JOIN public.af_user u ON w.owner_uid = u.uid
WHERE wm.uid = (
SELECT uid FROM public.af_user WHERE uuid = $1
);
diff --git a/script/client_api_deps_check.sh b/script/client_api_deps_check.sh
index 3e8310654..6d296d7ff 100755
--- a/script/client_api_deps_check.sh
+++ b/script/client_api_deps_check.sh
@@ -3,7 +3,7 @@
# Generate the current dependency list
cargo tree > current_deps.txt
-BASELINE_COUNT=620
+BASELINE_COUNT=621
CURRENT_COUNT=$(cat current_deps.txt | wc -l)
echo "Expected dependency count (baseline): $BASELINE_COUNT"
diff --git a/services/appflowy-collaborate/Cargo.toml b/services/appflowy-collaborate/Cargo.toml
index f9aac2dff..a05cfa3b2 100644
--- a/services/appflowy-collaborate/Cargo.toml
+++ b/services/appflowy-collaborate/Cargo.toml
@@ -15,9 +15,19 @@ path = "src/lib.rs"
access-control.workspace = true
actix.workspace = true
actix-web.workspace = true
-actix-http = { workspace = true, default-features = false, features = ["openssl", "compress-brotli", "compress-gzip"] }
+actix-http = { workspace = true, default-features = false, features = [
+ "openssl",
+ "compress-brotli",
+ "compress-gzip",
+] }
actix-web-actors = { version = "4.3" }
-app-error = { workspace = true, features = ["sqlx_error", "actix_web_error", "tokio_error", "bincode_error", "appflowy_ai_error"] }
+app-error = { workspace = true, features = [
+ "sqlx_error",
+ "actix_web_error",
+ "tokio_error",
+ "bincode_error",
+ "appflowy_ai_error",
+] }
authentication.workspace = true
brotli.workspace = true
dashmap.workspace = true
@@ -29,13 +39,24 @@ tracing = "0.1.40"
futures-util = "0.3.30"
tokio-util = { version = "0.7", features = ["codec"] }
tokio-stream = { version = "0.1.14", features = ["sync"] }
-tokio = { workspace = true, features = ["net", "sync", "macros", "rt-multi-thread"] }
+tokio = { workspace = true, features = [
+ "net",
+ "sync",
+ "macros",
+ "rt-multi-thread",
+] }
async-trait = "0.1.77"
prost.workspace = true
serde.workspace = true
serde_json.workspace = true
serde_repr.workspace = true
-sqlx = { workspace = true, default-features = false, features = ["runtime-tokio-rustls", "macros", "postgres", "uuid", "chrono"] }
+sqlx = { workspace = true, default-features = false, features = [
+ "runtime-tokio-rustls",
+ "macros",
+ "postgres",
+ "uuid",
+ "chrono",
+] }
thiserror = "1.0.56"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
anyhow = "1"
@@ -66,6 +87,7 @@ lazy_static = "1.4.0"
itertools = "0.12.0"
validator = "0.16.1"
workspace-access.workspace = true
+rayon.workspace = true
[dev-dependencies]
rand = "0.8.5"
diff --git a/services/appflowy-collaborate/src/collab/storage.rs b/services/appflowy-collaborate/src/collab/storage.rs
index 3c534d60b..002faaa8e 100644
--- a/services/appflowy-collaborate/src/collab/storage.rs
+++ b/services/appflowy-collaborate/src/collab/storage.rs
@@ -7,6 +7,7 @@ use collab::entity::EncodedCollab;
use collab_entity::CollabType;
use collab_rt_entity::ClientCollabMessage;
use itertools::{Either, Itertools};
+use rayon::iter::{IntoParallelIterator, ParallelIterator};
use sqlx::Transaction;
use tokio::time::timeout;
use tracing::warn;
@@ -158,6 +159,40 @@ where
}
}
+ async fn batch_get_encode_collab_from_editing(
+ &self,
+ object_ids: Vec<String>,
+ ) -> HashMap<String, EncodedCollab> {
+ let (ret, rx) = tokio::sync::oneshot::channel();
+ let timeout_duration = Duration::from_secs(10);
+
+ // Attempt to send the command to the realtime server
+ if let Err(err) = self
+ .rt_cmd_sender
+ .send(CollaborationCommand::BatchGetEncodeCollab { object_ids, ret })
+ .await
+ {
+ error!(
+ "Failed to send get encode collab command to realtime server: {}",
+ err
+ );
+ return HashMap::new();
+ }
+
+ // Await the response from the realtime server with a timeout
+ match timeout(timeout_duration, rx).await {
+ Ok(Ok(batch_encoded_collab)) => batch_encoded_collab,
+ Ok(Err(err)) => {
+ error!("Failed to get encode collab from realtime server: {}", err);
+ HashMap::new()
+ },
+ Err(_) => {
+ error!("Timeout waiting for encode collab from realtime server");
+ HashMap::new()
+ },
+ }
+ }
+
async fn queue_insert_collab(
&self,
workspace_id: &str,
@@ -326,6 +361,7 @@ where
&self,
_uid: &i64,
queries: Vec<QueryCollab>,
+ from_editing_collab: bool,
) -> HashMap<String, QueryCollabResult> {
// Partition queries based on validation into valid queries and errors (with associated error messages).
let (valid_queries, mut results): (Vec<_>, HashMap<_, _>) =
@@ -340,8 +376,48 @@ where
},
)),
});
+ let cache_queries = if from_editing_collab {
+ let editing_queries = valid_queries.clone();
+ let editing_results = self
+ .batch_get_encode_collab_from_editing(
+ editing_queries
+ .iter()
+ .map(|q| q.object_id.clone())
+ .collect(),
+ )
+ .await;
+ let editing_query_collab_results: HashMap<String, QueryCollabResult> =
+ tokio::task::spawn_blocking(move || {
+ let par_iter = editing_results.into_par_iter();
+ par_iter
+ .map(|(object_id, encoded_collab)| {
+ let encoding_result = encoded_collab.encode_to_bytes();
+ let query_collab_result = match encoding_result {
+ Ok(encoded_collab_bytes) => QueryCollabResult::Success {
+ encode_collab_v1: encoded_collab_bytes,
+ },
+ Err(err) => QueryCollabResult::Failed {
+ error: err.to_string(),
+ },
+ };
+
+ (object_id.clone(), query_collab_result)
+ })
+ .collect()
+ })
+ .await
+ .unwrap();
+ let editing_object_ids: Vec<String> = editing_query_collab_results.keys().cloned().collect();
+ results.extend(editing_query_collab_results);
+ valid_queries
+ .into_iter()
+ .filter(|q| !editing_object_ids.contains(&q.object_id))
+ .collect()
+ } else {
+ valid_queries
+ };
- results.extend(self.cache.batch_get_encode_collab(valid_queries).await);
+ results.extend(self.cache.batch_get_encode_collab(cache_queries).await);
results
}
diff --git a/services/appflowy-collaborate/src/command.rs b/services/appflowy-collaborate/src/command.rs
index 4478f606c..eb9909b38 100644
--- a/services/appflowy-collaborate/src/command.rs
+++ b/services/appflowy-collaborate/src/command.rs
@@ -1,22 +1,36 @@
use crate::{
error::RealtimeError,
- group::cmd::{GroupCommand, GroupCommandSender},
+ group::{
+ cmd::{GroupCommand, GroupCommandSender},
+ manager::GroupManager,
+ },
};
+use access_control::collab::RealtimeAccessControl;
use collab::entity::EncodedCollab;
use collab_rt_entity::ClientCollabMessage;
use dashmap::DashMap;
-use std::sync::Arc;
+use database::collab::CollabStorage;
+use futures::StreamExt;
+use std::{
+ collections::HashMap,
+ sync::{Arc, Weak},
+};
use tracing::error;
pub type CLCommandSender = tokio::sync::mpsc::Sender<CollaborationCommand>;
pub type CLCommandReceiver = tokio::sync::mpsc::Receiver<CollaborationCommand>;
pub type EncodeCollabSender = tokio::sync::oneshot::Sender<Option<EncodedCollab>>;
+pub type BatchEncodeCollabSender = tokio::sync::oneshot::Sender<HashMap<String, EncodedCollab>>;
pub enum CollaborationCommand {
GetEncodeCollab {
object_id: String,
ret: EncodeCollabSender,
},
+ BatchGetEncodeCollab {
+ object_ids: Vec,
+ ret: BatchEncodeCollabSender,
+ },
ServerSendCollabMessage {
object_id: String,
collab_messages: Vec,
@@ -24,10 +38,14 @@ pub enum CollaborationCommand {
},
}
-pub(crate) fn spawn_collaboration_command(
+pub(crate) fn spawn_collaboration_command<S, AC>(
mut command_recv: CLCommandReceiver,
group_sender_by_object_id: &Arc<DashMap<String, GroupCommandSender>>,
-) {
+ weak_groups: Weak<GroupManager<S, AC>>,
+) where
+ S: CollabStorage,
+ AC: RealtimeAccessControl,
+{
let group_sender_by_object_id = group_sender_by_object_id.clone();
tokio::spawn(async move {
while let Some(cmd) = command_recv.recv().await {
@@ -50,6 +68,35 @@ pub(crate) fn spawn_collaboration_command(
},
}
},
+ CollaborationCommand::BatchGetEncodeCollab { object_ids, ret } => {
+ if let Some(group_manager) = weak_groups.upgrade() {
+ let tasks = futures::stream::iter(object_ids)
+ .map(|object_id| {
+ let cloned_group_manager = group_manager.clone();
+ tokio::task::spawn(async move {
+ let group = cloned_group_manager.get_group(&object_id).await;
+ if let Some(group) = group {
+ (object_id, group.encode_collab().await.ok())
+ } else {
+ (object_id, None)
+ }
+ })
+ })
+ .collect::<Vec<_>>()
+ .await;
+
+ let mut outputs: HashMap<String, EncodedCollab> = HashMap::new();
+ for task in tasks {
+ let result = task.await;
+ if let Ok((object_id, Some(encoded_collab))) = result {
+ outputs.insert(object_id, encoded_collab);
+ }
+ }
+ let _ = ret.send(outputs);
+ } else {
+ let _ = ret.send(HashMap::new());
+ }
+ },
CollaborationCommand::ServerSendCollabMessage {
object_id,
collab_messages,
diff --git a/services/appflowy-collaborate/src/rt_server.rs b/services/appflowy-collaborate/src/rt_server.rs
index ca1f09f1a..f973d6a56 100644
--- a/services/appflowy-collaborate/src/rt_server.rs
+++ b/services/appflowy-collaborate/src/rt_server.rs
@@ -89,7 +89,11 @@ where
spawn_period_check_inactive_group(Arc::downgrade(&group_manager), &group_sender_by_object_id);
- spawn_collaboration_command(command_recv, &group_sender_by_object_id);
+ spawn_collaboration_command(
+ command_recv,
+ &group_sender_by_object_id,
+ Arc::downgrade(&group_manager),
+ );
spawn_metrics(metrics.clone(), storage.clone());
diff --git a/src/api/access_request.rs b/src/api/access_request.rs
index f8b87e672..e235f74a0 100644
--- a/src/api/access_request.rs
+++ b/src/api/access_request.rs
@@ -2,6 +2,8 @@ use actix_web::{
web::{self, Data, Json},
Result, Scope,
};
+use anyhow::anyhow;
+use app_error::AppError;
use authentication::jwt::UserUuid;
use database_entity::dto::{
AccessRequestMinimal, ApproveAccessRequestParams, CreateAccessRequestParams,
@@ -30,15 +32,18 @@ pub fn access_request_scope() -> Scope {
}
async fn get_access_request_handler(
- _uuid: UserUuid,
+ uuid: UserUuid,
access_request_id: web::Path,
state: Data,
) -> Result> {
let access_request_id = access_request_id.into_inner();
+ let uid = state.user_cache.get_user_uid(&uuid).await?;
let access_request = get_access_request(
&state.pg_pool,
state.collab_access_control_storage.clone(),
access_request_id,
+ *uuid,
+ uid,
)
.await?;
Ok(Json(AppResponse::Ok().with_data(access_request)))
@@ -52,7 +57,22 @@ async fn post_access_request_handler(
let uid = state.user_cache.get_user_uid(&uuid).await?;
let workspace_id = create_access_request_params.workspace_id;
let view_id = create_access_request_params.view_id;
- let request_id = create_access_request(&state.pg_pool, workspace_id, view_id, uid).await?;
+ let appflowy_web_url = state
+ .config
+ .appflowy_web_url
+ .clone()
+ .ok_or(AppError::Internal(anyhow!(
+ "AppFlowy web url has not been set"
+ )))?;
+ let request_id = create_access_request(
+ &state.pg_pool,
+ state.mailer.clone(),
+ &appflowy_web_url,
+ workspace_id,
+ view_id,
+ uid,
+ )
+ .await?;
let access_request = AccessRequestMinimal {
request_id,
workspace_id,
@@ -71,7 +91,22 @@ async fn post_approve_access_request_handler(
let uid = state.user_cache.get_user_uid(&uuid).await?;
let access_request_id = access_request_id.into_inner();
let is_approved = approve_access_request_params.is_approved;
- approve_or_reject_access_request(&state.pg_pool, access_request_id, uid, *uuid, is_approved)
- .await?;
+ let appflowy_web_url = state
+ .config
+ .appflowy_web_url
+ .clone()
+ .ok_or(AppError::Internal(anyhow!(
+ "AppFlowy web url has not been set"
+ )))?;
+ approve_or_reject_access_request(
+ &state.pg_pool,
+ state.mailer.clone(),
+ &appflowy_web_url,
+ access_request_id,
+ uid,
+ *uuid,
+ is_approved,
+ )
+ .await?;
Ok(Json(AppResponse::Ok()))
}
diff --git a/src/api/workspace.rs b/src/api/workspace.rs
index d7e371a08..7094f76e5 100644
--- a/src/api/workspace.rs
+++ b/src/api/workspace.rs
@@ -885,7 +885,7 @@ async fn batch_get_collab_handler(
let result = BatchQueryCollabResult(
state
.collab_access_control_storage
- .batch_get_collab(&uid, payload.into_inner().0)
+ .batch_get_collab(&uid, payload.into_inner().0, false)
.await,
);
Ok(Json(AppResponse::Ok().with_data(result)))
diff --git a/src/application.rs b/src/application.rs
index 192f22fbf..d44729255 100644
--- a/src/application.rs
+++ b/src/application.rs
@@ -461,7 +461,11 @@ pub async fn get_aws_s3_client(s3_setting: &S3Setting) -> Result Result {
let request_id = insert_new_access_request(pg_pool, workspace_id, view_id, uid).await?;
+ let access_request = select_access_request_by_request_id(pg_pool, request_id).await?;
+ let cloned_mailer = mailer.clone();
+ let approve_url = format!(
+ "{}/app/approve-request?request_id={}",
+ appflowy_web_url, request_id
+ );
+ let email = access_request.workspace.owner_email.clone();
+ let recipient_name = access_request.workspace.owner_name.clone();
+ // use default icon until we have workspace icon
+ let workspace_icon_url =
+ "https://miro.medium.com/v2/resize:fit:2400/1*mTPfm7CwU31-tLhtLNkyJw.png".to_string();
+ let user_icon_url =
+ "https://cdn.pixabay.com/photo/2015/10/05/22/37/blank-profile-picture-973460_1280.png"
+ .to_string();
+ tokio::spawn(async move {
+ if let Err(err) = cloned_mailer
+ .send_workspace_access_request(
+ &recipient_name,
+ &email,
+ WorkspaceAccessRequestMailerParam {
+ user_icon_url,
+ username: access_request.requester.name,
+ workspace_name: access_request.workspace.workspace_name,
+ workspace_icon_url,
+ workspace_member_count: access_request.workspace.member_count.unwrap_or(0),
+ approve_url,
+ },
+ )
+ .await
+ {
+ tracing::error!("Failed to send access request email: {:?}", err);
+ };
+ });
Ok(request_id)
}
@@ -35,9 +73,17 @@ pub async fn get_access_request(
pg_pool: &PgPool,
collab_storage: Arc,
access_request_id: Uuid,
+ user_uuid: Uuid,
+ user_uid: i64,
) -> Result {
let access_request_with_view_id =
select_access_request_by_request_id(pg_pool, access_request_id).await?;
+ if access_request_with_view_id.workspace.owner_uid != user_uid {
+ return Err(AppError::NotEnoughPermissions {
+ user: user_uuid.to_string(),
+ action: "get access request".to_string(),
+ });
+ }
let folder = get_latest_collab_folder(
collab_storage,
GetCollabOrigin::Server,
@@ -72,6 +118,8 @@ pub async fn get_access_request(
pub async fn approve_or_reject_access_request(
pg_pool: &PgPool,
+ mailer: Mailer,
+ appflowy_web_url: &str,
request_id: Uuid,
uid: i64,
user_uuid: Uuid,
@@ -95,6 +143,35 @@ pub async fn approve_or_reject_access_request(
role,
)
.await?;
+ let cloned_mailer = mailer.clone();
+ let launch_workspace_url = format!(
+ "{}/app/{}",
+ appflowy_web_url, &access_request.workspace.workspace_id
+ );
+
+ // use default icon until we have workspace icon
+ let workspace_icon_url =
+ "https://miro.medium.com/v2/resize:fit:2400/1*mTPfm7CwU31-tLhtLNkyJw.png".to_string();
+ tokio::spawn(async move {
+ if let Err(err) = cloned_mailer
+ .send_workspace_access_request_approval_notification(
+ &access_request.requester.name,
+ &access_request.requester.email,
+ WorkspaceAccessRequestApprovedMailerParam {
+ workspace_name: access_request.workspace.workspace_name,
+ workspace_icon_url,
+ workspace_member_count: access_request.workspace.member_count.unwrap_or(0),
+ launch_workspace_url,
+ },
+ )
+ .await
+ {
+ tracing::error!(
+ "Failed to send access request approved notification email: {:?}",
+ err
+ );
+ };
+ });
}
let status = if is_approved {
AFAccessRequestStatusColumn::Approved
diff --git a/src/biz/workspace/ops.rs b/src/biz/workspace/ops.rs
index b82f7cf66..9fb55165f 100644
--- a/src/biz/workspace/ops.rs
+++ b/src/biz/workspace/ops.rs
@@ -439,7 +439,7 @@ pub async fn invite_workspace_members(
tokio::spawn(async move {
if let Err(err) = cloned_mailer
.send_workspace_invite(
- invitation.email,
+ &invitation.email,
WorkspaceInviteMailerParam {
user_icon_url,
username: inviter_name,
diff --git a/src/biz/workspace/page_view.rs b/src/biz/workspace/page_view.rs
index 9f980d410..b999a17d2 100644
--- a/src/biz/workspace/page_view.rs
+++ b/src/biz/workspace/page_view.rs
@@ -189,7 +189,7 @@ async fn get_page_collab_data_for_database(
})
.collect();
let row_query_collab_results = collab_access_control_storage
- .batch_get_collab(&uid, queries)
+ .batch_get_collab(&uid, queries, true)
.await;
let row_data = tokio::task::spawn_blocking(move || {
let row_collabs: HashMap> = row_query_collab_results
diff --git a/src/config/config.rs b/src/config/config.rs
index 0786f4630..199eb16f7 100644
--- a/src/config/config.rs
+++ b/src/config/config.rs
@@ -48,6 +48,7 @@ pub struct CasbinSetting {
#[derive(serde::Deserialize, Clone, Debug)]
pub struct S3Setting {
+ pub create_bucket: bool,
pub use_minio: bool,
pub minio_url: String,
pub access_key: String,
@@ -205,6 +206,9 @@ pub fn get_configuration() -> Result {
},
redis_uri: get_env_var("APPFLOWY_REDIS_URI", "redis://localhost:6379").into(),
s3: S3Setting {
+ create_bucket: get_env_var("APPFLOWY_S3_CREATE_BUCKET", "true")
+ .parse()
+ .context("fail to get APPFLOWY_S3_CREATE_BUCKET")?,
use_minio: get_env_var("APPFLOWY_S3_USE_MINIO", "true")
.parse()
.context("fail to get APPFLOWY_S3_USE_MINIO")?,
diff --git a/src/mailer.rs b/src/mailer.rs
index 8d4a797bf..ae2c25d13 100644
--- a/src/mailer.rs
+++ b/src/mailer.rs
@@ -4,6 +4,7 @@ use lettre::transport::smtp::authentication::Credentials;
use lettre::Address;
use lettre::AsyncSmtpTransport;
use lettre::AsyncTransport;
+use std::collections::HashMap;
use std::sync::Arc;
use std::sync::RwLock;
@@ -18,6 +19,11 @@ pub struct Mailer {
smtp_username: String,
}
+pub const WORKSPACE_INVITE_TEMPLATE_NAME: &str = "workspace_invite";
+pub const WORKSPACE_ACCESS_REQUEST_TEMPLATE_NAME: &str = "workspace_access_request";
+pub const WORKSPACE_ACCESS_REQUEST_APPROVED_NOTIFICATION_TEMPLATE_NAME: &str =
+ "workspace_access_request_approved_notification";
+
impl Mailer {
pub async fn new(
smtp_username: String,
@@ -33,12 +39,32 @@ impl Mailer {
let workspace_invite_template =
include_str!("../assets/mailer_templates/build_production/workspace_invitation.html");
+ let access_request_template =
+ include_str!("../assets/mailer_templates/build_production/access_request.html");
+ let access_request_approved_notification_template = include_str!(
+ "../assets/mailer_templates/build_production/access_request_approved_notification.html"
+ );
+ let template_strings = HashMap::from([
+ (WORKSPACE_INVITE_TEMPLATE_NAME, workspace_invite_template),
+ (
+ WORKSPACE_ACCESS_REQUEST_TEMPLATE_NAME,
+ access_request_template,
+ ),
+ (
+ WORKSPACE_ACCESS_REQUEST_APPROVED_NOTIFICATION_TEMPLATE_NAME,
+ access_request_approved_notification_template,
+ ),
+ ]);
- HANDLEBARS
- .write()
- .map_err(|err| anyhow::anyhow!(format!("Failed to write handlebars: {}", err)))?
- .register_template_string("workspace_invite", workspace_invite_template)
- .map_err(|err| anyhow::anyhow!(format!("Failed to register handlebars template: {}", err)))?;
+ for (template_name, template_string) in template_strings {
+ HANDLEBARS
+ .write()
+ .map_err(|err| anyhow::anyhow!(format!("Failed to write handlebars: {}", err)))?
+ .register_template_string(template_name, template_string)
+ .map_err(|err| {
+ anyhow::anyhow!(format!("Failed to register handlebars template: {}", err))
+ })?;
+ }
Ok(Self {
smtp_transport,
@@ -46,13 +72,19 @@ impl Mailer {
})
}
- pub async fn send_workspace_invite(
+ async fn send_email_template(
&self,
- email: String,
- param: WorkspaceInviteMailerParam,
- ) -> Result<(), anyhow::Error> {
+ recipient_name: Option,
+ email: &str,
+ template_name: &str,
+ param: T,
+ subject: &str,
+ ) -> Result<(), anyhow::Error>
+ where
+ T: serde::Serialize,
+ {
let rendered = match HANDLEBARS.read() {
- Ok(registory) => registory.render("workspace_invite", ¶m)?,
+ Ok(registory) => registory.render(template_name, ¶m)?,
Err(err) => anyhow::bail!(format!("Failed to render handlebars template: {}", err)),
};
@@ -62,19 +94,74 @@ impl Mailer {
self.smtp_username.parse::()?,
))
.to(lettre::message::Mailbox::new(
- Some(param.username.clone()),
+ recipient_name,
email.parse()?,
))
- .subject(format!(
- "Action required: {} invited you to {} in AppFlowy",
- param.username, param.workspace_name
- ))
+ .subject(subject)
.header(ContentType::TEXT_HTML)
.body(rendered)?;
AsyncTransport::send(&self.smtp_transport, email).await?;
Ok(())
}
+ pub async fn send_workspace_invite(
+ &self,
+ email: &str,
+ param: WorkspaceInviteMailerParam,
+ ) -> Result<(), anyhow::Error> {
+ let subject = format!(
+ "Action required: {} invited you to {} in AppFlowy",
+ param.username, param.workspace_name
+ );
+ self
+ .send_email_template(
+ Some(param.username.clone()),
+ email,
+ WORKSPACE_INVITE_TEMPLATE_NAME,
+ param,
+ &subject,
+ )
+ .await
+ }
+
+ pub async fn send_workspace_access_request(
+ &self,
+ recipient_name: &str,
+ email: &str,
+ param: WorkspaceAccessRequestMailerParam,
+ ) -> Result<(), anyhow::Error> {
+ let subject = format!(
+ "Action required: {} requested access to {} in AppFlowy",
+ param.username, param.workspace_name
+ );
+ self
+ .send_email_template(
+ Some(recipient_name.to_string()),
+ email,
+ WORKSPACE_ACCESS_REQUEST_TEMPLATE_NAME,
+ param,
+ &subject,
+ )
+ .await
+ }
+
+ pub async fn send_workspace_access_request_approval_notification(
+ &self,
+ recipient_name: &str,
+ email: &str,
+ param: WorkspaceAccessRequestApprovedMailerParam,
+ ) -> Result<(), anyhow::Error> {
+ let subject = "Notification: Workspace access request approved";
+ self
+ .send_email_template(
+ Some(recipient_name.to_string()),
+ email,
+ WORKSPACE_ACCESS_REQUEST_APPROVED_NOTIFICATION_TEMPLATE_NAME,
+ param,
+ subject,
+ )
+ .await
+ }
}
#[derive(serde::Serialize)]
@@ -86,3 +173,21 @@ pub struct WorkspaceInviteMailerParam {
pub workspace_member_count: String,
pub accept_url: String,
}
+
+#[derive(serde::Serialize)]
+pub struct WorkspaceAccessRequestMailerParam {
+ pub user_icon_url: String,
+ pub username: String,
+ pub workspace_name: String,
+ pub workspace_icon_url: String,
+ pub workspace_member_count: i64,
+ pub approve_url: String,
+}
+
+#[derive(serde::Serialize)]
+pub struct WorkspaceAccessRequestApprovedMailerParam {
+ pub workspace_name: String,
+ pub workspace_icon_url: String,
+ pub workspace_member_count: i64,
+ pub launch_workspace_url: String,
+}
diff --git a/tests/file_test/mod.rs b/tests/file_test/mod.rs
index 5e18da757..f06746e82 100644
--- a/tests/file_test/mod.rs
+++ b/tests/file_test/mod.rs
@@ -37,6 +37,7 @@ impl Deref for TestBucket {
impl TestBucket {
pub async fn new() -> Self {
let setting = S3Setting {
+ create_bucket: true,
use_minio: true,
minio_url: LOCALHOST_MINIO_URL.to_string(),
access_key: LOCALHOST_MINIO_ACCESS_KEY.to_string(),
diff --git a/tests/workspace/access_request.rs b/tests/workspace/access_request.rs
index 92d3c8a02..78b185c09 100644
--- a/tests/workspace/access_request.rs
+++ b/tests/workspace/access_request.rs
@@ -40,6 +40,13 @@ async fn access_request_test() {
resp.unwrap_err().code,
ErrorCode::AccessRequestAlreadyExists
);
+ // Only workspace owner should be allowed to view access requests
+ let resp = requester_client
+ .get_access_request(access_request.request_id)
+ .await;
+ assert!(resp.is_err());
+ assert_eq!(resp.unwrap_err().code, ErrorCode::NotEnoughPermissions);
+
let access_request_id = access_request.request_id;
let access_request_to_be_approved = owner_client
.get_access_request(access_request_id)